import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files: the vocab sits next to the TF checkpoint, so strip the
    # checkpoint file name ("model.ckpt", 10 characters) to locate vocab.txt.
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
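
# Hedged usage sketch for the converter above; the checkpoint and output paths
# are illustrative placeholders, not files shipped with this script:
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas_wtq_pytorch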
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: bias term plus the weighted sum of the inputs."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms; index == -1 corresponds to the bias parameter."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPreTraining applies a random mask in each
    # forward pass; fix the noise so the PT and TF models see the same mask
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
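
# Quick sanity check of the masked sequence-length formula used by the tester
# above (numbers mirror the tester defaults; this block is illustrative only):
# num_patches = (30 // 2) ** 2 = 225, and with mask_ratio = 0.6 the encoder
# keeps ceil(0.4 * (225 + 1)) = 91 tokens (the +1 is the [CLS] token).
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91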
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod with O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators (b ** (p - 2) is the modular inverse of b by Fermat):
print((a / b) % p == (a * b ** (p - 2)) % p)
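
# Why the two prints above hold: for prime p, Fermat's little theorem gives
# b**(p - 2) ≡ b**(-1) (mod p), so multiplying by it performs modular division.
# Small illustrative checks with the same prime:
assert binary_exponentiation(10, 699, 701) == pow(10, 699, 701)
assert (10 * binary_exponentiation(10, 699, 701)) % 701 == 1  # 10 * 10**(p-2) ≡ 1 (mod p)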
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word to the Baconian cipher using encode_dict."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-ciphered string; each letter is a group of five A/B symbols."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
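
# Round-trip sketch of the cipher above (codes read straight from encode_dict:
# h=AABBB, e=AABAA, l=ABABA, o=ABBAB):
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("hello world")) == "hello world"
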
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase__ = "Input must be a string of 8 numbers plus letter"
lowerCAmelCase__ = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowerCAmelCase( __A ):
if not isinstance(__A , __A ):
UpperCAmelCase = F"Expected string as input, found {type(__A ).__name__}"
raise TypeError(__A )
UpperCAmelCase = spanish_id.replace("-" , "" ).upper()
if len(__A ) != 9:
raise ValueError(__A )
try:
UpperCAmelCase = int(spanish_id_clean[0:8] )
UpperCAmelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__A ) from ex
if letter.isdigit():
raise ValueError(__A )
return letter == LOOKUP_LETTERS[number % 23]
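
# Worked example of the checksum (illustrative): 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates.
assert is_spain_national_id("12345678Z")
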
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse the shared-data JSON embedded in an Instagram profile page script tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the user information as a dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
def generate_large_matrix():
    """Grid where every row and column is sorted in decreasing order."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid):
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array):
    """Binary-search the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid):
    """Count negatives, shrinking the per-row search bound as rows decrease."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid):
    """Count negatives by scanning every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid):
    """Scan each row, stopping at the first negative (rows are decreasing)."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark():
    """Benchmark the three counting functions against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Return the Schur complement of the (a, a) block: C - B^T A^{-1} B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # Passing the blocks in swapped order makes the dimension checks fail.
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
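
# The identity exercised by the first test, as a tiny standalone check
# (illustrative values): det([[A, B], [B.T, C]]) == det(A) * det(C - B.T A^-1 B).
_a = np.array([[2.0]])
_b = np.array([[1.0]])
_c = np.array([[3.0]])
_s = _c - _b.T @ np.linalg.inv(_a) @ _b  # Schur complement of _a
assert np.isclose(np.linalg.det(np.block([[_a, _b], [_b.T, _c]])), np.linalg.det(_a) * np.linalg.det(_s))
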
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
import math


def perfect_square(num: int) -> bool:
    """Float-based check; exact only while sqrt(num) is exactly representable."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Integer binary search for a mid with mid**2 == n; avoids float rounding."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
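
# Illustrative checks; the binary-search variant is exact for any integer input:
assert perfect_square(100)
assert perfect_square_binary_search(81)
assert not perfect_square_binary_search(80)
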
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    # Filenames look like "breed_name_12.jpg"; the label is everything before the index.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
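
# Hedged launch sketch (paths are illustrative; the flags come from the parser above):
#   accelerate launch cv_example.py --data_dir ./images --with_tracking \
#       --checkpointing_steps epoch --output_dir ./checkpoints
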
if __name__ == "__main__":
main()
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special, [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
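
# Note on the <unk_i> filler above (illustrative reading of the default branch):
# with the default offset of 103 and no user-supplied additional_special_tokens,
# the list becomes [mask_token_sent] + [f"<unk_{i}>" for i in range(2, 103)],
# i.e. <mask_1> followed by the reserved placeholders <unk_2> ... <unk_102>.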
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Script-level parameters: fill in the dataset paths before running.
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main():
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    """Collect (image path, YOLO boxes) pairs from a directory of .txt label files."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip every image and mirror its YOLO box centres accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char=32):
    """Generate a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
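
# Sizing note (illustrative): the defaults above mirror a BERT/RoBERTa-base
# encoder, e.g. Data2VecTextConfig() yields hidden_size 768 split across
# 12 attention heads (64 dims per head) in each of its 12 layers.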
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
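# Per-layer summary: print one line per module that has a weight, showing the
# extra_repr of its input and weight quantizers; overlong lines wrap onto two.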
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
        if [True for s in ignore if type(s ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
                if re.search(n , name ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
                if re.search(n , name ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
| 1 | 1 |
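# Baconian-cipher lookup table. Note that "j" and "v" are given the otherwise
# unused codes BBBAA/BBBAB (instead of sharing with "i"/"u" as in the classic
# 24-letter cipher), which keeps encode/decode a true bijection.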
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
while len(__A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 |
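# Convert an Excel-style column title to its 1-based index by reading the
# letters as a base-26 numeral, e.g. "AB" -> 1 * 26 + 2 = 28.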
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
from math import pi
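# Arc length of a circular sector: s = 2 * pi * r * (angle / 360), with the
# angle given in degrees.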
def _lowerCAmelCase( __A , __A ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check ensures that the fake head request was actually called
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
# Go through the end of the file
while line_index < len(__A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(__A ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__A ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
    # Build the dummy file content for each backend, as used by the requires_backends checks
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
    # Map backends to the shortcut used in utils/dummy_xxx_objects.py (e.g. torch -> pt)
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
backend: os.path.join(__A , F"dummy_{short_names.get(__A , __A )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCAmelCase = field(default=_snake_case , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
UpperCAmelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowerCAmelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
UpperCAmelCase = import_module("tasks" )
try:
UpperCAmelCase = getattr(__A , model_args.task_type )
UpperCAmelCase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , __A )
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
UpperCAmelCase = token_classification_task.get_labels(data_args.labels )
UpperCAmelCase = dict(enumerate(__A ) )
UpperCAmelCase = len(__A )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , idalabel=__A , labelaid={label: i for i, label in enumerate(__A )} , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
UpperCAmelCase = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=__A , data_dir=data_args.data_dir , tokenizer=__A , labels=__A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCAmelCase = (
TokenClassificationDataset(
token_classification_task=__A , data_dir=data_args.data_dir , tokenizer=__A , labels=__A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__A , __A ) -> Tuple[List[int], List[int]]:
UpperCAmelCase = np.argmax(__A , axis=2 )
UpperCAmelCase , UpperCAmelCase = preds.shape
UpperCAmelCase = [[] for _ in range(__A )]
UpperCAmelCase = [[] for _ in range(__A )]
for i in range(__A ):
for j in range(__A ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__A ) -> Dict:
UpperCAmelCase , UpperCAmelCase = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__A , __A ),
"precision": precision_score(__A , __A ),
"recall": recall_score(__A , __A ),
"f1": fa_score(__A , __A ),
}
# Data collator
UpperCAmelCase = DataCollatorWithPadding(__A , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=__A , args=__A , train_dataset=__A , eval_dataset=__A , compute_metrics=__A , data_collator=__A , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(__A , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , __A , __A )
writer.write("%s = %s\n" % (key, value) )
results.update(__A )
# Predict
if training_args.do_predict:
UpperCAmelCase = TokenClassificationDataset(
token_classification_task=__A , data_dir=data_args.data_dir , tokenizer=__A , labels=__A , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = trainer.predict(__A )
UpperCAmelCase , UpperCAmelCase = align_predictions(__A , __A )
UpperCAmelCase = os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(__A , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , __A , __A )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
UpperCAmelCase = os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(__A , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(__A , __A , __A )
return results
def _lowerCAmelCase( __A ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __magic_name__ ( _snake_case , _snake_case ):
UpperCAmelCase = """convnextv2"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : str=2_2_4 , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any] , ) -> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = num_stages
UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
UpperCAmelCase = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_path_rate
UpperCAmelCase = image_size
UpperCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 1 | 1 |
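# Combination Sum IV: count the ordered ways to build `target` from elements
# of `array` (with repetition). Three variants follow: plain recursion,
# top-down memoization, and bottom-up dynamic programming.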
def _lowerCAmelCase( __A , __A , __A ):
def count_of_possible_combinations(__A ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__A )
def _lowerCAmelCase( __A , __A , __A ):
def count_of_possible_combinations_with_dp_array(
__A , __A ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
UpperCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , __A )
for item in array )
UpperCAmelCase = answer
return answer
UpperCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__A , __A )
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = [0] * (target + 1)
UpperCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(__A ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = 3
lowerCAmelCase__ = 5
lowerCAmelCase__ = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 1 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = DiTPipeline
UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCAmelCase = False
def _UpperCamelCase ( self : List[Any] ) -> Any:
torch.manual_seed(0 )
UpperCAmelCase = TransformeraDModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_0_0_0 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowerCAmelCase__ , )
UpperCAmelCase = AutoencoderKL()
UpperCAmelCase = DDIMScheduler()
UpperCAmelCase = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def _UpperCamelCase ( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=0 ) -> Tuple:
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _UpperCamelCase ( self : Union[str, Any] ) -> Any:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase = pipe(**lowerCAmelCase__ ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
UpperCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase__ , 1e-3 )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self : Any ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : str ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
UpperCAmelCase = ["vase", "umbrella", "white shark", "white wolf"]
UpperCAmelCase = pipe.get_label_ids(lowerCAmelCase__ )
UpperCAmelCase = pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=4_0 , output_type="np" ).images
for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
UpperCAmelCase = ["vase", "umbrella"]
UpperCAmelCase = pipe.get_label_ids(lowerCAmelCase__ )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2_5 , output_type="np" ).images
for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCAmelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCAmelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase = False
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return 3_2
@property
def _UpperCamelCase ( self : int ) -> List[Any]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self : Tuple ) -> Tuple:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Any ) -> Optional[int]:
return 1_0_0
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCamelCase ( self : int ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
UpperCAmelCase = MultilingualCLIP(lowerCAmelCase__ )
UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase = {
"in_channels": 9,
            # out_channels is twice the latent channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _UpperCamelCase ( self : str ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self : Tuple ) -> Any:
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
UpperCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0 ) -> str:
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create mask
UpperCAmelCase = np.ones((6_4, 6_4) , dtype=np.floataa )
UpperCAmelCase = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
UpperCAmelCase = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _UpperCamelCase ( self : str ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Tuple ) -> int:
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
UpperCAmelCase = 0
UpperCAmelCase = "a hat"
UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 1 | 1 |
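# Minimal DDIM image-to-image pipeline: preprocess the input image, add noise
# at a strength-dependent timestep, then run the usual DDIM denoising loop.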
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCAmelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def _lowerCAmelCase( __A ):
if isinstance(__A , torch.Tensor ):
return image
elif isinstance(__A , PIL.Image.Image ):
UpperCAmelCase = [image]
UpperCAmelCase = [trans(img.convert("RGB" ) ) for img in image]
UpperCAmelCase = torch.stack(__A )
return image
class __magic_name__ ( _snake_case ):
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Optional[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str ) -> Dict:
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
# get the original timestep using init_timestep
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowerCAmelCase__ )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=None ) -> Union[str, Any]:
if not isinstance(lowerCAmelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase__ )}" )
UpperCAmelCase = image.to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
UpperCAmelCase = init_latents.shape
UpperCAmelCase = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCAmelCase__ )
UpperCAmelCase = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self : Tuple , lowerCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCAmelCase__ : float = 0.8 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(lowerCAmelCase__ )
# 2. Preprocess image
UpperCAmelCase = preprocess(lowerCAmelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device )
UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , self.device )
UpperCAmelCase = timesteps[:1].repeat(lowerCAmelCase__ )
# 4. Prepare latent variables
UpperCAmelCase = self.prepare_latents(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , self.unet.dtype , self.device , lowerCAmelCase__ )
UpperCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCAmelCase__ ):
# 1. predict noise model_output
UpperCAmelCase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , eta=lowerCAmelCase__ , use_clipped_model_output=lowerCAmelCase__ , generator=lowerCAmelCase__ , ).prev_sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCAmelCase__ )
| 1 |
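# Closest pair of points via divide and conquer (O(n log n)): sort by x and y,
# recurse on each half, then scan a strip around the dividing line. Distances
# are kept squared until the final square root.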
def _lowerCAmelCase( __A , __A ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _lowerCAmelCase( __A , __A=0 ):
    return sorted(__A , key=lambda x : x[column] )
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(min(6 , points_counts - 1 ) , __A ):
for j in range(max(0 , i - 6 ) , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(__A , __A )
# recursion
UpperCAmelCase = points_counts // 2
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A )
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase = min(__A , __A )
UpperCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__A )
UpperCAmelCase = dis_between_closest_in_strip(
__A , len(__A ) , __A )
return min(__A , __A )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = column_based_sort(__A , column=0 )
UpperCAmelCase = column_based_sort(__A , column=1 )
return (
closest_pair_of_points_sqr(
__A , __A , __A )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 1 |
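# Embedding normalizer: stores a learned per-dimension mean and std as
# parameters; one method whitens embeddings ((x - mean) / std) and the other
# inverts the mapping (x * std + mean).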
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __magic_name__ ( _snake_case , _snake_case ):
@register_to_config
def __init__( self : Any , lowerCAmelCase__ : int = 7_6_8 , ) -> Union[str, Any]:
super().__init__()
UpperCAmelCase = nn.Parameter(torch.zeros(1 , lowerCAmelCase__ ) )
UpperCAmelCase = nn.Parameter(torch.ones(1 , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Optional[Union[str, torch.device]] = None , lowerCAmelCase__ : Optional[torch.dtype] = None , ) -> List[Any]:
UpperCAmelCase = nn.Parameter(self.mean.to(lowerCAmelCase__ ).to(lowerCAmelCase__ ) )
UpperCAmelCase = nn.Parameter(self.std.to(lowerCAmelCase__ ).to(lowerCAmelCase__ ) )
return self
def _UpperCamelCase ( self : int , lowerCAmelCase__ : Any ) -> Optional[int]:
UpperCAmelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[str] ) -> List[Any]:
UpperCAmelCase = (embeds * self.std) + self.mean
return embeds
| 1 |
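# Histogram stretching of a grayscale image: accumulate the intensity CDF, map
# each level through (L - 1) * CDF with rounding, and rewrite every pixel via
# the resulting lookup table.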
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase = int(last % last )
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowerCAmelCase__ = CLIPImageProcessor()
lowerCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowerCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
            UpperCAmelCase = [[0] * len(x ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
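# Read-only fsspec filesystem over a Hugging Face Hub dataset repository:
# directory entries are built lazily from repo_info.siblings, and files are
# streamed over HTTP via hf_hub_url using the stored auth token.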
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """"""
UpperCAmelCase = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[DatasetInfo] = None , lowerCAmelCase__ : Optional[str] = None , **lowerCAmelCase__ : Union[str, Any] , ) -> Optional[Any]:
super().__init__(self , **lowerCAmelCase__ )
UpperCAmelCase = repo_info
UpperCAmelCase = token
UpperCAmelCase = None
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
if self.dir_cache is None:
UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {"name": str(lowerCAmelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str = "rb" , **lowerCAmelCase__ : List[str] , ) -> Dict:
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : int ) -> Optional[int]:
self._get_dirs()
UpperCAmelCase = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : str=False , **lowerCAmelCase__ : Any ) -> Union[str, Any]:
self._get_dirs()
UpperCAmelCase = PurePosixPath(path.strip("/" ) )
UpperCAmelCase = {}
for p, f in self.dir_cache.items():
UpperCAmelCase = PurePosixPath(p.strip("/" ) )
UpperCAmelCase = p.parent
if root == path:
UpperCAmelCase = f
UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 1 |
from math import pi, sqrt
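# Gamma function for positive integers and half-integers, via the recurrence
# gamma(num) = (num - 1) * gamma(num - 1) with gamma(1) = 1 and gamma(0.5) = sqrt(pi).
# Inputs above ~171.5 would overflow a double, hence the range check below.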
def _lowerCAmelCase( __A ):
if num <= 0:
raise ValueError("math domain error" )
if num > 171.5:
raise OverflowError("math range error" )
elif num - int(__A ) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _lowerCAmelCase( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase__ = 1.0
while num:
lowerCAmelCase__ = float(input("Gamma of: "))
print(f"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 | 1 |
import datasets
from .evaluate import evaluate
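# Wrapper around the official SQuAD v1 evaluation script: predictions are keyed
# by question id and references are re-nested into the original dataset layout
# before being handed to evaluate().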
lowerCAmelCase__ = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCAmelCase__ = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCAmelCase__ = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] ) -> Dict:
UpperCAmelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
UpperCAmelCase = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
UpperCAmelCase = evaluate(dataset=lowerCAmelCase__ , predictions=lowerCAmelCase__ )
return score
| 1 |
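# Project Euler 65: sum of the digits of the numerator of the 100th convergent
# of the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].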
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
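# Numerator recurrence for the convergents: h(i) = a(i) * h(i - 1) + h(i - 2),
# where the partial quotient a(i) is 2 * i / 3 when i is divisible by 3 and 1 otherwise.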
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
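# Print under an exclusive file lock so that output from concurrent ranks does
# not interleave.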
def _lowerCAmelCase( *__A ):
with open(__A , "r" ) as fh:
fcntl.flock(__A , fcntl.LOCK_EX )
try:
print(*__A )
finally:
fcntl.flock(__A , fcntl.LOCK_UN )
lowerCAmelCase__ = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
lowerCAmelCase__ = torch.device("cuda", local_rank)
lowerCAmelCase__ = socket.gethostname()
lowerCAmelCase__ = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
lowerCAmelCase__ = dist.get_rank()
lowerCAmelCase__ = dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
| 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]=7 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : List[str]=1_8 , lowerCAmelCase__ : Union[str, Any]=3_0 , lowerCAmelCase__ : List[str]=4_0_0 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : str=True , ) -> List[Any]:
UpperCAmelCase = size if size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = apply_ocr
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _UpperCamelCase ( self : int ) -> Union[str, Any]:
UpperCAmelCase = LayoutLMvaImageProcessingTester(self )
@property
def _UpperCamelCase ( self : int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self : Any ) -> str:
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "apply_ocr" ) )
def _UpperCamelCase ( self : str ) -> Optional[Any]:
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def _UpperCamelCase ( self : Any ) -> int:
pass
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase__ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase__ )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self : List[str] ) -> Tuple:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self : int ) -> int:
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _UpperCamelCase ( self : Dict ) -> Tuple:
# with apply_OCR = True
UpperCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCAmelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
UpperCAmelCase = Image.open(ds[0]["file"] ).convert("RGB" )
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase__ )
self.assertListEqual(encoding.boxes , lowerCAmelCase__ )
# with apply_OCR = False
UpperCAmelCase = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ )
UpperCAmelCase = image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 1 |
import numpy
# List of input, output pairs
lowerCAmelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCAmelCase__ = [2, 4, 1, 5]
lowerCAmelCase__ = len(train_data)
lowerCAmelCase__ = 0.0_0_9
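# Batch gradient descent on a linear hypothesis h(x) = theta_0 + theta_1 * x_1 + ...:
# each iteration moves every parameter against the summed error derivative over the
# training set, scaled by LEARNING_RATE, until successive parameter vectors converge.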
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for i in range(len(__A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase( __A , __A=m ):
UpperCAmelCase = 0
for i in range(__A ):
if index == -1:
summation_value += _error(__A )
else:
summation_value += _error(__A ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase( __A ):
UpperCAmelCase = summation_of_cost_derivative(__A , __A ) / m
return cost_derivative_value
def _lowerCAmelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase = 0.000002
UpperCAmelCase = 0
UpperCAmelCase = 0
while True:
j += 1
UpperCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(__A ) ):
UpperCAmelCase = get_cost_derivative(i - 1 )
UpperCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__A , __A , atol=__A , rtol=__A , ):
break
UpperCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCAmelCase( ):
for i in range(len(__A ) ):
print(("Actual output value:", output(__A , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__A , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 1 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase__ = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
lowerCAmelCase__ = {
"yjernite/retribert-base-uncased": 512,
}
lowerCAmelCase__ = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
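# Fast, tokenizers-backed RetriBert tokenizer. It reuses the BERT WordPiece scheme,
# so the backend normalizer is rebuilt below whenever do_lower_case, strip_accents
# or tokenize_chinese_chars differ from the serialized normalizer state.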
class __magic_name__ ( _snake_case ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase = RetriBertTokenizer
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Union[str, Any]="[UNK]" , lowerCAmelCase__ : Union[str, Any]="[SEP]" , lowerCAmelCase__ : Dict="[PAD]" , lowerCAmelCase__ : Any="[CLS]" , lowerCAmelCase__ : List[str]="[MASK]" , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : List[Any] , ) -> List[Any]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**lowerCAmelCase__ )
UpperCAmelCase = do_lower_case
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=None ) -> Optional[Any]:
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 1 |
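# Modular binary exponentiation: computes (a ** n) % mod in O(log n) multiplications
# by repeated squaring. The demo below uses Fermat's little theorem, b ** (p - 2) % p,
# to divide modulo a prime p.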
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
        UpperCAmelCase = binary_exponentiation(__A , n // 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 1 | 1 |
import torch
from diffusers import StableDiffusionPipeline
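# Minimal DreamBooth-style inference: load a fine-tuned Stable Diffusion checkpoint
# and sample a single image from the instance prompt.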
lowerCAmelCase__ = "path-to-your-trained-model"
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
lowerCAmelCase__ = "A photo of sks dog in a bucket"
lowerCAmelCase__ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 1 |
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
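# Baconian cipher: every letter maps to a five-symbol string of A's and B's;
# decode() consumes the coded text in five-character chunks per word.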
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
while len(__A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """fnet"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : List[str]=3_2_0_0_0 , lowerCAmelCase__ : Tuple=7_6_8 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : int=3_0_7_2 , lowerCAmelCase__ : Optional[Any]="gelu_new" , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Any=5_1_2 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : List[Any]=1e-1_2 , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : List[Any]=2 , **lowerCAmelCase__ : Optional[Any] , ) -> str:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = use_tpu_fourier_optimizations
UpperCAmelCase = tpu_short_seq_length
| 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
UpperCAmelCase = f"https://www.instagram.com/{username}/"
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self : List[str] ) -> dict:
UpperCAmelCase = requests.get(self.url , headers=lowerCAmelCase__ ).text
UpperCAmelCase = BeautifulSoup(lowerCAmelCase__ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _UpperCamelCase ( self : Any ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self : List[str] ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self : str ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self : Tuple ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ["""image_processor""", """tokenizer"""]
UpperCAmelCase = """CLIPImageProcessor"""
UpperCAmelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : int , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : Optional[Any] ) -> str:
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase__ , )
UpperCAmelCase = kwargs.pop("feature_extractor" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self : List[str] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Tuple ) -> Tuple:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[Any] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : str ) -> Dict:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict , *lowerCAmelCase__ : str , **lowerCAmelCase__ : List[str] ) -> int:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase__ , )
return self.image_processor_class
@property
def _UpperCamelCase ( self : Tuple ) -> Optional[int]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase__ , )
return self.image_processor
| 1 |
import unittest
import numpy as np
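# Schur complement of the block matrix [[A, B], [B.T, C]] with respect to A:
# S = C - B.T @ A^-1 @ B. The tests below rely on the determinant identity
# det([[A, B], [B.T, C]]) = det(A) * det(S).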
def _lowerCAmelCase( __A , __A , __A , __A = None , ):
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
if shape_a[0] != shape_b[0]:
UpperCAmelCase = (
"Expected the same number of rows for A and B. "
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__A )
if shape_b[1] != shape_c[1]:
UpperCAmelCase = (
"Expected the same number of columns for B and C. "
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__A )
UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase = np.linalg.inv(__A )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
UpperCAmelCase = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.block([[a, b], [b.T, c]] )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s )
def _UpperCamelCase ( self : str ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase__ = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def _lowerCAmelCase( __A , __A ):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def _lowerCAmelCase( __A ):
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=__A )
def _lowerCAmelCase( __A , __A ):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
UpperCAmelCase = tmp_path_factory.getbasetemp() / "cache"
UpperCAmelCase = test_hf_cache_home / "datasets"
UpperCAmelCase = test_hf_cache_home / "metrics"
UpperCAmelCase = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(__A ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(__A ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(__A ) )
UpperCAmelCase = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(__A ) )
UpperCAmelCase = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__A ) )
@pytest.fixture(autouse=__A , scope="session" )
def _lowerCAmelCase( ):
datasets.disable_progress_bar()
@pytest.fixture(autouse=__A )
def _lowerCAmelCase( __A ):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , __A )
@pytest.fixture
def _lowerCAmelCase( __A ):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , __A )
| 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
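# Image files are expected to be named "<label>_<index>.jpg"; the prefix before
# the final underscore-digit group is used as the class label.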
def _lowerCAmelCase( __A ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0]
class __magic_name__ ( _snake_case ):
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self : Tuple ) -> List[str]:
return len(self.file_names )
def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ )
UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowerCAmelCase__ )
UpperCAmelCase = extract_label(lowerCAmelCase__ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase( __A , __A ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
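    # (illustrative note) OneCycleLR ramps the learning rate up to `max_lr` and back down over
    # `epochs * steps_per_epoch` updates; its default div_factor of 25 matches the optimizer
    # above being created at lr / 25.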
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
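                # e.g. resume_step = 250 with 100 batches per epoch resumes at starting_epoch = 2
                # and then skips the first 50 batches of that epoch (illustrative arithmetic)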
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
            if isinstance(__A , int ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
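        # `gather_for_metrics` also drops the samples the distributed sampler duplicated to pad the last batch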
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__A , __A )
if __name__ == "__main__":
main()
| 1 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = VideoToVideoSDPipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCAmelCase = False
# No `output_type`.
UpperCAmelCase = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _UpperCamelCase ( self : str ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
UpperCAmelCase = CLIPTextModel(lowerCAmelCase__ )
UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=0 ) -> Optional[int]:
# 3 frames
UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
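        # shape: (batch, num_frames, channels, height, width) = (1, 3, 3, 32, 32), hence "3 frames"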
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**lowerCAmelCase__ )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase = "np"
UpperCAmelCase = sd_pipe(**lowerCAmelCase__ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _UpperCamelCase ( self : str ) -> Tuple:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _UpperCamelCase ( self : List[str] ) -> Dict:
pass
def _UpperCamelCase ( self : List[str] ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=lowerCAmelCase__ )
UpperCAmelCase = video.to("cuda" )
UpperCAmelCase = "Spiderman is surfing"
UpperCAmelCase = pipe(lowerCAmelCase__ , video=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=3 , output_type="pt" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase( ):
UpperCAmelCase , UpperCAmelCase = get_dataset(__A , __A )
print("Processing..." )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(__A , __A , __A )
for index, image in enumerate(__A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase = random_chars(32 )
UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , __A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(__A )} with {file_name}" )
UpperCAmelCase = []
for anno in new_annos[index]:
UpperCAmelCase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__A )
with open(F"/{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(__A , "*.txt" ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__A ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(__A , F"{label_name}.jpg" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def _lowerCAmelCase( __A , __A , __A = 1 ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for idx in range(len(__A ) ):
UpperCAmelCase = []
UpperCAmelCase = img_list[idx]
path_list.append(__A )
UpperCAmelCase = anno_list[idx]
UpperCAmelCase = cva.imread(__A )
if flip_type == 1:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__A )
new_imgs_list.append(__A )
return new_imgs_list, new_annos_lists, path_list
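# Illustrative sketch (hypothetical helper, not part of the original script): the YOLO
# annotations above are normalized (label, x_center, y_center, width, height), so a
# horizontal flip is just x_center -> 1 - x_center and a vertical flip y_center -> 1 - y_center.
def _flip_bbox_example() -> None:
    bbox = [0, 0.25, 0.4, 0.1, 0.2]  # label, x_center, y_center, width, height
    horizontally_flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
    assert horizontally_flipped == [0, 0.75, 0.4, 0.1, 0.2]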
def _lowerCAmelCase( __A = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 |
def _lowerCAmelCase( __A ):
    if not isinstance(__A , int ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase = str(abs(__A ) )
UpperCAmelCase = [list(__A ) for char in range(len(__A ) )]
for index in range(len(__A ) ):
        num_transpositions[index].pop(index )
return max(
int("".join(list(__A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1 | 1 |
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 0
UpperCAmelCase = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
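# Worked example (illustrative): for n = 10, (1 + ... + 10)^2 = 55^2 = 3025 and
# 1^2 + ... + 10^2 = 385, so the function returns 3025 - 385 = 2640.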
if __name__ == "__main__":
print(f"{solution() = }")
| 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
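# Note (illustrative): `axis=(0,)` above requests one scale per output channel of each
# quantized weight, while `--quant-per-tensor` (axis=None) collapses that to a single
# per-tensor scale; activation inputs always calibrate per tensor here.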
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
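# Illustrative sketch of the axis arithmetic above (hypothetical helper, plain torch):
# a weight of shape (out, in, kh, kw) quantized per output channel (axis 0) reduces its
# per-channel amax over the remaining axes (1, 2, 3).
def _per_channel_amax_example() -> None:
    weight = torch.randn(8, 4, 3, 3)
    per_channel_amax = weight.abs().amax(dim=(1, 2, 3))  # one value per output channel
    assert per_channel_amax.shape == (8,)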
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
    elif not isinstance(__A , list ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
| 1 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _lowerCAmelCase( __A , __A=() , __A=None , __A="no" , __A="29500" ):
UpperCAmelCase = False
UpperCAmelCase = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
UpperCAmelCase = True
elif "IPython" in sys.modules:
UpperCAmelCase = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
UpperCAmelCase = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , __A ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
UpperCAmelCase = 8
UpperCAmelCase = PrepareForLaunch(__A , distributed_type="TPU" )
print(F"Launching a training on {num_processes} TPU cores." )
xmp.spawn(__A , args=__A , nprocs=__A , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__A )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__A , master_addr="127.0.01" , master_port=__A , mixed_precision=__A ):
UpperCAmelCase = PrepareForLaunch(__A , distributed_type="MULTI_GPU" )
print(F"Launching training on {num_processes} GPUs." )
try:
start_processes(__A , args=__A , nprocs=__A , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCAmelCase = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__A )
def _lowerCAmelCase( __A , __A=() , __A=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
world_size=__A , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
UpperCAmelCase = PrepareForLaunch(__A , debug=__A )
start_processes(__A , args=__A , nprocs=__A , start_method="fork" )
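# Minimal usage sketch (illustrative; the first launcher above is `notebook_launcher`
# upstream). The launched function must build its `Accelerator` inside itself and be
# called with picklable args, e.g.:
#
#   def training_loop(mixed_precision="no"):
#       ...  # create Accelerator() here, not at notebook top level
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)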
| 1 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
        UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
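# Worked example (illustrative): "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1
# = 2 * 1 + 1 * 26 = 28; the loop reads the title right-to-left as a base-26 number with
# digits A..Z mapped to 1..26.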
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """xlnet"""
UpperCAmelCase = ["""mems"""]
UpperCAmelCase = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , lowerCAmelCase__ : List[Any]=3_2_0_0_0 , lowerCAmelCase__ : Optional[Any]=1_0_2_4 , lowerCAmelCase__ : str=2_4 , lowerCAmelCase__ : Optional[Any]=1_6 , lowerCAmelCase__ : List[Any]=4_0_9_6 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]="bi" , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Tuple=1e-1_2 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Union[str, Any]=5_1_2 , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Tuple=-1 , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Any="last" , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Union[str, Any]="tanh" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[str]=5 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : Dict=2 , **lowerCAmelCase__ : Tuple , ) -> Any:
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
UpperCAmelCase = d_model // n_head
UpperCAmelCase = ff_activation
UpperCAmelCase = d_inner
UpperCAmelCase = untie_r
UpperCAmelCase = attn_type
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = dropout
UpperCAmelCase = mem_len
UpperCAmelCase = reuse_len
UpperCAmelCase = bi_data
UpperCAmelCase = clamp_len
UpperCAmelCase = same_length
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_last_dropout
UpperCAmelCase = start_n_top
UpperCAmelCase = end_n_top
UpperCAmelCase = bos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , lowerCAmelCase__ , )
UpperCAmelCase = kwargs["use_cache"]
UpperCAmelCase = use_mems_eval
UpperCAmelCase = use_mems_train
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _UpperCamelCase ( self : Dict ) -> List[Any]:
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : int ) -> Optional[int]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _lowerCAmelCase( __A ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase = model_type_to_module_name(__A )
UpperCAmelCase = importlib.import_module(F".{module_name}" , "transformers.models" )
try:
return getattr(__A , __A )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__A , "__name__" , __A ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase = importlib.import_module("transformers" )
if hasattr(__A , __A ):
return getattr(__A , __A )
return None
def _lowerCAmelCase( __A , __A = None , __A = False , __A = False , __A = None , __A = None , __A = None , __A = False , **__A , ):
UpperCAmelCase = get_file_from_repo(
__A , __A , cache_dir=__A , force_download=__A , resume_download=__A , proxies=__A , use_auth_token=__A , revision=__A , local_files_only=__A , )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(__A , encoding="utf-8" ) as reader:
return json.load(__A )
class __magic_name__ :
def __init__( self : Union[str, Any] ) -> str:
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase__ )
def _UpperCamelCase ( cls : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Any ) -> str:
UpperCAmelCase = kwargs.pop("config" , lowerCAmelCase__ )
UpperCAmelCase = kwargs.pop("trust_remote_code" , lowerCAmelCase__ )
UpperCAmelCase = True
UpperCAmelCase , UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = config_dict.get("feature_extractor_type" , lowerCAmelCase__ )
UpperCAmelCase = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
UpperCAmelCase = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(lowerCAmelCase__ , PretrainedConfig ):
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
            # It could be in `config.feature_extractor_type`
UpperCAmelCase = getattr(lowerCAmelCase__ , "feature_extractor_type" , lowerCAmelCase__ )
if hasattr(lowerCAmelCase__ , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
UpperCAmelCase = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
UpperCAmelCase = feature_extractor_class_from_name(lowerCAmelCase__ )
UpperCAmelCase = feature_extractor_auto_map is not None
UpperCAmelCase = feature_extractor_class is not None or type(lowerCAmelCase__ ) in FEATURE_EXTRACTOR_MAPPING
UpperCAmelCase = resolve_trust_remote_code(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if has_remote_code and trust_remote_code:
UpperCAmelCase = get_class_from_dynamic_module(
lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = kwargs.pop("code_revision" , lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCAmelCase__ ) in FEATURE_EXTRACTOR_MAPPING:
UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase__ )]
return feature_extractor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
raise ValueError(
f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def _UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase__ , lowerCAmelCase__ )
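# Usage sketch (illustrative): the auto class resolves the concrete extractor from the
# checkpoint's config via the mapping above, e.g.
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # -> a Wav2Vec2FeatureExtractor instance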
| 1 |
import argparse
import os
import re
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
    # Go through to the end of the file
while line_index < len(__A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(__A ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__A ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
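# Illustrative expansion (abridged): for name "UNet2DModel" and backend string '["torch"]'
# the class template above renders roughly as
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#
# so importing it without torch installed fails lazily with a helpful message.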
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
    # Special correspondence from backend to module name, as used in the requires_<modulename> functions
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
    # Special correspondence from backend to its shortcut, as used in utils/dummy_xxx_objects.py
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
backend: os.path.join(__A , F"dummy_{short_names.get(__A , __A )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowerCAmelCase__ = "Usage of script: script_name <size_of_canvas:int>"
lowerCAmelCase__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def _lowerCAmelCase( __A ):
UpperCAmelCase = [[False for i in range(__A )] for j in range(__A )]
return canvas
def _lowerCAmelCase( __A ):
for i, row in enumerate(__A ):
for j, _ in enumerate(__A ):
UpperCAmelCase = bool(random.getrandbits(1 ) )
def _lowerCAmelCase( __A ):
UpperCAmelCase = np.array(__A )
UpperCAmelCase = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__A ):
for c, pt in enumerate(__A ):
UpperCAmelCase = __judge_point(
__A , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
UpperCAmelCase = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
UpperCAmelCase = current_canvas.tolist()
return return_canvas
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = 0
UpperCAmelCase = 0
    # count the dead and alive neighbours.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
    # the 3x3 neighbourhood includes the focus point itself, so remove it from the count.
if pt:
alive -= 1
else:
dead -= 1
    # running the rules of the game here.
UpperCAmelCase = pt
if pt:
if alive < 2:
UpperCAmelCase = False
elif alive == 2 or alive == 3:
UpperCAmelCase = True
elif alive > 3:
UpperCAmelCase = False
else:
if alive == 3:
UpperCAmelCase = True
return state
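# Illustrative restatement of the rules above as a tiny pure function (hypothetical name,
# not part of the original module): the next state depends only on the current state and
# the live-neighbour count.
def _conway_rule_example(alive: bool, live_neighbours: int) -> bool:
    if alive:
        return live_neighbours in (2, 3)  # under-/over-population otherwise
    return live_neighbours == 3  # reproduction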
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCAmelCase__ = int(sys.argv[1])
# main working structure of this module.
lowerCAmelCase__ = create_canvas(canvas_size)
seed(c)
lowerCAmelCase__, lowerCAmelCase__ = plt.subplots()
fig.show()
lowerCAmelCase__ = ListedColormap(["w", "k"])
try:
while True:
lowerCAmelCase__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __magic_name__ ( _snake_case , _snake_case ):
UpperCAmelCase = """convnextv2"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : str=2_2_4 , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any] , ) -> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = num_stages
UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
UpperCAmelCase = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_path_rate
UpperCAmelCase = image_size
UpperCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
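# Illustrative: with the default four stages the names above become
# ["stem", "stage1", "stage2", "stage3", "stage4"], and out_features/out_indices are
# aligned against that list (e.g. out_features=["stage4"] -> out_indices=[4]).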
| 1 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
UpperCAmelCase = ZeroShotClassificationPipeline(
model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# No kwarg
UpperCAmelCase = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
UpperCAmelCase = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
UpperCAmelCase = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
UpperCAmelCase = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
UpperCAmelCase = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(1 )
] , )
UpperCAmelCase = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
lowerCAmelCase__ , [
{"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
for i in range(2 )
] , )
with self.assertRaises(lowerCAmelCase__ ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier(lowerCAmelCase__ , candidate_labels="politics" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(lowerCAmelCase__ ):
classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(lowerCAmelCase__ ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
self.run_entailment_id(lowerCAmelCase__ )
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Pipeline ) -> str:
UpperCAmelCase = zero_shot_classifier.model.config
UpperCAmelCase = config.labelaid
UpperCAmelCase = zero_shot_classifier.entailment_id
UpperCAmelCase = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
UpperCAmelCase = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
UpperCAmelCase = original_labelaid
self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )
@require_torch
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
UpperCAmelCase = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 1_0_0 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def _UpperCamelCase ( self : int ) -> Tuple:
UpperCAmelCase = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
UpperCAmelCase = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@require_tf
def _UpperCamelCase ( self : List[str] ) -> Tuple:
UpperCAmelCase = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
UpperCAmelCase = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def _UpperCamelCase ( self : Tuple ) -> Optional[int]:
UpperCAmelCase = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
UpperCAmelCase = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
UpperCAmelCase = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def _UpperCamelCase ( self : Any ) -> Dict:
UpperCAmelCase = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
UpperCAmelCase = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
UpperCAmelCase = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
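# Minimal usage sketch mirroring the slow tests above; it downloads the real
# roberta-large-mnli checkpoint, so it is illustrative rather than part of the
# test suite.
from transformers import pipeline

zs_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
zs_result = zs_classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(zs_result["labels"][0])  # expected to be "politics" per the test above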
| 1 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 1 |
def _lowerCAmelCase( __A = 1000 ):
UpperCAmelCase = 3
UpperCAmelCase = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:  # multiples of 15 are matched exactly once here
            result += a
a += 1
return result
if __name__ == "__main__":
print(f"{solution() = }")
| 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCAmelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCAmelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase = False
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return 3_2
@property
def _UpperCamelCase ( self : int ) -> List[Any]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self : Tuple ) -> Tuple:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Any ) -> Optional[int]:
return 1_0_0
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCamelCase ( self : int ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
UpperCAmelCase = MultilingualCLIP(lowerCAmelCase__ )
UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _UpperCamelCase ( self : str ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self : Tuple ) -> Any:
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
UpperCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0 ) -> str:
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create mask
UpperCAmelCase = np.ones((6_4, 6_4) , dtype=np.floataa )
UpperCAmelCase = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
UpperCAmelCase = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _UpperCamelCase ( self : str ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Tuple ) -> int:
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
UpperCAmelCase = 0
UpperCAmelCase = "a hat"
UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 1 | 1 |
from math import pow, sqrt
def _lowerCAmelCase( *__A ):
UpperCAmelCase = len(__A ) > 0 and all(value > 0.0 for value in values )
return result
def _lowerCAmelCase( __A , __A ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def _lowerCAmelCase( __A , __A , __A ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__A , __A , __A )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
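# Worked example for the Graham's-law helpers above (argument order per the
# original TheAlgorithms source these functions derive from): the effusion rate
# of H2 (2.016 g/mol) relative to O2 (31.998 g/mol) is sqrt(M_O2 / M_H2).
print(round(sqrt(31.998 / 2.016), 6))  # ~3.983971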
| 1 |
def _lowerCAmelCase( __A , __A ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _lowerCAmelCase( __A , __A=0 ):
    return sorted(__A , key=lambda x : x[column] )
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(min(6 , points_counts - 1 ) , __A ):
for j in range(max(0 , i - 6 ) , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(__A , __A )
# recursion
UpperCAmelCase = points_counts // 2
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A )
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase = min(__A , __A )
UpperCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__A )
UpperCAmelCase = dis_between_closest_in_strip(
__A , len(__A ) , __A )
return min(__A , __A )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = column_based_sort(__A , column=0 )
UpperCAmelCase = column_based_sort(__A , column=1 )
return (
closest_pair_of_points_sqr(
__A , __A , __A )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 1 |
import argparse
import json
from tqdm import tqdm
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=__A , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=__A , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=__A , help="where to store parsed gold_data_path file" , )
UpperCAmelCase = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
UpperCAmelCase = json.load(__A )
for dpr_record in tqdm(__A ):
UpperCAmelCase = dpr_record["question"]
UpperCAmelCase = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(__A ) + "\n" )
if __name__ == "__main__":
main()
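# Illustrative shape of one biencoder-nq-dev.json record consumed by the loop
# above (keys match what the script reads; the values are invented):
# {
#     "question": "who wrote hamlet",
#     "positive_ctxs": [{"title": "Hamlet", "text": "..."}]
# }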
| 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
                UpperCAmelCase = last % 1  # fractional part of last, used for the rounding below
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ["""image_processor"""]
UpperCAmelCase = """SamImageProcessor"""
def __init__( self : List[Any] , lowerCAmelCase__ : int ) -> Dict:
super().__init__(lowerCAmelCase__ )
UpperCAmelCase = self.image_processor
UpperCAmelCase = -1_0
UpperCAmelCase = self.image_processor.size["longest_edge"]
def __call__( self : Dict , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> BatchEncoding:
UpperCAmelCase = self.image_processor(
lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
        # pop arguments that are not used in the forward pass but are used nevertheless
UpperCAmelCase = encoding_image_processor["original_sizes"]
if hasattr(lowerCAmelCase__ , "numpy" ): # Checks if Torch or TF tensor
UpperCAmelCase = original_sizes.numpy()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._check_and_preprocess_points(
input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , input_boxes=lowerCAmelCase__ , )
UpperCAmelCase = self._normalize_and_convert(
lowerCAmelCase__ , lowerCAmelCase__ , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , input_boxes=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , )
return encoding_image_processor
def _UpperCamelCase ( self : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Optional[Any]="pt" , ) -> Optional[int]:
if input_points is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
UpperCAmelCase = [
self._normalize_coordinates(self.target_size , lowerCAmelCase__ , original_sizes[0] ) for point in input_points
]
else:
UpperCAmelCase = [
self._normalize_coordinates(self.target_size , lowerCAmelCase__ , lowerCAmelCase__ )
for point, original_size in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
UpperCAmelCase , UpperCAmelCase = self._pad_points_and_labels(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.array(lowerCAmelCase__ )
if input_labels is not None:
UpperCAmelCase = np.array(lowerCAmelCase__ )
if input_boxes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
UpperCAmelCase = [
self._normalize_coordinates(self.target_size , lowerCAmelCase__ , original_sizes[0] , is_bounding_box=lowerCAmelCase__ )
for box in input_boxes
]
else:
UpperCAmelCase = [
self._normalize_coordinates(self.target_size , lowerCAmelCase__ , lowerCAmelCase__ , is_bounding_box=lowerCAmelCase__ )
for box, original_size in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
UpperCAmelCase = np.array(lowerCAmelCase__ )
if input_boxes is not None:
if return_tensors == "pt":
UpperCAmelCase = torch.from_numpy(lowerCAmelCase__ )
# boxes batch size of 1 by default
UpperCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
UpperCAmelCase = tf.convert_to_tensor(lowerCAmelCase__ )
# boxes batch size of 1 by default
UpperCAmelCase = tf.expand_dims(lowerCAmelCase__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
UpperCAmelCase = torch.from_numpy(lowerCAmelCase__ )
# point batch size of 1 by default
UpperCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
UpperCAmelCase = tf.convert_to_tensor(lowerCAmelCase__ )
# point batch size of 1 by default
UpperCAmelCase = tf.expand_dims(lowerCAmelCase__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
UpperCAmelCase = torch.from_numpy(lowerCAmelCase__ )
# point batch size of 1 by default
UpperCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
UpperCAmelCase = tf.convert_to_tensor(lowerCAmelCase__ )
# point batch size of 1 by default
UpperCAmelCase = tf.expand_dims(lowerCAmelCase__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any ) -> Any:
UpperCAmelCase = max([point.shape[0] for point in input_points] )
UpperCAmelCase = []
for i, point in enumerate(lowerCAmelCase__ ):
if point.shape[0] != expected_nb_points:
UpperCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
UpperCAmelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowerCAmelCase__ )
UpperCAmelCase = processed_input_points
return input_points, input_labels
def _UpperCamelCase ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=False ) -> np.ndarray:
UpperCAmelCase , UpperCAmelCase = original_size
UpperCAmelCase , UpperCAmelCase = self.image_processor._get_preprocess_shape(lowerCAmelCase__ , longest_edge=lowerCAmelCase__ )
UpperCAmelCase = deepcopy(lowerCAmelCase__ ).astype(lowerCAmelCase__ )
if is_bounding_box:
UpperCAmelCase = coords.reshape(-1 , 2 , 2 )
UpperCAmelCase = coords[..., 0] * (new_w / old_w)
UpperCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCAmelCase = coords.reshape(-1 , 4 )
return coords
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[int]=None , ) -> int:
if input_points is not None:
if hasattr(lowerCAmelCase__ , "numpy" ): # Checks for TF or Torch tensor
UpperCAmelCase = input_points.numpy().tolist()
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(input_points[0] , lowerCAmelCase__ ):
raise ValueError("Input points must be a list of list of floating points." )
UpperCAmelCase = [np.array(lowerCAmelCase__ ) for input_point in input_points]
else:
UpperCAmelCase = None
if input_labels is not None:
if hasattr(lowerCAmelCase__ , "numpy" ):
UpperCAmelCase = input_labels.numpy().tolist()
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(input_labels[0] , lowerCAmelCase__ ):
raise ValueError("Input labels must be a list of list integers." )
UpperCAmelCase = [np.array(lowerCAmelCase__ ) for label in input_labels]
else:
UpperCAmelCase = None
if input_boxes is not None:
if hasattr(lowerCAmelCase__ , "numpy" ):
UpperCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
or not isinstance(input_boxes[0] , lowerCAmelCase__ )
or not isinstance(input_boxes[0][0] , lowerCAmelCase__ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
UpperCAmelCase = [np.array(lowerCAmelCase__ ).astype(np.floataa ) for box in input_boxes]
else:
UpperCAmelCase = None
return input_points, input_labels, input_boxes
@property
def _UpperCamelCase ( self : Optional[Any] ) -> int:
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[str] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Tuple ) -> List[Any]:
return self.image_processor.post_process_masks(*lowerCAmelCase__ , **lowerCAmelCase__ )
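# Usage sketch for the processor above; "facebook/sam-vit-base" is the public
# SAM checkpoint name and is not defined in this file.
import numpy as np
from PIL import Image
from transformers import SamProcessor

sam_processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
dummy_image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
sam_inputs = sam_processor(dummy_image, input_points=[[[320.0, 240.0]]], return_tensors="pt")
print(sam_inputs["input_points"].shape)  # (1, 1, 1, 2); coordinates rescaled to the longest edge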
| 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
            UpperCAmelCase = [[0] * len(x ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 1 |
lowerCAmelCase__ = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
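# Sketch of how such a pin table is typically consumed by setup tooling (the
# `pins` parameter name is illustrative; in this dump the dict above is bound
# to an obfuscated module-level name):
def deps_list(pins, *pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [pins[pkg] for pkg in pkgs]

# e.g. install_requires = deps_list(pin_table, "torch", "transformers", "numpy")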
| 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["MobileViTFeatureExtractor"]
lowerCAmelCase__ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
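# Behavior sketch: with the _LazyModule indirection above, a downstream import
# such as the following resolves MobileViTConfig on first attribute access
# without eagerly importing the torch or TF modeling files.
# from transformers.models.mobilevit import MobileViTConfig
# config = MobileViTConfig()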
| 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
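# Quick sketch: with the defaults above (l_layers=9, x_layers=5, r_layers=5),
# the per-encoder depths are exposed through num_hidden_layers. `LxmertConfig`
# is the public transformers name this obfuscated class corresponds to.
from transformers import LxmertConfig

lxmert_config = LxmertConfig()
print(lxmert_config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}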
| 1 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCAmelCase = IFImgaImgSuperResolutionPipeline
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=0 ) -> List[str]:
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _UpperCamelCase ( self : str ) -> Optional[int]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _UpperCamelCase ( self : str ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _UpperCamelCase ( self : Any ) -> Tuple:
self._test_save_load_local()
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 1 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
UpperCAmelCase = f"https://www.instagram.com/{username}/"
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self : List[str] ) -> dict:
UpperCAmelCase = requests.get(self.url , headers=lowerCAmelCase__ ).text
UpperCAmelCase = BeautifulSoup(lowerCAmelCase__ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _UpperCamelCase ( self : Any ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self : List[str] ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self : str ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self : Tuple ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 1 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset( dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
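# Spelled-out reading of the expectation above (added note, not from the test
# file): is_small_dataset returns True only when a positive IN_MEMORY_MAX_SIZE
# cap is configured and the dataset fits under it. For example, with the cap
# monkeypatched to 900 * 2**20, a 400 * 2**20 dataset is small; with the cap
# at 100 * 2**20 (or left at the default of 0) it is not.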
| 1 |
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error( example_no , data_set="train" ):
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value( data_input_tuple ):
    hyp_val = 0
    for i in range(len(data_input_tuple ) ):  # one term per input feature
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no , data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no , data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index , end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
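# A vectorized sketch of the same gradient (my own illustration, assuming the
# train_data / parameter_vector / LEARNING_RATE globals above): one batch
# gradient-descent step computed with numpy instead of per-example loops.
def gradient_step( theta , learning_rate=LEARNING_RATE ):
    theta = numpy.asarray(theta , dtype=float )
    x = numpy.array([(1,) + row[0] for row in train_data] , dtype=float )  # prepend the bias feature
    y = numpy.array([row[1] for row in train_data] , dtype=float )
    errors = x @ theta - y  # hypothesis minus target for every example
    gradient = x.T @ errors / len(train_data )  # matches summation_of_cost_derivative(...) / m
    return (theta - learning_rate * gradient).tolist()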
def run_gradient_descent( ):
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent( ):
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 1 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class GLPNFeatureExtractor ( GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
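# Migration sketch (hedged): per the warning above, GLPNImageProcessor is the
# drop-in replacement, so swapping the import is enough. The checkpoint name
# below is only an illustrative example.
#
#   from transformers import GLPNImageProcessor
#   image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")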
| 1 |
def binary_exponentiation( a , n , mod ):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n // 2 , mod )  # integer halving keeps n an int
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
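# Usage sketch (added): by Fermat's little theorem, b**(p - 2) % p is the
# modular inverse of b modulo a prime p, so division mod p needs no floats.
# The helper name mod_divide is my own.
def mod_divide( numerator , denominator , prime ):
    return (numerator * binary_exponentiation(denominator , prime - 2 , prime )) % prime
assert mod_divide(a , b , p ) == (a // b) % p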
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 1 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = "▁"
lowerCAmelCase__ = {"vocab_file": "spiece.model"}
lowerCAmelCase__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
lowerCAmelCase__ = {
"google/pegasus-xsum": 512,
}
lowerCAmelCase__ = logging.get_logger(__name__)
class __magic_name__ ( _snake_case ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple="<pad>" , lowerCAmelCase__ : List[str]="</s>" , lowerCAmelCase__ : Union[str, Any]="<unk>" , lowerCAmelCase__ : Tuple="<mask_2>" , lowerCAmelCase__ : int="<mask_1>" , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[Any]=1_0_3 , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None:
UpperCAmelCase = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError(
f"additional_special_tokens should be of type {type(lowerCAmelCase__ )}, but is"
f" {type(lowerCAmelCase__ )}" )
UpperCAmelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(lowerCAmelCase__ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
UpperCAmelCase = additional_special_tokens_extended
else:
UpperCAmelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token_sent=lowerCAmelCase__ , offset=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
UpperCAmelCase = mask_token_sent
UpperCAmelCase = vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# add special tokens to encoder dict
UpperCAmelCase = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCAmelCase = {v: k for k, v in self.encoder.items()}
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return len(self.sp_model ) + self.offset
def _UpperCamelCase ( self : str ) -> Dict[str, int]:
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : List[Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : str ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCAmelCase = self.sp_model.piece_to_id(lowerCAmelCase__ )
return sp_id + self.offset
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCAmelCase = self.sp_model.IdToPiece(index - self.offset )
return token
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Optional[int] ) -> str:
UpperCAmelCase = []
UpperCAmelCase = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def _UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : int=False ) -> int:
return 1
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Any ) -> str:
UpperCAmelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def _UpperCamelCase ( self : int , lowerCAmelCase__ : List , lowerCAmelCase__ : Optional[List] = None , lowerCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
| 1 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode( word ):
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded
def decode( coded ):
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
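    # Round-trip check (an added sketch): any message made of letters and
    # spaces should survive encode() followed by decode().
    assert decode(encode("steganography works" ) ) == "steganography works"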
| 1 | 1 |
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution( n = N ):
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
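# Equivalent sliding-window sketch (my own alternative, not the original
# solution): keeps the arithmetic in integers instead of round-tripping each
# window through strings with reduce.
def solution_window( n = N , span = 13 ):
    digits = [int(c ) for c in n]
    best = 0
    for i in range(len(digits ) - span + 1 ):
        product = 1
        for d in digits[i : i + span]:
            product *= d
        best = max(best , product )
    return best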
if __name__ == "__main__":
print(f"{solution() = }")
| 1 |
| 1 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[Any]=1_0 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Dict=3_2 * 4 , lowerCAmelCase__ : Any=3_2 * 6 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : List[str]=3_2 , ) -> List[Any]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = is_training
UpperCAmelCase = use_auxiliary_loss
UpperCAmelCase = num_queries
UpperCAmelCase = num_channels
UpperCAmelCase = min_size
UpperCAmelCase = max_size
UpperCAmelCase = num_labels
UpperCAmelCase = mask_feature_size
def _UpperCamelCase ( self : List[Any] ) -> str:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ) -> int:
UpperCAmelCase = output.encoder_hidden_states
UpperCAmelCase = output.pixel_decoder_hidden_states
UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_config.decoder_layers )
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple=False ) -> Union[str, Any]:
with torch.no_grad():
UpperCAmelCase = MaskFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
UpperCAmelCase = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ) -> Any:
UpperCAmelCase = MaskFormerForInstanceSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
UpperCAmelCase = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
UpperCAmelCase = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _UpperCamelCase ( self : int ) -> Optional[Any]:
UpperCAmelCase = MaskFormerModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _UpperCamelCase ( self : Any ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _UpperCamelCase ( self : str ) -> Tuple:
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _UpperCamelCase ( self : List[Any] ) -> Dict:
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _UpperCamelCase ( self : str ) -> Dict:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
pass
def _UpperCamelCase ( self : int ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase = MaskFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _UpperCamelCase ( self : int ) -> List[Any]:
UpperCAmelCase = (self.model_tester.min_size,) * 2
UpperCAmelCase = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
"mask_labels": torch.randn((2, 1_0, *size) , device=lowerCAmelCase__ ),
"class_labels": torch.zeros(2 , 1_0 , device=lowerCAmelCase__ ).long(),
}
UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase__ )
UpperCAmelCase = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def _UpperCamelCase ( self : Any ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
UpperCAmelCase = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def _UpperCamelCase ( self : int ) -> List[Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
UpperCAmelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def _UpperCamelCase ( self : int ) -> Dict:
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
UpperCAmelCase = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase__ = 1e-4
def _lowerCAmelCase( ):
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(lowerCAmelCase__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase__ )
UpperCAmelCase = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
UpperCAmelCase = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
UpperCAmelCase = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(lowerCAmelCase__ )
.eval()
)
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase__ )
# masks_queries_logits
UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
UpperCAmelCase = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(lowerCAmelCase__ )
.eval()
)
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
UpperCAmelCase = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase__ )
# masks_queries_logits
UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
UpperCAmelCase = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(lowerCAmelCase__ )
.eval()
)
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCAmelCase = inputs["pixel_values"].to(lowerCAmelCase__ )
UpperCAmelCase = [el.to(lowerCAmelCase__ ) for el in inputs["mask_labels"]]
UpperCAmelCase = [el.to(lowerCAmelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 1 |
import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ):
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement." )
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement( unittest.TestCase ):
    def test_schur_complement( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(b , a , c )
    def test_improper_b_c_dimensions( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
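# Quick numerical illustration (added sketch, not part of the original tests):
# for the block matrix X = [[A, B], [B.T, C]], det(X) == det(A) * det(S) where
# S is the Schur complement of A in X. The helper name is my own.
def _demo_schur_determinant( ) -> bool:
    a = np.array([[2.0, 0.0], [0.0, 3.0]] )
    b = np.array([[1.0], [1.0]] )
    c = np.array([[4.0]] )
    s = schur_complement(a , b , c )
    x = np.block([[a, b], [b.T, c]] )
    return bool(np.isclose(np.linalg.det(x ) , np.linalg.det(a ) * np.linalg.det(s ) ) )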
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( _snake_case , _snake_case ):
@register_to_config
def __init__( self : Optional[Any] , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None ) -> Optional[int]:
super().__init__()
UpperCAmelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCAmelCase = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ )
else:
UpperCAmelCase = None
UpperCAmelCase = torch.nn.Parameter(lowerCAmelCase__ )
class __magic_name__ ( _snake_case ):
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
def __init__( self : str , lowerCAmelCase__ : VQModel , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : TransformeraDModel , lowerCAmelCase__ : VQDiffusionScheduler , lowerCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings , ) -> int:
super().__init__()
self.register_modules(
vqvae=lowerCAmelCase__ , transformer=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Union[str, Any]:
UpperCAmelCase = len(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else 1
# get prompt text embeddings
UpperCAmelCase = self.tokenizer(
lowerCAmelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCAmelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase = prompt_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCAmelCase = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCAmelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCAmelCase__ , 1 , 1 )
else:
UpperCAmelCase = [""] * batch_size
UpperCAmelCase = text_input_ids.shape[-1]
UpperCAmelCase = self.tokenizer(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" , )
UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCAmelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase = negative_prompt_embeds.shape[1]
UpperCAmelCase = negative_prompt_embeds.repeat(1 , lowerCAmelCase__ , 1 )
UpperCAmelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 5.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = len(lowerCAmelCase__ )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}" )
UpperCAmelCase = batch_size * num_images_per_prompt
UpperCAmelCase = guidance_scale > 1.0
UpperCAmelCase = self._encode_prompt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(lowerCAmelCase__ )}." )
# get the initial completely masked latents unless the user supplied it
UpperCAmelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCAmelCase = self.transformer.num_vector_embeds - 1
UpperCAmelCase = torch.full(lowerCAmelCase__ , lowerCAmelCase__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
UpperCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device )
UpperCAmelCase = self.scheduler.timesteps.to(self.device )
UpperCAmelCase = latents
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the sample if we are doing classifier free guidance
UpperCAmelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCAmelCase = self.transformer(lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__ ).sample
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = model_output.chunk(2 )
UpperCAmelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowerCAmelCase__ , dim=1 , keepdim=lowerCAmelCase__ )
UpperCAmelCase = self.truncate(lowerCAmelCase__ , lowerCAmelCase__ )
# remove `log(0)`'s (`-inf`s)
UpperCAmelCase = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = self.vqvae.config.vq_embed_dim
UpperCAmelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCAmelCase = self.vqvae.quantize.get_codebook_entry(lowerCAmelCase__ , shape=lowerCAmelCase__ )
UpperCAmelCase = self.vqvae.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__ ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : float ) -> torch.FloatTensor:
UpperCAmelCase , UpperCAmelCase = torch.sort(lowerCAmelCase__ , 1 , descending=lowerCAmelCase__ )
UpperCAmelCase = torch.exp(lowerCAmelCase__ )
UpperCAmelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCAmelCase = torch.full_like(keep_mask[:, 0:1, :] , lowerCAmelCase__ )
UpperCAmelCase = torch.cat((all_true, keep_mask) , dim=1 )
UpperCAmelCase = keep_mask[:, :-1, :]
UpperCAmelCase = keep_mask.gather(1 , indices.argsort(1 ) )
UpperCAmelCase = log_p_x_0.clone()
UpperCAmelCase = -torch.inf # -inf = log(0)
return rv
| 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase( __A ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0]
class __magic_name__ ( _snake_case ):
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self : Tuple ) -> List[str]:
return len(self.file_names )
def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ )
UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowerCAmelCase__ )
UpperCAmelCase = extract_label(lowerCAmelCase__ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase( __A , __A ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__A , __A )
if __name__ == "__main__":
main()
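# A minimal, self-contained sketch of the checkpoint-name parsing used above
# (the `epoch_{i}` / `step_{i}` naming convention). `parse_checkpoint_name` is
# a hypothetical helper name, not part of the original script.
import os as _os

def parse_checkpoint_name(path, steps_per_epoch):
    # Checkpoints are saved as `epoch_{i}` or `step_{i}`; recover where to resume.
    name = _os.path.splitext(_os.path.basename(path))[0]
    if "epoch" in name:
        return int(name.replace("epoch_", "")) + 1, None
    resume_step = int(name.replace("step_", ""))
    starting_epoch = resume_step // steps_per_epoch
    return starting_epoch, resume_step - starting_epoch * steps_per_epoch

assert parse_checkpoint_name("out/epoch_3", 100) == (4, None)
assert parse_checkpoint_name("out/step_250", 100) == (2, 50)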
| 1 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures/dummy-config.json")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ) -> List[Any]:
UpperCAmelCase = 0
def _UpperCamelCase ( self : Optional[int] ) -> Dict:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
UpperCAmelCase = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str ) -> int:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : str ) -> Any:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase = AutoConfig.for_model("roberta" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : int ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "fake-roberta" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
try:
AutoConfig.register("custom" , lowerCAmelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoConfig.register("model" , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
AutoConfig.register("bert" , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase = AutoConfig.from_pretrained("bert-base" )
def _UpperCamelCase ( self : int ) -> int:
with self.assertRaisesRegex(
lowerCAmelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" )
def _UpperCamelCase ( self : Any ) -> Optional[int]:
with self.assertRaisesRegex(
lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _UpperCamelCase ( self : str ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase__ ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase__ ):
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def _UpperCamelCase ( self : Tuple ) -> Tuple:
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """new-model"""
try:
AutoConfig.register("new-model" , lowerCAmelCase__ )
# If remote code is not set, the default is to use local
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
UpperCAmelCase = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
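# Hedged sketch of the registration pattern the tests above exercise; assumes
# `transformers` is installed. `MyConfig` and the "my-model" type are made-up
# example names, not from this file.
from transformers import PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

try:
    AutoConfig.register("my-model", MyConfig)
    # Once registered, the custom type resolves through the auto-API.
    config = AutoConfig.for_model("my-model")
    assert isinstance(config, MyConfig)
finally:
    # Clean up so repeated runs do not hit the "already registered" error.
    CONFIG_MAPPING._extra_content.pop("my-model", None)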
| 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase( ):
UpperCAmelCase , UpperCAmelCase = get_dataset(__A , __A )
print("Processing..." )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(__A , __A , __A )
for index, image in enumerate(__A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase = random_chars(32 )
UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , __A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(__A )} with {file_name}" )
UpperCAmelCase = []
for anno in new_annos[index]:
UpperCAmelCase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__A )
with open(F"/{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(__A , "*.txt" ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__A ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(__A , F"{label_name}.jpg" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def _lowerCAmelCase( __A , __A , __A = 1 ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for idx in range(len(__A ) ):
UpperCAmelCase = []
UpperCAmelCase = img_list[idx]
path_list.append(__A )
UpperCAmelCase = anno_list[idx]
UpperCAmelCase = cva.imread(__A )
if flip_type == 1:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__A )
new_imgs_list.append(__A )
return new_imgs_list, new_annos_lists, path_list
def _lowerCAmelCase( __A = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 1 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function" )
UpperCAmelCase = hex_num[0] == "-"
if is_negative:
UpperCAmelCase = hex_num[1:]
try:
UpperCAmelCase = int(__A , 16 )
except ValueError:
raise ValueError("Invalid value was passed to the function" )
UpperCAmelCase = ""
while int_num > 0:
UpperCAmelCase = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
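# Self-contained sketch (ours) of the conversion above: a hex string is turned
# into its binary digits packed into a base-10 int. The function name is an
# illustrative stand-in for the obfuscated one.
def hex_to_bin(hex_num: str) -> int:
    sign = -1 if hex_num.strip().startswith("-") else 1
    return sign * int(bin(int(hex_num.strip().lstrip("-"), 16))[2:])

assert hex_to_bin("AC") == 10101100   # 0xAC = 172 = 0b10101100
assert hex_to_bin("-0x10") == -10000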
| 1 |
def _lowerCAmelCase( __A ):
if not isinstance(__A , __A ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase = str(abs(__A ) )
UpperCAmelCase = [list(__A ) for char in range(len(__A ) )]
for index in range(len(__A ) ):
num_transpositions[index].pop(__A )
return max(
int("".join(list(__A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
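# Self-contained sketch (ours) of the idea above: the maximum value obtainable
# by deleting exactly one digit. Names are illustrative, not from the original.
def max_after_one_deletion(num: int) -> int:
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))

# For 613, deleting one digit yields 13, 63, 61, so the answer is 63.
assert max_after_one_deletion(613) == 63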
| 1 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ["""image_processor""", """tokenizer"""]
UpperCAmelCase = """CLIPImageProcessor"""
UpperCAmelCase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : int , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Tuple=None , **lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase__ , )
UpperCAmelCase = kwargs.pop("feature_extractor" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __call__( self : Optional[Any] , lowerCAmelCase__ : str=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Tuple=None , **lowerCAmelCase__ : List[str] ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[Any] , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Optional[Any] ) -> Any:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[Any] , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : str ) -> Union[str, Any]:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
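# Hedged usage sketch for the processor above; the checkpoint id and image
# variable are placeholders, not taken from this file.
#
#   processor = __magic_name__.from_pretrained("some-org/some-clip-checkpoint")
#   inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
#   # `inputs` then carries input_ids/attention_mask plus pixel_values.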
| 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
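# Standalone illustration (ours, not from this file) of the fuse_qkv rule above:
# the q/k/v amax values are clamped to a shared max-of-maxes so the fused QKV
# projection can use a single quantization scale.
import torch as _torch

q_amax, k_amax, v_amax = _torch.tensor(1.3), _torch.tensor(0.7), _torch.tensor(2.1)
shared = max(q_amax.item(), k_amax.item(), v_amax.item())
for t in (q_amax, k_amax, v_amax):
    t.fill_(shared)
assert q_amax.item() == k_amax.item() == v_amax.item() == shared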
| 1 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __magic_name__ ( _snake_case ):
UpperCAmelCase = ["""image_processor""", """tokenizer"""]
UpperCAmelCase = """AutoImageProcessor"""
UpperCAmelCase = """AutoTokenizer"""
def __init__( self : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = self.image_processor
def __call__( self : List[Any] , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[Any]=None , **lowerCAmelCase__ : int ) -> int:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def _UpperCamelCase ( self : Tuple , *lowerCAmelCase__ : int , **lowerCAmelCase__ : List[str] ) -> int:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCamelCase ( self : int , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Optional[Any] ) -> Dict:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _UpperCamelCase ( self : List[str] ) -> Any:
return ["input_ids", "attention_mask", "pixel_values"]
| 1 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(__A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , __A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
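# Self-contained sketch (ours) of the Excel column conversion above, written in
# the equivalent Horner form instead of explicit powers of 26.
def excel_title_to_column(title: str) -> int:
    result = 0
    for ch in title:
        result = result * 26 + (ord(ch) - 64)
    return result

assert excel_title_to_column("A") == 1
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZY") == 701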
| 1 | 1 |
from __future__ import annotations
class __magic_name__ :
def __init__( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Tuple:
UpperCAmelCase , UpperCAmelCase = text, pattern
UpperCAmelCase , UpperCAmelCase = len(lowerCAmelCase__ ), len(lowerCAmelCase__ )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : str ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _UpperCamelCase ( self : int , lowerCAmelCase__ : int ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _UpperCamelCase ( self : List[Any] ) -> list[int]:
# searches pattern in text and returns index positions
UpperCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
UpperCAmelCase = self.mismatch_in_text(lowerCAmelCase__ )
if mismatch_index == -1:
positions.append(lowerCAmelCase__ )
else:
UpperCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
UpperCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
lowerCAmelCase__ = "ABAABA"
lowerCAmelCase__ = "AB"
lowerCAmelCase__ = BoyerMooreSearch(text, pattern)
lowerCAmelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 1 |
import unittest
import numpy as np
def _lowerCAmelCase( __A , __A , __A , __A = None , ):
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
if shape_a[0] != shape_b[0]:
UpperCAmelCase = (
"Expected the same number of rows for A and B. "
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__A )
if shape_b[1] != shape_c[1]:
UpperCAmelCase = (
"Expected the same number of columns for B and C. "
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__A )
UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase = np.linalg.inv(__A )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
UpperCAmelCase = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.block([[a, b], [b.T, c]] )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s )
def _UpperCamelCase ( self : str ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
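# Standalone check (ours) of the determinant identity the tests above rely on:
# det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B).
import numpy as _np

a = _np.array([[4.0, 1.0], [1.0, 3.0]])
b = _np.array([[1.0], [2.0]])
c = _np.array([[5.0]])
s = c - b.T @ _np.linalg.inv(a) @ b          # Schur complement of A
m = _np.block([[a, b], [b.T, c]])            # full block matrix
assert _np.isclose(_np.linalg.det(m), _np.linalg.det(a) * _np.linalg.det(s))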
| 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
# Go through the end of the file
while line_index < len(__A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while line_index < len(__A ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__A ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
    # Special correspondence from backend to module name, as used in the requires_<module_name> checks
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
    # Special correspondence from backend to the shortcut used in utils/dummy_xxx_objects.py
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
backend: os.path.join(__A , F"dummy_{short_names.get(__A , __A )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
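# For illustration, filling the DUMMY_CLASS template above with
# ("UNet2DModel", ["torch"]) yields a placeholder like the following sketch
# (plus from_config/from_pretrained stubs; not verbatim generator output):
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])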
| 1 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCAmelCase( __A=2 , __A=3 , __A=16 , __A = 10 , __A = 2 ):
def get_dataset(__A ):
UpperCAmelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__A , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase = get_dataset(__A )
UpperCAmelCase = get_dataset(__A )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCAmelCase( __A , __A , __A , __A , __A , __A=None ):
UpperCAmelCase = []
for epoch in range(__A ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase , UpperCAmelCase = batch
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.mse_loss(__A , __A )
accelerator.backward(__A )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __magic_name__ ( nn.Module ):
def __init__( self : Union[str, Any] ) -> List[Any]:
super().__init__()
UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
def _UpperCamelCase ( self : int , lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
return x * self.a + self.b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase__ , automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
UpperCAmelCase = Accelerator(project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
# Train baseline
UpperCAmelCase = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "initial" )
accelerator.save_state(lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
UpperCAmelCase = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
UpperCAmelCase = os.path.join(lowerCAmelCase__ , "checkpoint" )
accelerator.save_state(lowerCAmelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase__ )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
UpperCAmelCase = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase__ )
UpperCAmelCase = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_0" ) )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
UpperCAmelCase = torch.tensor([1, 2, 3] )
UpperCAmelCase = torch.tensor([2, 3, 4] )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(net.parameters() )
UpperCAmelCase = Accelerator()
with self.assertRaises(lowerCAmelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def _UpperCamelCase ( self : Tuple ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase = torch.optim.lr_scheduler.StepLR(lowerCAmelCase__ , step_size=1 , gamma=0.99 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
UpperCAmelCase = scheduler.state_dict()
train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(lowerCAmelCase__ , scheduler.state_dict() )
def _UpperCamelCase ( self : List[str] ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ , total_limit=2 )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
UpperCAmelCase = accelerator.prepare(lowerCAmelCase__ )
            # Save 11 states; with total_limit=2, only the two most recent should survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
def _UpperCamelCase ( self : List[Any] ) -> Optional[int]:
UpperCAmelCase = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = "/tmp/accelerate/state_checkpointing"
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowerCAmelCase__ = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowerCAmelCase__ = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
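# Hedged sketch of the save/load round trip this file exercises; the directory
# name is illustrative, mirroring the __main__ block above.
#
#   config = ProjectConfiguration(automatic_checkpoint_naming=True)
#   accelerator = Accelerator(project_dir="/tmp/ckpts", project_config=config)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state()   # writes /tmp/ckpts/checkpoints/checkpoint_0
#   accelerator.load_state("/tmp/ckpts/checkpoints/checkpoint_0", map_location="cpu")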
| 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __magic_name__ ( _snake_case , _snake_case ):
UpperCAmelCase = """convnextv2"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : str=2_2_4 , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any] , ) -> List[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = num_stages
UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
UpperCAmelCase = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase = hidden_act
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = drop_path_rate
UpperCAmelCase = image_size
UpperCAmelCase = ["stem"] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
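# Hedged usage sketch; the argument values mirror the defaults set above.
#
#   config = __magic_name__(depths=[3, 3, 9, 3], hidden_sizes=[96, 192, 384, 768],
#                           out_features=["stage2", "stage4"])
#   config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]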
| 1 | 1 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
            UpperCAmelCase = last % 1  # fractional part of the mapped level, used for rounding
            UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
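# Standalone sketch (ours) of the mapping built above: cumulative-histogram
# equalization, new_level = round((L - 1) * CDF(level)). Note that np.round
# uses banker's rounding, while the class above rounds remainders >= 0.5 up.
import numpy as _np

levels = _np.array([0, 0, 1, 1, 1, 3], dtype=_np.uint8)
hist = _np.bincount(levels, minlength=4)
cdf = _np.cumsum(hist) / levels.size
mapping = _np.round(3 * cdf).astype(_np.uint8)  # L - 1 == 3 for a 2-bit image
equalized = mapping[levels]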
| 1 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = False, False, False
@dataclass
class __magic_name__ :
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = None
# Automatically constructed
UpperCAmelCase = "dict"
UpperCAmelCase = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
UpperCAmelCase = field(default="""Audio""" , init=_snake_case , repr=_snake_case )
def __call__( self : List[str] ) -> List[Any]:
return self.pa_type
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(lowerCAmelCase__ , str ):
return {"bytes": None, "path": value}
elif isinstance(lowerCAmelCase__ , bytes ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase = BytesIO()
sf.write(lowerCAmelCase__ , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already have the PCM bytes, we don't need to read the file again (just use them!)
UpperCAmelCase = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
UpperCAmelCase = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2_7_6_7
UpperCAmelCase = BytesIO(bytes() )
sf.write(lowerCAmelCase__ , lowerCAmelCase__ , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : dict , lowerCAmelCase__ : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
UpperCAmelCase , UpperCAmelCase = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
UpperCAmelCase = xsplitext(lowerCAmelCase__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
UpperCAmelCase = token_per_repo_id or {}
UpperCAmelCase = path.split("::" )[-1]
try:
UpperCAmelCase = string_to_dict(lowerCAmelCase__ , config.HUB_DATASETS_URL )["repo_id"]
UpperCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase = None
with xopen(lowerCAmelCase__ , "rb" , use_auth_token=lowerCAmelCase__ ) as f:
UpperCAmelCase , UpperCAmelCase = sf.read(lowerCAmelCase__ )
else:
UpperCAmelCase , UpperCAmelCase = sf.read(lowerCAmelCase__ )
UpperCAmelCase = array.T
if self.mono:
UpperCAmelCase = librosa.to_mono(lowerCAmelCase__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase = librosa.resample(lowerCAmelCase__ , orig_sr=lowerCAmelCase__ , target_sr=self.sampling_rate )
UpperCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _UpperCamelCase ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.binary() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
UpperCAmelCase = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCAmelCase = storage.field("bytes" )
else:
UpperCAmelCase = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCAmelCase = storage.field("path" )
else:
UpperCAmelCase = pa.array([None] * len(lowerCAmelCase__ ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(lowerCAmelCase__ , self.pa_type )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase__ : List[Any] ):
with xopen(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase = f.read()
return bytes_
UpperCAmelCase = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase = pa.array(
[os.path.basename(path ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase__ , self.pa_type )
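# Minimal self-contained sketch of the 16-bit PCM -> float normalization used
# in encode_example above (values and names are illustrative, not from the row):
import numpy as np

pcm = np.array([0, 16_384, -32_768], dtype=np.int16)  # raw signed 16-bit samples
as_float = pcm.astype(np.float32) / 32_767            # scale to roughly [-1.0, 1.0]
print(as_float)  # ~[ 0.  0.5000153  -1.0000305 ]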
| 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = KandinskyInpaintPipeline
UpperCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCAmelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCAmelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase = False
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return 3_2
@property
def _UpperCamelCase ( self : int ) -> List[Any]:
return 3_2
@property
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
return self.time_input_dim
@property
def _UpperCamelCase ( self : Tuple ) -> Tuple:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Any ) -> Optional[int]:
return 1_0_0
@property
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCamelCase ( self : int ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
UpperCAmelCase = MultilingualCLIP(lowerCAmelCase__ )
UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def _UpperCamelCase ( self : str ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self : Tuple ) -> Any:
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCAmelCase__ , )
UpperCAmelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=0 ) -> str:
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase__ )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create mask
UpperCAmelCase = np.ones((6_4, 6_4) , dtype=np.floataa )
UpperCAmelCase = 0
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase = "cpu"
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase__ )
UpperCAmelCase = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _UpperCamelCase ( self : str ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : str ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Tuple ) -> int:
UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
UpperCAmelCase = 0
UpperCAmelCase = "a hat"
UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
UpperCAmelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase = pipeline(
lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type="np" , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
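# Self-contained sketch of the "expected slice" pattern the tests above rely on:
# compare a small corner patch of the output against reference values recorded
# once, with a loose tolerance. All values here are illustrative stand-ins.
import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for a pipeline output
image_slice = image[0, -3:, -3:, -1]                 # bottom-right 3x3 patch, last channel
expected_slice = np.zeros(9, dtype=np.float32)       # previously recorded reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2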
| 1 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCAmelCase__ = Mapping[str, np.ndarray]
lowerCAmelCase__ = Mapping[str, Any] # Is a nested dict.
lowerCAmelCase__ = 0.0_1
@dataclasses.dataclass(frozen=_snake_case )
class __magic_name__ :
UpperCAmelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase = None
# Chain corresponding to each parent
UpperCAmelCase = None
def _lowerCAmelCase( __A ):
UpperCAmelCase = r"(\[[A-Z]+\]\n)"
UpperCAmelCase = [tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0]
UpperCAmelCase = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
UpperCAmelCase = ["N", "CA", "C"]
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCAmelCase = g[1][0].strip()
for i in range(len(__A ) ):
if seq[i] not in residue_constants.restypes:
UpperCAmelCase = "X" # FIXME: strings are immutable
UpperCAmelCase = np.array(
[residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCAmelCase = []
for axis in range(3 ):
tertiary.append(list(map(__A , g[1][axis].split() ) ) )
UpperCAmelCase = np.array(__A )
UpperCAmelCase = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__A ):
UpperCAmelCase = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCAmelCase = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
UpperCAmelCase = np.zeros(
(
len(__A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__A ):
UpperCAmelCase = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , )
def _lowerCAmelCase( __A , __A = 0 ):
UpperCAmelCase = []
UpperCAmelCase = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
UpperCAmelCase = prot.parents
UpperCAmelCase = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCAmelCase = [p for i, p in zip(__A , __A ) if i == chain_id]
if parents is None or len(__A ) == 0:
UpperCAmelCase = ["N/A"]
pdb_headers.append(F"PARENT {' '.join(__A )}" )
return pdb_headers
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = pdb_str.split("\n" )
UpperCAmelCase = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
UpperCAmelCase = 42
if prot.parents is not None and len(prot.parents ) > 0:
UpperCAmelCase = []
if prot.parents_chain_index is not None:
UpperCAmelCase = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__A ) , [] )
parent_dict[str(__A )].append(__A )
UpperCAmelCase = max([int(__A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCAmelCase = parent_dict.get(str(__A ) , ["N/A"] )
parents_per_chain.append(__A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCAmelCase = [["N/A"]]
def make_parent_line(__A ) -> str:
return F"PARENT {' '.join(__A )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCAmelCase = 0
for i, l in enumerate(lines ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__A ):
UpperCAmelCase = parents_per_chain[chain_counter]
else:
UpperCAmelCase = ["N/A"]
out_pdb_lines.append(make_parent_line(__A ) )
return "\n".join(__A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = residue_constants.restypes + ["X"]
def res_atoa(__A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
UpperCAmelCase = residue_constants.atom_types
UpperCAmelCase = []
UpperCAmelCase = prot.atom_mask
UpperCAmelCase = prot.aatype
UpperCAmelCase = prot.atom_positions
UpperCAmelCase = prot.residue_index.astype(np.intaa )
UpperCAmelCase = prot.b_factors
UpperCAmelCase = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
UpperCAmelCase = get_pdb_headers(__A )
if len(__A ) > 0:
pdb_lines.extend(__A )
UpperCAmelCase = aatype.shape[0]
UpperCAmelCase = 1
UpperCAmelCase = 0
UpperCAmelCase = string.ascii_uppercase
UpperCAmelCase = None
# Add all atom sites.
for i in range(__A ):
UpperCAmelCase = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCAmelCase = "ATOM"
UpperCAmelCase = atom_name if len(__A ) == 4 else F" {atom_name}"
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = 1.00
UpperCAmelCase = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCAmelCase = ""
UpperCAmelCase = "A"
if chain_index is not None:
UpperCAmelCase = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCAmelCase = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(__A )
atom_index += 1
UpperCAmelCase = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCAmelCase = True
UpperCAmelCase = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCAmelCase = "TER"
UpperCAmelCase = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(__A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__A , __A ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(__A )
def _lowerCAmelCase( __A ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _lowerCAmelCase( __A , __A , __A = None , __A = None , __A = None , __A = None , __A = None , ):
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__A , remark=__A , parents=__A , parents_chain_index=__A , )
| 1 |
def _lowerCAmelCase( __A , __A ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _lowerCAmelCase( __A , __A=0 ):
return sorted(__A , key=lambda __A : __A[column] )
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(min(6 , points_counts - 1 ) , __A ):
for j in range(max(0 , i - 6 ) , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(__A , __A )
# recursion
UpperCAmelCase = points_counts // 2
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A )
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase = min(__A , __A )
UpperCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__A )
UpperCAmelCase = dis_between_closest_in_strip(
__A , len(__A ) , __A )
return min(__A , __A )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = column_based_sort(__A , column=0 )
UpperCAmelCase = column_based_sort(__A , column=1 )
return (
closest_pair_of_points_sqr(
__A , __A , __A )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 1 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while number > 0:
UpperCAmelCase = number % 10
sum_of_digits += last_digit
UpperCAmelCase = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = factorial(__A )
UpperCAmelCase = split_and_add(nfact )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
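# Worked, self-contained version of the idea above (Project Euler 20); the
# underscore-prefixed helper names are illustrative, not from the original row.
from math import factorial as _factorial

def _digit_sum(n: int) -> int:
    return sum(int(d) for d in str(n))

print(_digit_sum(_factorial(10)))   # 27 (3628800 -> 3+6+2+8+8+0+0)
print(_digit_sum(_factorial(100)))  # 648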
| 700 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
UpperCAmelCase = last % 1  # fractional part decides whether to round up
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase__ = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 0 |
def _lowerCAmelCase( __A ):
if not isinstance(__A , int ):
raise ValueError("Input must be an integer" )
if input_num <= 0:
raise ValueError("Input must be positive" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
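# Self-contained example of the proper-divisor sum above: perfect numbers are
# exactly the numbers that equal the sum of their proper divisors (my names).
def sum_proper_divisors(n: int) -> int:
    return sum(d for d in range(1, n // 2 + 1) if n % d == 0)

print(sum_proper_divisors(6))   # 6
print(sum_proper_divisors(28))  # 28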
| 701 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , BatchEncoding )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , BatchEncoding )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
UpperCAmelCase = [[0] * len(x ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class __magic_name__ ( _SCREAMING_SNAKE_CASE ):
UpperCAmelCase = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCAmelCase = Features({"""text""": Value("""string""" )} )
UpperCAmelCase = Features({} )
UpperCAmelCase = "text"
@property
def _UpperCamelCase ( self : List[str] ) -> Dict[str, str]:
return {self.text_column: "text"}
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __magic_name__ ( _snake_case ):
def __init__( self : Tuple , **lowerCAmelCase__ : List[str] ) -> int:
super().__init__(**UpperCamelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : int , lowerCAmelCase__ : Union[str, List[str], "Image", List["Image"]] , **lowerCAmelCase__ : Tuple ) -> List[Any]:
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def _UpperCamelCase ( self : List[Any] , **lowerCAmelCase__ : str ) -> List[str]:
UpperCAmelCase = {}
if "candidate_labels" in kwargs:
UpperCAmelCase = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
UpperCAmelCase = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str="This is a photo of {}." ) -> Union[str, Any]:
UpperCAmelCase = load_image(UpperCamelCase_ )
UpperCAmelCase = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase = candidate_labels
UpperCAmelCase = [hypothesis_template.format(x ) for x in candidate_labels]
UpperCAmelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
UpperCAmelCase = [text_inputs]
return inputs
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = model_inputs.pop("candidate_labels" )
UpperCAmelCase = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , UpperCamelCase_ ):
UpperCAmelCase = text_inputs[0]
else:
# Batching case.
UpperCAmelCase = text_inputs[0][0]
UpperCAmelCase = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : int ) -> List[str]:
UpperCAmelCase = model_outputs.pop("candidate_labels" )
UpperCAmelCase = model_outputs["logits"][0]
if self.framework == "pt":
UpperCAmelCase = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase = probs.tolist()
if not isinstance(UpperCamelCase_ , list ):
UpperCAmelCase = [scores]
elif self.framework == "tf":
UpperCAmelCase = stable_softmax(UpperCamelCase_ , axis=-1 )
UpperCAmelCase = probs.numpy().tolist()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda lowerCAmelCase__ : -lowerCAmelCase__[0] )
]
return result
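# Self-contained sketch of the postprocessing above: softmax the image-text
# logits into per-label scores and sort descending (toy numbers, my names).
import numpy as np

logits = np.array([2.0, 0.5, -1.0])
probs = np.exp(logits - logits.max())
probs /= probs.sum()
labels = ["cat", "dog", "car"]
print(sorted(
    ({"score": float(s), "label": l} for s, l in zip(probs, labels)),
    key=lambda d: -d["score"],
))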
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( lowercase__ , unittest.TestCase ):
UpperCAmelCase = FunnelTokenizer
UpperCAmelCase = FunnelTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def _UpperCamelCase ( self : Dict ) -> Dict:
super().setUp()
UpperCAmelCase = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _UpperCamelCase ( self : List[Any] , **lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _UpperCamelCase ( self : Any , **lowerCAmelCase__ : Optional[int] ) -> Dict:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase = "UNwant\u00E9d,running"
UpperCAmelCase = "unwanted, running"
return input_text, output_text
def _UpperCamelCase ( self : List[str] ) -> Any:
UpperCAmelCase = self.tokenizer_class(self.vocab_file )
UpperCAmelCase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [7, 4, 5, 1_0, 8, 9] )
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
UpperCAmelCase = tokenizer("UNwant\u00E9d,running" )
UpperCAmelCase = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
UpperCAmelCase = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 704 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
from torch import nn
def _lowerCAmelCase( __A ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
| 705 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 0 |
lowerCAmelCase__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 706 |
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Error for a single example: hypothesis value minus actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis h(x) = theta_0 + theta_1*x_1 + ... + theta_n*x_n."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
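
# A quick cross-check (a sketch, not part of the original algorithm): the same
# linear fit has a closed form, so numpy's least-squares solver should land
# near the parameters found by gradient descent above.
if __name__ == "__main__":
    design = numpy.array([[1.0, *features] for features, _ in train_data])
    targets = numpy.array([target for _, target in train_data])
    closed_form, *_ = numpy.linalg.lstsq(design, targets, rcond=None)
    print("Closed-form least-squares parameters:", closed_form)
    print("Gradient-descent parameters:", parameter_vector)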
| 1 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase__ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 707 |
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators directly, O(p):
print((a / b) % p == (a * b ** (p - 2)) % p)
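
# Cross-check (a sketch): Python's built-in three-argument pow computes the same
# modular inverse via Fermat's little theorem, b**(p-2) % p, also in O(log p).
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)
print((a * pow(b, p - 2, p)) % p)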
| 1 | 0 |
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given number as (numerator, denominator) in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        # Reduce by the greatest common divisor found via the Euclidean algorithm
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
print(f"{decimal_to_fraction('67') = }")
print(f"{decimal_to_fraction('45.0') = }")
print(f"{decimal_to_fraction(1.5) = }")
print(f"{decimal_to_fraction('6.25') = }")
print(f"{decimal_to_fraction('78td') = }")
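
# Cross-check (a sketch): the standard library reaches the same reduced form;
# fractions.Fraction accepts the same decimal strings, e.g. Fraction("6.25") == 25/4.
if __name__ == "__main__":
    from fractions import Fraction

    print(f"{Fraction('6.25') = }")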
| 708 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode the given word with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
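
# Round-trip demo (a sketch): encoding then decoding should return the input.
if __name__ == "__main__":
    message = "hello world"
    assert decode(encode(message)) == message
    print(encode(message))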
| 1 | 0 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down to restore the min-heap property."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
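
# Hard-coded demo (a sketch, bypassing the interactive input above): a small
# 4-vertex graph given as {vertex: [[neighbor, weight], ...]}.
def _demo() -> None:
    graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 3, 3)]:
        graph[u].append([v, w])
        graph[v].append([u, w])
    # Expected minimum spanning tree edges: (0, 1), (1, 2), (2, 3)
    print(prisms_algorithm(graph))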
| 709 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 710 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Schur complement of a symmetric block matrix [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
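
# Worked example (a sketch): for M = [[A, B], [B.T, C]], the Schur complement
# S = C - B.T @ A^{-1} @ B satisfies det(M) = det(A) * det(S) — the identity the
# unit tests below check numerically.
def _schur_determinant_demo() -> None:
    a = np.eye(2) * 2.0
    b = np.array([[1.0], [0.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c)  # 3 - 0.5 = 2.5
    m = np.block([[a, b], [b.T, c]])
    print(np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s)))  # True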
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
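
# Usage note (a sketch): _LazyModule defers the heavy imports above, so importing
# this submodule is cheap and the real module only loads on first attribute access:
#
#   from transformers.models.herbert import HerbertTokenizer  # triggers the lazy load
#   tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-base-cased")
#
# The checkpoint name above is illustrative.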
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
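
# Example invocation (a sketch; the flags are defined in main() above, and the
# data directory is expected to contain images named `<label>_<id>.jpg`, as in
# the Oxford-IIIT Pets dataset):
#
#   accelerate launch cv_example.py --data_dir path/to/pets/images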
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Params (fill these in before running)
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
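
# Bounding-box arithmetic (a sketch): annotations are in YOLO format
# (class, x_center, y_center, width, height), all normalized to [0, 1], so a
# horizontal flip only needs x_center -> 1 - x_center, as update_image_and_anno does.
def _flip_bbox_demo() -> None:
    bbox = [0, 0.25, 0.40, 0.10, 0.20]
    flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
    print(flipped)  # [0, 0.75, 0.4, 0.1, 0.2]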
| 1 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 713 |
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("doctest").testmod()
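
# Brute-force cross-check (a sketch): removing each digit via string slicing
# should agree with remove_digit for any integer.
if __name__ == "__main__":
    n = 152
    s = str(abs(n))
    assert remove_digit(n) == max(int(s[:i] + s[i + 1 :]) for i in range(len(s)))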
| 1 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
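A hedged usage sketch; the layer name is hypothetical and `_disabled` is assumed here to be a valid TensorQuantizer attribute:

# Turn off activation quantization for a single module:
# set_quantizers("bert.encoder.layer.0.output.dense", mod, which="input", _disabled=True)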
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a regex in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
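Example call, assuming the same regex semantics as the re.search above (pattern and attribute are illustrative):

# Disable every quantizer attached to layers whose name contains "encoder":
# set_quantizer_by_name(model, ["encoder"], _disabled=True)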
| 1 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ = (accelerator.state.process_index + 2, 10)
lowerCAmelCase__ = torch.randint(0, 10, shape).to(accelerator.device)
lowerCAmelCase__ = ""
lowerCAmelCase__ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCAmelCase__ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCAmelCase__ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
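A worked example of the invariants checked above, with num_processes == 2:

# rank 0 holds shape (2, 10), rank 1 holds shape (3, 10).
# pad_across_processes grows dim 0 to the max across ranks (3 == num_processes + 1),
# so on rank 0 the extra row is zero-filled at the end; with pad_first=True the
# zero rows come first instead, which is why the second block indexes from `index` onward.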
| 715 |
def excel_title_to_column(column_title: str) -> int:
    """
    Given the column title of an Excel sheet, return its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
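The conversion is bijective base-26 arithmetic; a short check of the loop by hand:

# "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0 = 26 + 2 = 28
# excel_title_to_column("AB") == 28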
| 1 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("Protein models do not support embedding resizing." )
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def _UpperCamelCase ( self : List[Any] ) -> str:
pass
def _UpperCamelCase ( self : List[Any] ) -> str:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__lowerCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
UpperCAmelCase = model.get_bias()
assert isinstance(__lowerCamelCase , __lowerCamelCase )
for k, v in name.items():
assert isinstance(__lowerCamelCase , tf.Variable )
else:
UpperCAmelCase = model.get_output_embeddings()
assert x is None
UpperCAmelCase = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 1 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 717 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
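The three templates map onto naming conventions; a sketch of what each case produces (the object names are illustrative):

# create_dummy_object("ONNX_WEIGHTS_NAME", '["torch"]') -> 'ONNX_WEIGHTS_NAME = None'   (constant)
# create_dummy_object("load_image", '["torch"]')        -> a stub function calling requires_backends
# create_dummy_object("PNDMScheduler", '["torch"]')     -> a DummyObject metaclass stub class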
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 718 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 1 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
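A quick numeric check of the rounding behaviour (with scale_factor=8 the latent grid is height // 64 by width // 64):

# downscale_height_and_width(768, 768, 8) -> (96, 96)   (768 is a multiple of 64)
# downscale_height_and_width(700, 700, 8) -> (88, 88)   (700 // 64 = 10, remainder -> 11 * 8)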
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 719 |
lowerCAmelCase__ = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 1 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 1 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Test whether `x` is a torch/TF/jax tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Test whether `x` is a numpy array."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Test whether `x` is a torch tensor; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class __magic_name__ ( _lowercase ):
def _UpperCamelCase ( self : str ) -> str:
UpperCAmelCase = fields(self )
# Safety and consistency checks
if not len(A_ ):
raise ValueError(f"{self.__class__.__name__} has no fields." )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
UpperCAmelCase = getattr(self , class_fields[0].name )
UpperCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(A_ ):
if isinstance(A_ , A_ ):
UpperCAmelCase = first_field.items()
UpperCAmelCase = True
else:
try:
UpperCAmelCase = iter(A_ )
UpperCAmelCase = True
except TypeError:
UpperCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(A_ ):
if (
not isinstance(A_ , (list, tuple) )
or not len(A_ ) == 2
or not isinstance(element[0] , A_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCAmelCase = element[1]
elif first_field is not None:
UpperCAmelCase = first_field
else:
for field in class_fields:
UpperCAmelCase = getattr(self , field.name )
if v is not None:
UpperCAmelCase = v
def __delitem__( self : Optional[int] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Dict ) -> Tuple:
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
def _UpperCamelCase ( self : int , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : List[Any] ) -> Dict:
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
def _UpperCamelCase ( self : List[Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Dict ) -> Dict:
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
def _UpperCamelCase ( self : Tuple , *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Dict ) -> Dict:
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
def __getitem__( self : int , lowerCAmelCase__ : Any ) -> str:
if isinstance(A_ , A_ ):
UpperCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ) -> Dict:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(A_ , A_ )
super().__setattr__(A_ , A_ )
def __setitem__( self : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> int:
# Will raise a KeyException if needed
super().__setitem__(A_ , A_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(A_ , A_ )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
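# Usage sketch for the output container above (hypothetical subclass and field
# names): a dataclass subclass such as SomeModelOutput(logits=t) supports
# attribute access (out.logits), key access (out["logits"]), integer indexing
# via out[0], and out.to_tuple() over the non-None fields, while
# pop/update/setdefault/__delitem__ are deliberately disabled.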
class __magic_name__ ( _lowercase , _lowercase ):
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , lowerCAmelCase__ : int ) -> Tuple:
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" )
class __magic_name__ ( _lowercase ):
UpperCAmelCase = '''longest'''
UpperCAmelCase = '''max_length'''
UpperCAmelCase = '''do_not_pad'''
class __magic_name__ ( _lowercase ):
UpperCAmelCase = '''pt'''
UpperCAmelCase = '''tf'''
UpperCAmelCase = '''np'''
UpperCAmelCase = '''jax'''
class __magic_name__ :
def __init__( self : int , lowerCAmelCase__ : List[ContextManager] ) -> Any:
UpperCAmelCase = context_managers
UpperCAmelCase = ExitStack()
def __enter__( self : Tuple ) -> List[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(A_ )
def __exit__( self : int , *lowerCAmelCase__ : str , **lowerCAmelCase__ : str ) -> Any:
self.stack.__exit__(*A_ , **A_ )
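# Usage sketch for the ExitStack wrapper above (ContextManagers in the
# upstream source): every manager in the list is entered on __enter__ and all
# of them are unwound together on __exit__, e.g.
# with ContextManagers([torch.no_grad(), some_autocast()]):  # hypothetical managers
#     ...  # both managers are active inside this block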
def _lowerCAmelCase( __A ):
UpperCAmelCase = infer_framework(snake_case__ )
if framework == "tf":
UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _lowerCAmelCase( __A ):
UpperCAmelCase = model_class.__name__
UpperCAmelCase = infer_framework(snake_case__ )
if framework == "tf":
UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
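# Self-contained sketch of the signature introspection both helpers above rely
# on (hypothetical forward function; only the "label" filter is shown):
import inspect as _inspect_demo

def _demo_find_labels(fn):
    return [p for p in _inspect_demo.signature(fn).parameters if "label" in p]

def _demo_forward(input_ids=None, labels=None):  # stand-in for model.forward
    pass

# _demo_find_labels(_demo_forward) -> ["labels"]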
def _lowerCAmelCase( __A , __A = "" , __A = "." ):
def _flatten_dict(__A , __A="" , __A="." ):
for k, v in d.items():
UpperCAmelCase = str(snake_case__ ) + delimiter + str(snake_case__ ) if parent_key else k
if v and isinstance(snake_case__ , snake_case__ ):
yield from flatten_dict(snake_case__ , snake_case__ , delimiter=snake_case__ ).items()
else:
yield key, v
return dict(_flatten_dict(snake_case__ , snake_case__ , snake_case__ ) )
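# Sketch of the flattening behaviour above, as a hypothetical reference
# implementation with readable names (plain dicts only):
def _demo_flatten(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if v and isinstance(v, dict):
            items.update(_demo_flatten(v, key, delimiter))
        else:
            items[key] = v
    return items
# _demo_flatten({"a": 1, "b": {"c": 2}}) -> {"a": 1, "b.c": 2}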
@contextmanager
def _lowerCAmelCase( __A , __A = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _lowerCAmelCase( __A , __A=None ):
if is_numpy_array(snake_case__ ):
return np.transpose(snake_case__ , axes=snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.T if axes is None else array.permute(*snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.transpose(snake_case__ , perm=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.transpose(snake_case__ , axes=snake_case__ )
else:
raise ValueError(F"Type not supported for transpose: {type(snake_case__ )}." )
def _lowerCAmelCase( __A , __A ):
if is_numpy_array(snake_case__ ):
return np.reshape(snake_case__ , snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.reshape(*snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.reshape(snake_case__ , snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.reshape(snake_case__ , snake_case__ )
else:
raise ValueError(F"Type not supported for reshape: {type(snake_case__ )}." )
def _lowerCAmelCase( __A , __A=None ):
if is_numpy_array(snake_case__ ):
return np.squeeze(snake_case__ , axis=snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.squeeze() if axis is None else array.squeeze(dim=snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.squeeze(snake_case__ , axis=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.squeeze(snake_case__ , axis=snake_case__ )
else:
raise ValueError(F"Type not supported for squeeze: {type(snake_case__ )}." )
def _lowerCAmelCase( __A , __A ):
if is_numpy_array(snake_case__ ):
return np.expand_dims(snake_case__ , snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.unsqueeze(dim=snake_case__ )
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.expand_dims(snake_case__ , axis=snake_case__ )
elif is_jax_tensor(snake_case__ ):
return jnp.expand_dims(snake_case__ , axis=snake_case__ )
else:
raise ValueError(F"Type not supported for expand_dims: {type(snake_case__ )}." )
def _lowerCAmelCase( __A ):
if is_numpy_array(snake_case__ ):
return np.size(snake_case__ )
elif is_torch_tensor(snake_case__ ):
return array.numel()
elif is_tf_tensor(snake_case__ ):
import tensorflow as tf
return tf.size(snake_case__ )
elif is_jax_tensor(snake_case__ ):
return array.size
else:
raise ValueError(F"Type not supported for expand_dims: {type(snake_case__ )}." )
def _lowerCAmelCase( __A , __A ):
for key, value in auto_map.items():
if isinstance(snake_case__ , (tuple, list) ):
UpperCAmelCase = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase = F"{repo_id}--{value}"
return auto_map
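# Illustrative input/output for the auto-map helper above (hypothetical repo
# id): {"AutoModel": "modeling.MyModel"} with repo_id "user/repo" becomes
# {"AutoModel": "user/repo--modeling.MyModel"}; values that already contain
# "--" are left untouched.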
def _lowerCAmelCase( __A ):
for base_class in inspect.getmro(snake_case__ ):
UpperCAmelCase = base_class.__module__
UpperCAmelCase = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F"Could not infer framework from class {model_class}." )
| 721 |
def _lowerCAmelCase( __A , __A ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def _lowerCAmelCase( __A , __A=0 ):
    return sorted(__A , key=lambda x : x[column] )
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A=float("inf" ) ):
for i in range(min(6 , points_counts - 1 ) , __A ):
for j in range(max(0 , i - 6 ) , __A ):
UpperCAmelCase = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCAmelCase = current_dis
return min_dis
def _lowerCAmelCase( __A , __A , __A ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(__A , __A )
# recursion
UpperCAmelCase = points_counts // 2
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[:mid] , __A )
UpperCAmelCase = closest_pair_of_points_sqr(
__A , points_sorted_on_y[mid:] , points_counts - mid )
UpperCAmelCase = min(__A , __A )
UpperCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__A )
UpperCAmelCase = dis_between_closest_in_strip(
__A , len(__A ) , __A )
return min(__A , __A )
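# Why the strip step above is cheap: after splitting on the vertical line
# x = points_sorted_on_x[mid][0], only points whose x-offset from that line is
# below the current best distance can form a closer cross pair, and each strip
# point only needs comparing against a constant number of y-sorted neighbours
# (the classic 6-point bound), keeping the combine step linear and the whole
# recursion O(n log n).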
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = column_based_sort(__A , column=0 )
UpperCAmelCase = column_based_sort(__A , column=1 )
return (
closest_pair_of_points_sqr(
__A , __A , __A )
) ** 0.5
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 1 | 0 |
from __future__ import annotations
import math
def _lowerCAmelCase( __A ):
if num <= 0:
UpperCAmelCase = F"{num}: Invalid input, please enter a positive integer."
raise ValueError(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = [True] * (num + 1)
UpperCAmelCase = []
UpperCAmelCase = 2
UpperCAmelCase = int(math.sqrt(__SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , __SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
UpperCAmelCase = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__SCREAMING_SNAKE_CASE )
return prime
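# Expected behaviour for a small input (the mangled name above is prime_sieve
# in the original source, as the __main__ block below shows):
# prime_sieve(10) -> [2, 3, 5, 7]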
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 700 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __magic_name__ :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
UpperCAmelCase = cva.imread(lowerCAmelCase__ , 0 )
UpperCAmelCase = copy.deepcopy(self.img )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
UpperCAmelCase = np.sum(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase = x[i] / self.k
self.sk += prk
UpperCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase = int(last % last )
UpperCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCAmelCase__ )
UpperCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def _UpperCamelCase ( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def _UpperCamelCase ( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase__ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
lowerCAmelCase__ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """cvt"""
def __init__( self : List[Any] , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : List[str]=[7, 3, 3] , lowerCAmelCase__ : int=[4, 2, 2] , lowerCAmelCase__ : str=[2, 1, 1] , lowerCAmelCase__ : Tuple=[6_4, 1_9_2, 3_8_4] , lowerCAmelCase__ : List[str]=[1, 3, 6] , lowerCAmelCase__ : Union[str, Any]=[1, 2, 1_0] , lowerCAmelCase__ : str=[4.0, 4.0, 4.0] , lowerCAmelCase__ : int=[0.0, 0.0, 0.0] , lowerCAmelCase__ : int=[0.0, 0.0, 0.0] , lowerCAmelCase__ : str=[0.0, 0.0, 0.1] , lowerCAmelCase__ : Dict=[True, True, True] , lowerCAmelCase__ : str=[False, False, True] , lowerCAmelCase__ : Tuple=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ : Tuple=[3, 3, 3] , lowerCAmelCase__ : Any=[1, 1, 1] , lowerCAmelCase__ : List[Any]=[2, 2, 2] , lowerCAmelCase__ : str=[1, 1, 1] , lowerCAmelCase__ : int=[1, 1, 1] , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : List[str]=1e-1_2 , **lowerCAmelCase__ : Tuple , ) -> Optional[Any]:
super().__init__(**lowerCAmelCase__ )
UpperCAmelCase = num_channels
UpperCAmelCase = patch_sizes
UpperCAmelCase = patch_stride
UpperCAmelCase = patch_padding
UpperCAmelCase = embed_dim
UpperCAmelCase = num_heads
UpperCAmelCase = depth
UpperCAmelCase = mlp_ratio
UpperCAmelCase = attention_drop_rate
UpperCAmelCase = drop_rate
UpperCAmelCase = drop_path_rate
UpperCAmelCase = qkv_bias
UpperCAmelCase = cls_token
UpperCAmelCase = qkv_projection_method
UpperCAmelCase = kernel_qkv
UpperCAmelCase = padding_kv
UpperCAmelCase = stride_kv
UpperCAmelCase = padding_q
UpperCAmelCase = stride_q
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
| 701 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
super().setUp()
UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase = {"unk_token": "<unk>"}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : str , **lowerCAmelCase__ : str ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> List[Any]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self : Dict ) -> str:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _UpperCamelCase ( self : int ) -> Tuple:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _UpperCamelCase ( self : Tuple ) -> List[str]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="pt" )
self.assertIn("input_ids" , lowerCAmelCase__ )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertNotIn("labels" , lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase__ )
@require_torch
def _UpperCamelCase ( self : int ) -> int:
UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _UpperCamelCase ( self : Any ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _UpperCamelCase ( self : Dict ) -> Tuple:
UpperCAmelCase = ["A long paragraph for summarization."]
UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = tokenizer(text_target=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase = inputs["input_ids"]
UpperCAmelCase = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase = ["Summary of the text.", "Another summary."]
UpperCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCAmelCase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
UpperCAmelCase = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
UpperCAmelCase = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase__ )
def _UpperCamelCase ( self : List[str] ) -> int:
pass
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
UpperCAmelCase = "A, <mask> AllenNLP sentence."
UpperCAmelCase = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
UpperCAmelCase = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 1 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization. Leave None if you want to train a model from"""
""" scratch."""
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase_ )} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""The input training data files (multiple files in glob format). """
"""Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
UpperCAmelCase = field(default=UpperCamelCase_ , metadata={"""help""": """Whether ot not to use whole word mask."""} )
UpperCAmelCase = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCAmelCase = field(
default=1 / 6 , metadata={
"""help""": (
"""Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
""" modeling."""
)
} , )
UpperCAmelCase = field(
default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
UpperCAmelCase = field(
default=-1 , metadata={
"""help""": (
"""Optional input sequence length after tokenization."""
"""The training dataset will be truncated in block of this size for training."""
"""Default to the model max input length for single sentence inputs (take into account special tokens)."""
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowerCAmelCase( __A , __A , __A = False , __A = None , ):
def _dataset(__A , __A=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size , ref_path=snake_case_ , )
return LineByLineTextDataset(tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=snake_case_ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(snake_case_ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowerCAmelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
UpperCAmelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
UpperCAmelCase = AutoModelWithLMHead.from_config(snake_case_ )
model.resize_token_embeddings(len(snake_case_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
UpperCAmelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase = (
get_dataset(snake_case_ , tokenizer=snake_case_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase = (
get_dataset(snake_case_ , tokenizer=snake_case_ , evaluate=snake_case_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCAmelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=snake_case_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase = DataCollatorForWholeWordMask(
tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase = DataCollatorForLanguageModeling(
tokenizer=snake_case_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=snake_case_ , args=snake_case_ , data_collator=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , prediction_loss_only=snake_case_ , )
# Training
if training_args.do_train:
UpperCAmelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=snake_case_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = math.exp(eval_output["eval_loss"] )
UpperCAmelCase = {"perplexity": perplexity}
UpperCAmelCase = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(snake_case_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , snake_case_ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(snake_case_ )
return results
def _lowerCAmelCase( __A ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 702 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _UpperCamelCase ( self : int ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : List[Any] ) -> Dict:
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def _UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=0.9 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=0.5 ) -> Any:
if NLTK_VERSION >= version.Version("3.6.5" ):
UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(lowerCAmelCase__ ) , word_tokenize(lowerCAmelCase__ ) , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
else:
UpperCAmelCase = [
meteor_score.single_meteor_score(lowerCAmelCase__ , lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , gamma=lowerCAmelCase__ )
for ref, pred in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return {"meteor": np.mean(lowerCAmelCase__ )}
| 1 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __magic_name__ :
def __init__( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Union[str, Any]="resnet50" , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=True , ) -> List[str]:
UpperCAmelCase = parent
UpperCAmelCase = out_indices if out_indices is not None else [4]
UpperCAmelCase = stage_names
UpperCAmelCase = out_features
UpperCAmelCase = backbone
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = use_pretrained_backbone
UpperCAmelCase = is_training
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def _UpperCamelCase ( self : Tuple ) -> str:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Any:
UpperCAmelCase = TimmBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def _UpperCamelCase ( self : List[str] ) -> int:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __magic_name__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
UpperCAmelCase = (TimmBackbone,) if is_torch_available() else ()
UpperCAmelCase = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _UpperCamelCase ( self : List[str] ) -> Any:
UpperCAmelCase = TimmBackboneModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _UpperCamelCase ( self : Any ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
UpperCAmelCase = "resnet18"
UpperCAmelCase = "microsoft/resnet-18"
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ , use_timm_backbone=lowerCAmelCase__ )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ , use_timm_backbone=lowerCAmelCase__ , out_indices=[1, 2, 3] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> str:
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def _UpperCamelCase ( self : List[str] ) -> Any:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase ( self : Optional[int] ) -> int:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _UpperCamelCase ( self : List[str] ) -> List[str]:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase ( self : int ) -> List[Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase ( self : List[Any] ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
@unittest.skip("Safetensors is not supported by timm." )
def _UpperCamelCase ( self : int ) -> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
def _UpperCamelCase ( self : Dict ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
UpperCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCAmelCase = self.all_model_classes[0]
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = model(**lowerCAmelCase__ )
UpperCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _UpperCamelCase ( self : Optional[int] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCAmelCase = copy.deepcopy(lowerCAmelCase__ )
UpperCAmelCase = None
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCAmelCase = copy.deepcopy(lowerCAmelCase__ )
UpperCAmelCase = False
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase__ )
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def _lowerCAmelCase( __A , __A , __A=8 ):
UpperCAmelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
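# Worked example for the helper above: with the default scale_factor=8,
# height=width=512 gives 512 // 8**2 = 8 exactly, so it returns (64, 64): the
# latent grid the UNet sees is the pixel resolution divided by the movq scale
# factor (non-multiples are rounded up to the next latent cell).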
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self : str , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : DDPMScheduler , lowerCAmelCase__ : VQModel , ) -> str:
super().__init__()
self.register_modules(
unet=_a , scheduler=_a , movq=_a , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ) -> Dict:
if latents is None:
UpperCAmelCase = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase = latents.to(_a )
UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def _UpperCamelCase ( self : str , lowerCAmelCase__ : Dict=0 ) -> Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
def _UpperCamelCase ( self : int , lowerCAmelCase__ : List[Any]=0 ) -> Optional[Any]:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCamelCase ( self : Tuple ) -> Tuple:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self : str , lowerCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 4.0 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , ) -> int:
UpperCAmelCase = self._execution_device
UpperCAmelCase = guidance_scale > 1.0
if isinstance(_a , _a ):
UpperCAmelCase = torch.cat(_a , dim=0 )
UpperCAmelCase = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_a , _a ):
UpperCAmelCase = torch.cat(_a , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(_a , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(_a , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_a )
self.scheduler.set_timesteps(_a , device=_a )
UpperCAmelCase = self.scheduler.timesteps
UpperCAmelCase = self.unet.config.in_channels
UpperCAmelCase = downscale_height_and_width(_a , _a , self.movq_scale_factor )
# create initial latent
UpperCAmelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _a , _a , _a , self.scheduler , )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {"""image_embeds""": image_embeds}
UpperCAmelCase = self.unet(
sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
if do_classifier_free_guidance:
UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
_a , _a , _a , generator=_a , )[0]
# post-processing
UpperCAmelCase = self.movq.decode(_a , force_not_quantize=_a )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 704 |
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCAmelCase( __A = 100 ):
UpperCAmelCase = 1
UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
UpperCAmelCase = pre_numerator
UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase = cur_numerator
UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(__A )
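# The loop above walks the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1,
# 6, ...]: for i >= 2 the partial denominator is 2*i//3 when i is a multiple
# of 3 and 1 otherwise, and each step applies the convergent recurrence
# n_i = a_i * n_{i-1} + n_{i-2}. The answer is the digit sum of the 100th
# numerator.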
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 0 |
import math
from collections.abc import Callable
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = xa
UpperCAmelCase = xa
while True:
if x_n == x_na or function(snake_case__ ) == function(snake_case__ ):
raise ZeroDivisionError("float division by zero, could not find root" )
UpperCAmelCase = x_na - (
function(snake_case__ ) / ((function(snake_case__ ) - function(snake_case__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
UpperCAmelCase = x_na
UpperCAmelCase = x_na
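# The update inside the loop above is the secant method:
# x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)),
# iterated until two successive iterates agree to within 1e-5; the __main__
# demo below finds the root of x**3 - 2*x - 5 near x = 2.09.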
def _lowerCAmelCase( __A ):
return math.pow(snake_case__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 705 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 1 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __magic_name__ ( _snake_case ):
UpperCAmelCase = field(
default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
UpperCAmelCase = field(default=_snake_case , metadata={"""help""": """Whether to SortishSamler or not."""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
UpperCAmelCase = field(default=_snake_case , metadata={"""help""": """whether to use adafactor"""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
UpperCAmelCase = field(default=_snake_case , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
UpperCAmelCase = field(
default=_snake_case , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
UpperCAmelCase = field(
default="""linear""" , metadata={"""help""": f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 706 |
import numpy
# List of input, output pairs
lowerCAmelCase__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowerCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowerCAmelCase__ = [2, 4, 1, 5]
lowerCAmelCase__ = len(train_data)
lowerCAmelCase__ = 0.0_0_9
def _lowerCAmelCase( __A , __A="train" ):
return calculate_hypothesis_value(__A , __A ) - output(
__A , __A )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for i in range(len(__A ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase( __A , __A ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase( __A , __A=m ):
UpperCAmelCase = 0
for i in range(__A ):
if index == -1:
summation_value += _error(__A )
else:
summation_value += _error(__A ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase( __A ):
UpperCAmelCase = summation_of_cost_derivative(__A , __A ) / m
return cost_derivative_value
def _lowerCAmelCase( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
UpperCAmelCase = 0.000002
UpperCAmelCase = 0
UpperCAmelCase = 0
while True:
j += 1
UpperCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(__A ) ):
UpperCAmelCase = get_cost_derivative(i - 1 )
UpperCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__A , __A , atol=__A , rtol=__A , ):
break
UpperCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCAmelCase( ):
for i in range(len(__A ) ):
print(("Actual output value:", output(__A , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__A , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
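# Added note (illustrative, not from the source): with the hypothesis
#   h(x) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_3,
# the training loop above is meant to perform batch gradient descent, updating
# every parameter as
#   theta_j <- theta_j - LEARNING_RATE * (1/m) * sum_i (h(x_i) - y_i) * x_i[j]
# (index -1 plays the role of the bias term, whose "feature" is the constant 1),
# and it stops once two successive parameter vectors agree within
# absolute_error_limit under numpy.allclose.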
| 1 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = "▁"
lowerCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
lowerCAmelCase__ = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
lowerCAmelCase__ = {"vinai/bartpho-syllable": 1024}
class __magic_name__ ( _UpperCAmelCase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="<s>" , lowerCAmelCase__ : Optional[int]="</s>" , lowerCAmelCase__ : Optional[int]="</s>" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Optional[int]="<pad>" , lowerCAmelCase__ : List[Any]="<mask>" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase = vocab_file
UpperCAmelCase = monolingual_vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCAmelCase = {}
UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowercase_ ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = cnt
cnt += 1
with open(lowercase_ , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
UpperCAmelCase = line.strip().split()[0]
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(lowercase_ ) not in self.fairseq_tokens_to_ids:
UpperCAmelCase = len(self.fairseq_tokens_to_ids )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ) -> Any:
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> List[str]:
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> Any:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> Dict:
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
return len(self.fairseq_ids_to_tokens )
def _UpperCamelCase ( self : List[Any] ) -> Any:
UpperCAmelCase = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[Any]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : List[Any] ) -> str:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : str ) -> Optional[int]:
return self.fairseq_ids_to_tokens[index]
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : Tuple ) -> List[Any]:
UpperCAmelCase = """""".join(lowercase_ ).replace(lowercase_ , " " ).strip()
return out_string
def _UpperCamelCase ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> List[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , "wb" ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowercase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowercase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowercase_ , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(lowercase_ )} \n" )
return out_vocab_file, out_monolingual_vocab_file
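# Possible usage (sketch; assumes the published vinai/bartpho-syllable checkpoint
# and upstream auto-class loading, neither of which appears verbatim in this row):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#
# Encoding runs through the SentencePiece model, then each piece is looked up in
# the reduced fairseq vocabulary built from dict.txt (unknown pieces fall back to
# the <unk> id), which is exactly what the convert-token methods above implement.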
| 707 |
def _lowerCAmelCase( __A , __A , __A ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__A , n - 1 , __A ) * a) % mod
else:
UpperCAmelCase = binary_exponentiation(__A , n / 2 , __A )
return (b * b) % mod
# a prime number
lowerCAmelCase__ = 701
lowerCAmelCase__ = 1000000000
lowerCAmelCase__ = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
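# Worked example (added, illustrative): by Fermat's little theorem, for a prime p
# the modular inverse of b is b**(p-2) mod p, so (a / b) mod p == a * b**(p-2) mod p.
# With p = 7 and b = 3: 3**5 = 243 = 34*7 + 5, so the inverse is 5, and indeed
# 3 * 5 = 15 == 1 (mod 7). Python's built-in pow(3, 5, 7) -> 5 performs the same
# O(log n) square-and-multiply that the recursive function above implements.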
| 1 | 0 |
import math
def _lowerCAmelCase( __A : int ):
UpperCAmelCase = 0
UpperCAmelCase = 0
while num > 0:
UpperCAmelCase = num % 8
UpperCAmelCase = octal + (remainder * math.floor(math.pow(10 , _lowercase ) ))
counter += 1
UpperCAmelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F"0o{int(_lowercase )}"
def _lowerCAmelCase( ):
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
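# Intended trace (added, illustrative) for 65 -> "0o101": the loop peels octal
# digits from the least significant end via repeated division by 8:
#   65 % 8 = 1, 65 // 8 = 8   -> digit 1 at weight 10**0
#    8 % 8 = 0,  8 // 8 = 1   -> digit 0 at weight 10**1
#    1 % 8 = 1,  1 // 8 = 0   -> digit 1 at weight 10**2
# accumulating the decimal-looking number 101, which is then printed as 0o101.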
| 708 |
lowerCAmelCase__ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowerCAmelCase__ = {value: key for key, value in encode_dict.items()}
def _lowerCAmelCase( __A ):
UpperCAmelCase = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces" )
return encoded
def _lowerCAmelCase( __A ):
if set(__A ) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces" )
UpperCAmelCase = ""
for word in coded.split():
while len(__A ) != 0:
decoded += decode_dict[word[:5]]
UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
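# Worked example (added, illustrative) using the tables above:
#   encode("hello") -> "AABBB" + "AABAA" + "ABABA" + "ABABA" + "ABBAB"
#                   == "AABBBAABAAABABAABABAABBAB"
# decode() consumes each space-separated word five symbols at a time, so
#   decode("AABBBAABAAABABAABABAABBAB") -> "hello".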
| 1 | 0 |
def _lowerCAmelCase( __A ):
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
UpperCAmelCase = grid[0]
for row_n in range(1 , len(UpperCAmelCase__ ) ):
UpperCAmelCase = grid[row_n]
UpperCAmelCase = fill_row(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase = grid[row_n]
return grid[-1][-1]
def _lowerCAmelCase( __A , __A ):
current_row[0] += row_above[0]
for cell_n in range(1 , len(UpperCAmelCase__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
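# Worked example (added, illustrative): for the classic grid
#   [[1, 3, 1],
#    [1, 5, 1],
#    [4, 2, 1]]
# the first row becomes the prefix sums [1, 4, 5]; folding each later row against
# the one above gives [2, 7, 6] and then [6, 8, 7], so the cheapest
# top-left-to-bottom-right path (1 -> 3 -> 1 -> 1 -> 1) costs 7.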
| 709 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase__ = {"UserAgent": UserAgent().random}
def _lowerCAmelCase( __A ):
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __magic_name__ :
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Any:
UpperCAmelCase = f"https://www.instagram.com/{username}/"
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self : List[str] ) -> dict:
UpperCAmelCase = requests.get(self.url , headers=lowerCAmelCase__ ).text
UpperCAmelCase = BeautifulSoup(lowerCAmelCase__ , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Tuple ) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def _UpperCamelCase ( self : Any ) -> str:
return self.user_data["username"]
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self : List[str] ) -> str:
return self.user_data["biography"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> str:
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self : str ) -> str:
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self : int ) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self : Tuple ) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self : Optional[int] ) -> bool:
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self : Optional[Any] ) -> bool:
return self.user_data["is_private"]
def _lowerCAmelCase( __A = "github" ):
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 1 | 0 |
lowerCAmelCase__ = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
lowerCAmelCase__ = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = from_type.lower().strip("s" )
UpperCAmelCase = to_type.lower().strip("s" )
UpperCAmelCase = UNIT_SYMBOL.get(_snake_case , _snake_case )
UpperCAmelCase = UNIT_SYMBOL.get(_snake_case , _snake_case )
if from_sanitized not in METRIC_CONVERSION:
UpperCAmelCase = (
F"Invalid 'from_type' value: {from_type!r}.\n"
F"Conversion abbreviations are: {', '.join(_snake_case )}"
)
raise ValueError(_snake_case )
if to_sanitized not in METRIC_CONVERSION:
UpperCAmelCase = (
F"Invalid 'to_type' value: {to_type!r}.\n"
F"Conversion abbreviations are: {', '.join(_snake_case )}"
)
raise ValueError(_snake_case )
UpperCAmelCase = METRIC_CONVERSION[from_sanitized]
UpperCAmelCase = METRIC_CONVERSION[to_sanitized]
UpperCAmelCase = 1
if from_exponent > to_exponent:
UpperCAmelCase = from_exponent - to_exponent
else:
UpperCAmelCase = -(to_exponent - from_exponent)
return value * pow(10 , _snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
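# Worked example (added, illustrative): converting 1 kilometer to meters looks up
# the exponents 3 ("km") and 0 ("m"), so the factor is 10**(3 - 0) and the result
# is 1 * 1000 = 1000.0; converting the other way uses 10**(0 - 3) = 0.001.
# Unknown unit names raise ValueError listing the valid abbreviations.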
| 710 |
import unittest
import numpy as np
def _lowerCAmelCase( __A , __A , __A , __A = None , ):
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
UpperCAmelCase = np.shape(__A )
if shape_a[0] != shape_b[0]:
UpperCAmelCase = (
"Expected the same number of rows for A and B. "
F"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__A )
if shape_b[1] != shape_c[1]:
UpperCAmelCase = (
"Expected the same number of columns for B and C. "
F"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__A )
UpperCAmelCase = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase = np.linalg.inv(__A )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : List[str] ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
UpperCAmelCase = schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = np.block([[a, b], [b.T, c]] )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
UpperCAmelCase = np.linalg.det(lowerCAmelCase__ )
self.assertAlmostEqual(lowerCAmelCase__ , det_a * det_s )
def _UpperCamelCase ( self : str ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCamelCase ( self : Dict ) -> None:
UpperCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase__ ):
schur_complement(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
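# Added note (illustrative): the first test relies on the block-determinant
# identity for an invertible A,
#   det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B),
# i.e. the determinant of the full block matrix equals det(A) times the
# determinant of the Schur complement of A, which is exactly what the
# assertAlmostEqual above checks numerically.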
| 1 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCAmelCase__ = parse(importlib.metadata.version("torch"))
def _lowerCAmelCase( __A , __A , __A ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
UpperCAmelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(_lowercase , _lowercase ):
UpperCAmelCase = parse(importlib.metadata.version(_lowercase ) )
return operation(_lowercase , parse(_lowercase ) )
def _lowerCAmelCase( __A , __A ):
return compare_versions(_lowercase , _lowercase , _lowercase )
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCAmelCase( __A ):
UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __A ).groups()[0]
class __magic_name__ ( _snake_case ):
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None ) -> Optional[Any]:
UpperCAmelCase = file_names
UpperCAmelCase = image_transform
UpperCAmelCase = label_to_id
def __len__( self : Tuple ) -> List[str]:
return len(self.file_names )
def __getitem__( self : Optional[int] , lowerCAmelCase__ : Tuple ) -> Dict:
UpperCAmelCase = self.file_names[idx]
UpperCAmelCase = PIL.Image.open(lowerCAmelCase__ )
UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
UpperCAmelCase = self.image_transform(lowerCAmelCase__ )
UpperCAmelCase = extract_label(lowerCAmelCase__ )
if self.label_to_id is not None:
UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCAmelCase( __A , __A ):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__A , __A , __A , __A , __A )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase( ):
UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__A , __A )
if __name__ == "__main__":
main()
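# Possible launch (sketch; the script name and paths are placeholders, not from
# this row):
#   accelerate launch cv_example.py --data_dir ./pet_images --mixed_precision fp16 \
#       --checkpointing_steps epoch --with_tracking --project_dir ./logs
# All of these flags are defined by the argparse parser above; `accelerate launch`
# is the standard entry point for scripts built on accelerate.Accelerator.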
| 1 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """deit"""
def __init__( self : Any , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : Any=1_2 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : Optional[Any]=3_0_7_2 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : int=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Dict=1e-1_2 , lowerCAmelCase__ : Optional[int]=2_2_4 , lowerCAmelCase__ : Optional[int]=1_6 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[str]=1_6 , **lowerCAmelCase__ : List[str] , ) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = qkv_bias
UpperCAmelCase = encoder_stride
class __magic_name__ ( _snake_case ):
UpperCAmelCase = version.parse("""1.11""" )
@property
def _UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> float:
return 1e-4
| 712 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCAmelCase( ):
UpperCAmelCase , UpperCAmelCase = get_dataset(__A , __A )
print("Processing..." )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = update_image_and_anno(__A , __A , __A )
for index, image in enumerate(__A ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase = random_chars(32 )
UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , __A , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(__A )} with {file_name}" )
UpperCAmelCase = []
for anno in new_annos[index]:
UpperCAmelCase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(__A )
with open(F"/{file_root}.txt" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowerCAmelCase( __A , __A ):
UpperCAmelCase = []
UpperCAmelCase = []
for label_file in glob.glob(os.path.join(__A , "*.txt" ) ):
UpperCAmelCase = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__A ) as in_file:
UpperCAmelCase = in_file.readlines()
UpperCAmelCase = os.path.join(__A , F"{label_name}.jpg" )
UpperCAmelCase = []
for obj_list in obj_lists:
UpperCAmelCase = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__A )
labels.append(__A )
return img_paths, labels
def _lowerCAmelCase( __A , __A , __A = 1 ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for idx in range(len(__A ) ):
UpperCAmelCase = []
UpperCAmelCase = img_list[idx]
path_list.append(__A )
UpperCAmelCase = anno_list[idx]
UpperCAmelCase = cva.imread(__A )
if flip_type == 1:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
UpperCAmelCase = cva.flip(__A , __A )
for bbox in img_annos:
UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__A )
new_imgs_list.append(__A )
return new_imgs_list, new_annos_lists, path_list
def _lowerCAmelCase( __A = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(__A ) for _ in range(__A ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 0 |
from collections.abc import Callable
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = a
UpperCAmelCase = b
if function(__A ) == 0: # one of the a or b is a root for the function
return a
elif function(__A ) == 0:
return b
elif (
function(__A ) * function(__A ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
UpperCAmelCase = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(__A ) == 0:
return mid
elif function(__A ) * function(__A ) < 0:
UpperCAmelCase = mid
else:
UpperCAmelCase = mid
UpperCAmelCase = start + (end - start) / 2.0
return mid
def _lowerCAmelCase( __A ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
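# Added check (illustrative): f(x) = x**3 - 2*x - 5 changes sign on [1, 1000]
# (f(1) = -6, f(1000) > 0) and has a single real root, so the interval halving
# above is guaranteed to home in on x ≈ 2.0945515 within the 10**-7 stopping width.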
| 713 |
def _lowerCAmelCase( __A ):
if not isinstance(__A , __A ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase = str(abs(__A ) )
UpperCAmelCase = [list(__A ) for char in range(len(__A ) )]
for index in range(len(__A ) ):
num_transpositions[index].pop(__A )
return max(
int("".join(list(__A ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 1 | 0 |
from collections import defaultdict
from math import gcd
def _lowerCAmelCase( __A = 1500000 ):
UpperCAmelCase = defaultdict(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , SCREAMING_SNAKE_CASE_ , 2 ):
if gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) > 1:
continue
UpperCAmelCase = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(SCREAMING_SNAKE_CASE_ , limit + 1 , SCREAMING_SNAKE_CASE_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f"{solution() = }")
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
| 1 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
UpperCAmelCase = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
UpperCAmelCase = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
UpperCAmelCase = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
UpperCAmelCase = field(default=2 , metadata={"""help""": """Batch size for training."""} )
UpperCAmelCase = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
UpperCAmelCase = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
UpperCAmelCase = field(
default=1_0_0_0_0 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    UpperCAmelCase = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
UpperCAmelCase = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
UpperCAmelCase = field(
default=7_5_0 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
UpperCAmelCase = field(
default=1_6 , metadata={"""help""": """Number of gradient accumulation steps."""} )
UpperCAmelCase = field(
default=__snake_case , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
UpperCAmelCase = field(default=5_0_0_0_0 , metadata={"""help""": """Maximum number of training steps."""} )
UpperCAmelCase = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
UpperCAmelCase = field(default=1_0_2_4 , metadata={"""help""": """Sequence lengths used for training."""} )
UpperCAmelCase = field(default=1 , metadata={"""help""": """Training seed."""} )
UpperCAmelCase = field(
default=1_0_2_4 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
UpperCAmelCase = field(
default=__snake_case , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
UpperCAmelCase = field(default=__snake_case , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
UpperCAmelCase = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
UpperCAmelCase = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
UpperCAmelCase = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
UpperCAmelCase = field(default=1_0_2_4 , metadata={"""help""": """Length of sequences to be evaluated."""} )
UpperCAmelCase = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
UpperCAmelCase = field(default=__snake_case , metadata={"""help""": """Number of workers used for code evaluation."""} )
UpperCAmelCase = field(
default=__snake_case , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
UpperCAmelCase = field(
default=__snake_case , metadata={"""help""": """Sample from the language model\'s output distribution."""} )
UpperCAmelCase = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
UpperCAmelCase = field(default=2_5_6 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
UpperCAmelCase = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
UpperCAmelCase = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
UpperCAmelCase = field(default=1_0 , metadata={"""help""": """Number of generations to run in parallel."""} )
UpperCAmelCase = field(
default=2_0_0 , metadata={"""help""": """Number of completions to generate for each sample."""} )
UpperCAmelCase = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
UpperCAmelCase = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
UpperCAmelCase = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
UpperCAmelCase = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default=__snake_case , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
UpperCAmelCase = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
UpperCAmelCase = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
UpperCAmelCase = field(
default=1_0_0_0_0_0 , metadata={"""help""": """Number of files to save per JSON output file."""} )
UpperCAmelCase = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
UpperCAmelCase = field(
default=1_0_0_0 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
UpperCAmelCase = field(
default=1_0_0 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
UpperCAmelCase = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
UpperCAmelCase = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
UpperCAmelCase = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
UpperCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
UpperCAmelCase = field(
default=__snake_case , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
UpperCAmelCase = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
UpperCAmelCase = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
UpperCAmelCase = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
UpperCAmelCase = field(default=2_0_0_0_0_0 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
UpperCAmelCase = field(
        default=3_2_7_6_8 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
UpperCAmelCase = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
UpperCAmelCase = field(default=__snake_case , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
UpperCAmelCase = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
UpperCAmelCase = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
    UpperCAmelCase = field(default=__snake_case , metadata={"""help""": """Number of workers used for pretokenization."""} )
@dataclass
class __magic_name__ :
UpperCAmelCase = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
UpperCAmelCase = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
UpperCAmelCase = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
UpperCAmelCase = field(default=__snake_case , metadata={"""help""": """Push saved tokenizer to the hub."""} )
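# Possible usage (sketch; the class name below is hypothetical, since the field
# names in this row were renamed away): dataclasses written in this
# field(default=..., metadata={"help": ...}) style are normally fed to
# transformers.HfArgumentParser, which turns every field into a CLI flag, e.g.
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TokenizerTrainingArguments)  # hypothetical name
#   args = parser.parse_args_into_dataclasses()[0]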
| 715 |
def _lowerCAmelCase( __A ):
assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCAmelCase( __A , __A=False , __A=False ):
UpperCAmelCase = "backbone." if is_semantic else ""
UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
for i in range(config.num_hidden_layers ):
UpperCAmelCase = "backbone." if is_semantic else ""
# queries, keys and values
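        # (assumption, based on BEiT's design: the checkpoint stores a fused
        # qkv weight that is split below, and only query/value biases are
        # read because BEiT's key projection carries no bias)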
UpperCAmelCase = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
UpperCAmelCase = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
UpperCAmelCase = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase = q_bias
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
UpperCAmelCase = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
UpperCAmelCase = gamma_a
UpperCAmelCase = gamma_a
def _lowerCAmelCase( __A , __A , __A ):
UpperCAmelCase = dct.pop(__A )
UpperCAmelCase = val
def _lowerCAmelCase( ):
UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase( __A , __A , __A=False ):
UpperCAmelCase = False if "rvlcdip" in checkpoint_url else True
UpperCAmelCase = BeitConfig(use_absolute_position_embeddings=__A , use_mask_token=__A )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase = 1024
UpperCAmelCase = 4096
UpperCAmelCase = 24
UpperCAmelCase = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase = 16
UpperCAmelCase = "huggingface/label-files"
UpperCAmelCase = "rvlcdip-id2label.json"
UpperCAmelCase = json.load(open(hf_hub_download(__A , __A , repo_type="dataset" ) , "r" ) )
        UpperCAmelCase = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase = torch.hub.load_state_dict_from_url(__A , map_location="cpu" )["model"]
UpperCAmelCase = create_rename_keys(__A , has_lm_head=__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , has_lm_head=__A )
# load HuggingFace model
UpperCAmelCase = BeitForMaskedImageModeling(__A ) if has_lm_head else BeitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# Check outputs on an image
UpperCAmelCase = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__A )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__A , return_tensors="pt" )
UpperCAmelCase = encoding["pixel_values"]
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.logits
# verify logits
UpperCAmelCase = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(__A ), "Shape of logits not as expected"
Path(__A ).mkdir(exist_ok=__A )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__A )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__A )
if push_to_hub:
if has_lm_head:
UpperCAmelCase = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
UpperCAmelCase = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__A , )
model.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__A , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
lowerCAmelCase__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
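# Illustrative invocation (the script filename and output path below are
# placeholders, not taken from the original; only the default checkpoint URL
# comes from the argument parser above):
#
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base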
| 716 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir("fixtures")
class __magic_name__ ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 5_0_0
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
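            # the call above should have fallen back to the locally cached
            # files despite the mocked 500 response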
            # This checks that we did call the fake head request
mock_head.assert_called()
def _UpperCamelCase ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class __magic_name__ ( unittest.TestCase ):
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> List[Any]:
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def _UpperCamelCase ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Any:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="test-feature-extractor" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
def _UpperCamelCase ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowerCAmelCase__ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 1 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __magic_name__ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _UpperCamelCase ( self : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
UpperCAmelCase = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCAmelCase = VideoClassificationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ , top_k=2 )
UpperCAmelCase = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def _UpperCamelCase ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple ) -> List[Any]:
for example in examples:
UpperCAmelCase = video_classifier(lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ , [
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
] , )
@require_torch
def _UpperCamelCase ( self : Any ) -> List[Any]:
UpperCAmelCase = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
UpperCAmelCase = VideoMAEFeatureExtractor(
size={"shortest_edge": 1_0} , crop_size={"height": 1_0, "width": 1_0} )
UpperCAmelCase = pipeline(
"video-classification" , model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , frame_sampling_rate=4 )
UpperCAmelCase = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
UpperCAmelCase = video_classifier(lowerCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}] , )
UpperCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
] , )
@require_tf
def _UpperCamelCase ( self : List[str] ) -> Optional[int]:
pass
| 717 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase__ = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase__ = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase__ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase__ = "\n{0} = None\n"
lowerCAmelCase__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _lowerCAmelCase( __A ):
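    # e.g. a line guarded by `if is_torch_available() and is_transformers_available():`
    # yields the backend key "torch_and_transformers"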
UpperCAmelCase = _re_backend.findall(__A )
if len(__A ) == 0:
return None
return "_and_".join(__A )
def _lowerCAmelCase( ):
with open(os.path.join(__A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
    # Get to the point where we do the actual imports for type checking
UpperCAmelCase = 0
UpperCAmelCase = {}
    # Go through to the end of the file
    while line_index < len(lines ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase = []
# Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _re_single_line_import.search(__A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
            if len(objects ) > 0:
UpperCAmelCase = objects
else:
line_index += 1
return backend_specific_objects
def _lowerCAmelCase( __A , __A ):
if name.isupper():
return DUMMY_CONSTANT.format(__A )
elif name.islower():
return DUMMY_FUNCTION.format(__A , __A )
else:
return DUMMY_CLASS.format(__A , __A )
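# For example (illustrative), create_dummy_object("UNet2DModel", '["torch"]')
# expands DUMMY_CLASS into a `UNet2DModel(metaclass=DummyObject)` stub whose
# methods call requires_backends(..., ["torch"]).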
def _lowerCAmelCase( __A=None ):
if backend_specific_objects is None:
UpperCAmelCase = read_init()
    # Build the content of one dummy module per backend; each dummy object calls requires_backends with that backend
UpperCAmelCase = {}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase = "[" + ", ".join(F"\"{b}\"" for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__A , __A ) for o in objects] )
UpperCAmelCase = dummy_file
return dummy_files
def _lowerCAmelCase( __A=False ):
UpperCAmelCase = create_dummy_files()
    # Special correspondence from backend to filename shortcut, as used in utils/dummy_xxx_objects.py (e.g. torch -> pt)
UpperCAmelCase = {"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase = os.path.join(__A , "utils" )
UpperCAmelCase = {
        backend: os.path.join(__A , F"dummy_{short_names.get(backend , backend )}_objects.py" )
for backend in dummy_files.keys()
}
UpperCAmelCase = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__A ):
with open(__A , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
else:
UpperCAmelCase = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py as the main "
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F"diffusers.utils.dummy_{short_names.get(__A , __A )}_objects.py. Run `make fix-copies` "
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 1 | 0 |