| code (string, lengths 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transforme... | 510 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Co... | 585 | 0 |
from __future__ import annotations
def snake_case_ ( __lowercase , __lowercase ):
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(__lowercase ):
print(F'''{i}\t\t{d}''' )
def snake_case_ ( __lowercase , _... | 641 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def snake_case_ ( __lowercase , __low... | 641 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_c... | 143 |
'''simple docstring'''
A_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prom... | 143 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : Any = logging.get_logger(__name__)
class lowerCAmelCase ( _UpperCAmelCase ):
def __init__( self , *UpperCamelCase , **UpperC... | 719 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' wh... | 493 | 0 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Ve... | 115 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
fro... | 115 | 1 |
from __future__ import annotations
from math import gcd
def _A (UpperCamelCase : int , UpperCamelCase : int = 2 , UpperCamelCase : int = 1 , UpperCamelCase : int = 3 , ):
'''simple docstring'''
if num < 2:
raise ValueError("""T... | 714 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available()... | 96 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__ ( enum.Enum ):
a : A... | 515 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import B... | 515 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase__ :str = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
... | 721 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import Ba... | 374 | 0 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_doc... | 129 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ ... | 92 | 0 |
"""simple docstring"""
__magic_name__ = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4... | 716 |
"""simple docstring"""
from itertools import count
def _A ( __lowercase = 50 ):
"""simple docstring"""
lowerCamelCase__ = [1] * min_block_length
for n in count(__lowercase ):
fill_count_functions.append(1 )
for block_l... | 258 | 0 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import depr... | 592 | from math import isqrt, loga
def lowerCAmelCase__ ( a__ ) ->list[int]:
'''simple docstring'''
_UpperCamelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , a__ , a__ ):
_Upper... | 547 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import Mo... | 152 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_availabl... | 152 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One... | 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib i... | 29 | 0 |
'''simple docstring'''
def _A ( __snake_case :Union[str, Any] , __snake_case :Any , __snake_case :List[str] , __snake_case :Optional[Any] ) -> str:
"""simple docstring"""
if height >= 1:
move_tower(height - 1 , __... | 704 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Optional[Any] = {'vocab_file': 'vocab.json'}
_snake_ca... | 214 | 0 |
import os
import sys
import unittest
UpperCamelCase__ : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_te... | 105 |
'''simple docstring'''
from collections.abc import Sequence
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = False ):
if not arr:
return 0
lowercase__ : Tuple = 0 if allow_empty_subarrays else float('''-inf''' )
lowercase__ : int = 0.0
for num i... | 152 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _lowerCAmelCase ( UpperCamelCase__: List[str] ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" ... | 546 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Any=5 ) -> Optional[Any]:
"""s... | 546 | 1 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
fro... | 262 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A ( pl.LightningModule ):
def __init__( self : Dict , __a : List[str] ... | 262 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
tr... | 371 |
import sys
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = [[0 for x in range(UpperCamelCase_ )] for x in range(UpperCamelCase_ )]
UpperCamelCase_ = [[0 for x in range(Upper... | 371 | 1 |
'''simple docstring'''
__magic_name__ = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
... | 665 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_co... | 663 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A ( lowercase , lowercase ) -> Dict:
... | 711 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
"vocab_file": ... | 3 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate... | 274 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( __snake_case : list[int | str]) -> None:
create_state_space_tree(__snake_case , [] , 0 , [0 for i in range(len(__snake_case))])
def snake_case_ ( __snake_case : list[int | str] , ... | 274 | 1 |
'''simple docstring'''
def __a ( A__ = 100_0000 ) -> int:
lowerCAmelCase = limit + 1
lowerCAmelCase = [0] * limit
for first_term in range(1 , A__ ):
for n in range(A__ , A__ , A__ ):
lowerCAmelCase = first_term + ... | 159 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : List[str] = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class ... | 159 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase__ ( __UpperCamelCase )-> Dict:... | 301 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE... | 301 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Optional[Any] = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case_ : Optional[Any] = ... | 710 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
snake_case_ : Any = logging.get_logger(__na... | 166 | 0 |
def a ( a ) ->Union[str, Any]:
'''simple docstring'''
if n_term == "":
return []
SCREAMING_SNAKE_CASE = []
for temp in range(int(lowerCAmelCase__ ) ):
series.append(F"""1/{temp + 1}""" if series else '''1''' )
return series
if __name__ == "__main__":
__lo... | 201 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF... | 521 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
_UpperCamelCase = list[list[float | int]]
def _lowerCAmelCase( UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ) -> Matrix:
lowerCAmelCase__ = len(... | 211 |
'''simple docstring'''
def _lowerCAmelCase( UpperCAmelCase_ : str ) -> int:
assert column_title.isupper()
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(UpperCAmelCase_ ) - 1
lowerCAmelCase__ = 0
while index >= 0:
... | 211 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
... | 346 |
'''simple docstring'''
from collections import defaultdict
def _lowerCAmelCase ( __magic_name__ : int ) -> int:
lowercase : Optional[Any] =1
lowercase : Union[str, Any] =True
for v in tree[start]:
if v not in visited:
... | 92 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta... | 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
p... | 681 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelT... | 577 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
... | 577 | 1 |
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'{price_plus_tax(100, 0.25) = }')
print(f'{price_plus_tax(1_25.50, 0.05) = }')
| 357 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowercase : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotA... | 357 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def lowercase ( a__ : Callable[[int | float], int | float] , a__ : int | float , a__ : int | float , a__ : int = 100 , ) -> float:
_Upp... | 420 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_mbart""": [... | 420 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Any ... | 710 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Dic... | 176 | 0 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord impor... | 142 |
def lowerCamelCase ( UpperCamelCase : int ) -> bool:
_lowerCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowerCamelCase ( UpperCamelCase : int = 50_00 ) -> int:
_lowerCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 ,... | 544 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe... | 718 |
def __lowerCAmelCase ( __lowerCamelCase : list ) -> list:
__lowerCAmelCase =False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase =True
for i in range(0 , len(__lowerCamelCase ) - 1 , 2 ): # iterating over ... | 456 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTe... | 33 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A_ : str = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': [... | 265 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
... | 703 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
__UpperCAmelCase : Union[str, Any] = _modexpt(UpperCamelCase ... | 487 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from .... | 351 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase__ (__UpperCAmelCase: ArgumentParser ) -> Tuple:
... | 351 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
a_ :str = namedtuple('covid_data', 'cases deaths recovered')
def a ( A__ = "https://www.worldometers.info/coronavirus/" ) -> Tuple:
'''simple docstring'''
SCREAMING_S... | 705 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from ... | 250 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Option... | 67 |
"""simple docstring"""
def _lowerCAmelCase ( ) -> int:
return [
a * b * (1_0_0_0 - a - b)
for a in range(1, 9_9_9 )
for b in range(lowerCamelCase__, 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solu... | 572 | 0 |
from string import ascii_lowercase, ascii_uppercase
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
if not sentence:
return ""
A__ = dict(zip(_lowerCamelCase , _lowerCamelCase ) )
return lower_to_upper.get(s... | 716 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon... | 177 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeMo... | 108 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )... | 34 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from... | 639 |
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # r... | 639 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin... | 575 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__SCREAMING_SNAKE_CASE =False
class UpperCamel... | 425 | 0 |
def __magic_name__ ( lowercase_ ) -> bool:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(lowercase_ ... | 701 |
def __magic_name__ ( lowercase_ ) -> str:
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():... | 5 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowercase__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
_... | 138 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( lowercase_ , unittest.TestCase ):
"""simple d... | 713 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
... | 592 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional impo... | 84 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
f... | 377 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_clip": [
... | 703 |
from collections import defaultdict
class _UpperCAmelCase :
def __init__( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int):
SCREAMING_SNAKE_CASE_ :Dict = total # total no of tasks (N)
# DP table will have a ... | 140 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Dict = '▁'
__UpperCAmelCase : Optional[Any] = {'vocab_file': ... | 471 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h... | 177 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
... | 703 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ... | 114 | 0 |
from sklearn.metrics import recall_score
import datasets
snake_case = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the fals... | 67 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __lowercase :
"""simple docstring"""
_A : float
_A : TreeNode | None = None
_A : TreeNode | None = None
def SCREA... | 480 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Union[str, Any] = ... | 701 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCamelCase : Dict = logging.get_logger(_... | 610 | 0 |
_lowercase: Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def _lowerCamelCase ( snake_case ):
# Make sure the supplied data is a bytes-like object
if not isinstance(snake_case , snake_case ):
        _lowerCAmelCase = F'a b... | 192 |
def _lowerCamelCase ( snake_case = 10 ):
if not isinstance(snake_case , snake_case ) or n < 0:
raise ValueError('Invalid input' )
_lowerCAmelCase = 10**n
_lowerCAmelCase = 28_433 * (pow(2 , 7_830_457 , snake_case )) + 1
return str(numbe... | 192 | 1 |
"""simple docstring"""
a : int = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowercase__(A ) ->str:
"""simple docstring"""
lowercase__ : str= {"*": op.mul, "/": op.truediv, "+": op.a... | 707 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_t... | 109 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modelin... | 109 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : str = logging.get_logger(__name__)
lowerCAmelCase__ : Any = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
... | 702 |
'''simple docstring'''
def __UpperCamelCase ( _UpperCAmelCase ):
stooge(_UpperCAmelCase, 0, len(_UpperCAmelCase ) - 1 )
return arr
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
if i >= h:
return
# If first element is smaller than the ... | 329 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
... | 472 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_commo... | 639 | 0 |
"""simple docstring"""
import numpy as np
class __A :
def __init__( self ):
_lowerCAmelCase : List[Any] = (0, 0)
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : str = 0
_lowerCAmelCase : List[Any] = 0
        _lowerC... | 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(... | 663 | 1 |
import unittest
from knapsack import knapsack as k
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> str:
snake_case_ = 0
snake_case_ = [0]
snake_case_ = ... | 198 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( ... | 450 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_com... | 704 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Optional[Any] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig'... | 145 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowercase_ : Union[str, Any] = TypeVar('''T''')
lowercase_ : Tuple = Union[List[T], Tuple[T, ...]]
lowercase_ : Any = Union[T, List[T], Dict[str, T]]
lowe... | 572 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import ta... | 572 | 1 |
import requests
from bsa import BeautifulSoup
def a ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ):
"""simple docstring"""
UpperCamelCase : Dict = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
UpperCamelCase : Any = ... | 643 |
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
UpperCamelCase : List[str] = len(bin(SCREAMING_SNAKE_CASE_ )[3:] )
... | 643 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ ):
_lowercase: str = AutoConfig.from_pretrained(UpperCamelCase__ )
_lowercase: Optio... | 226 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import log... | 616 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
snake_case = """src/transformers"""
# Matches is_xxx_available()
snake_case = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
snake_case = ... | 535 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterM... | 535 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase ( lowercase ):
... | 534 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
... | 534 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : Any = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyN... | 75 |
# Imports
import numpy as np
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any:
self.set_matricies(red=_SCRE... | 75 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip... | 477 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_r... | 477 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=7 ):
SCREAMING_SNAKE_CASE = None
if token is not None:
... | 647 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by a... | 647 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils impor... | 31 |
def __a ( SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
if n_element < 1:
__UpperCAmelCase = ValueError('''a should be a positive number''' )
raise my_error
__... | 303 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/... | 532 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
Robe... | 532 | 1 |
from __future__ import annotations
__UpperCamelCase : Optional[Any] = list[list[int]]
# assigning initial values to the grid
__UpperCamelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
... | 80 |
def _lowerCamelCase ( lowerCamelCase_: int ):
'''simple docstring'''
A : Any = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _lowerCamelCase ( lowerCamelCase_: int = 100 ):
... | 256 | 0 |
lowerCAmelCase_ = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder... | 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name i... | 669 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies ... | 75 |
def UpperCAmelCase_ ( _UpperCAmelCase :list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) < 2:
return collection
def circle_sort_util(_UpperCAmelCase :list , _UpperCAmelCase :int , _UpperCAmelCase :int ) -> bool:
... | 188 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : List[Any] = get_tests_dir("fixtures/s... | 604 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling... | 604 | 1 |
"""simple docstring"""
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simpl... | 196 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_S... | 196 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a : List[str] = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONF... | 706 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a : Any = logging.get_logger(__name__)
__a : Dict = {
"""hustvl/... | 522 | 0 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCamelCase_ : List[str] = '''src/transf... | 185 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCamelCase_ : Tuple = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understa... | 185 | 1 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common i... | 708 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( lowerCAmel... | 305 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floa... | 689 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_... | 689 | 1 |
'''simple docstring'''
snake_case_ = {str(digit): digit**5 for digit in range(10)}
def __lowercase (_SCREAMING_SNAKE_CASE :int ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_SCREAMING_SNAKE_CASE ) )
def __lowercase ():
return sum(
nu... | 355 |
'''simple docstring'''
from __future__ import annotations
def __lowercase (_SCREAMING_SNAKE_CASE :int | str ):
SCREAMING_SNAKE_CASE : int = str(_SCREAMING_SNAKE_CASE )
return n == n[::-1]
def __lowercase (_SCREAMING_SNAKE_CASE :int = 1_00_00_0... | 355 | 1 |
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : List[str] ):
'''simple docstring'''
lowerCamelCase_ = (boundary[1] - boundary[0]) / steps
lowerCamelCase_ = boundary[0]
lowerCamelCase_ = ... | 70 |
from collections import Counter
from timeit import timeit
def _SCREAMING_SNAKE_CASE ( lowercase : str = "" , ):
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def _SCREAMING_S... | 70 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
__a : Union[str, Any] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def UpperCAmelCase ( lowercase , lowercase ):
"""simple docstring"""
    for ... | 522 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : int = (EulerDiscreteScheduler,)
__a : Any ... | 522 | 1 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCAmelCase__ :
"""simple docstring"""
... | 688 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = (KDP... | 688 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase ( _lowerCAmelCase ):
'''simpl... | 713 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Any , snake_case: List[Any] , snake_case: List[Any]=None ... | 310 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_... | 259 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
while repunit:
_lowerCA... | 259 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependenc... | 702 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.... | 297 | 0 |
A : Tuple = [0, 2, 4, 6, 8]
A : Dict = [1, 3, 5, 7, 9]
def UpperCamelCase ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : int ) -> int:
"""simple docstring""... | 15 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"vocab_file": "vocab.json",
... | 419 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __lowerCamelCase ( A__ : Union[str, Any] ) ... | 171 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
re... | 171 | 1 |