# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| | """ Wav2Vec2 model configuration""" |
| |
|
| | import functools |
| | import operator |
| |
|
| | from transformers.configuration_utils import PretrainedConfig |
| | from transformers.utils import logging |
| |
|
| |
|
| | logger = logging.get_logger(__name__) |
| |
|
| | WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = { |
| | "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", |
| | |
| | } |
| |
|
| |
|
class Wav2Vec2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Wav2Vec2Model`]. It is used to instantiate a
    Wav2Vec2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Wav2Vec2
    [facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the Wav2Vec2 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`Wav2Vec2Model`] or [`TFWav2Vec2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in the feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the output of the feature encoder that's used by the quantizer.
        conv_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of the 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of the 1D convolutional positional embeddings layer.
        do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
            False` corresponds to applying layer norm after the attention layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
            procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
            actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. A worked
            example follows this argument list.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespectively of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
            mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
            masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks
            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
            overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment
            is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        mask_feature_min_masks (`int`, *optional*, defaults to 0):
            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
            step, irrespectively of `mask_feature_prob`. Only relevant if
            `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
        num_codevectors_per_group (`int`, *optional*, defaults to 320):
            Number of entries in each quantization codebook (group).
        num_codevector_groups (`int`, *optional*, defaults to 2):
            Number of codevector groups for product codevector quantization.
        contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
            The temperature *kappa* in the contrastive loss.
        num_negatives (`int`, *optional*, defaults to 100):
            Number of negative samples for the contrastive loss.
        codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the quantized feature vectors.
        proj_codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
            The weight of the codebook diversity loss component.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`Wav2Vec2ForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`Wav2Vec2ForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`Wav2Vec2ForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`Tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`Tuple[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
        tdnn_dilation (`Tuple[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
            A tuple of integers defining the dilation factor of each 1D convolutional layer in the *TDNN* module of
            the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
        xvector_output_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        add_adapter (`bool`, *optional*, defaults to `False`):
            Whether a convolutional network should be stacked on top of the Wav2Vec2 encoder. Can be very useful for
            warm-starting Wav2Vec2 for SpeechEncoderDecoder models.
        adapter_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        adapter_stride (`int`, *optional*, defaults to 2):
            Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        num_adapter_layers (`int`, *optional*, defaults to 3):
            Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
            True`.
        output_hidden_size (`int`, *optional*):
            Dimensionality of the encoder output layer. If not defined, this defaults to *hidden_size*. Only relevant
            if `add_adapter is True`.
        use_scan (`bool`, *optional*, defaults to `False`):
            Whether or not to use `nn.scan` in the Flax Wav2Vec2 transformer layers.
        fuse_matmuls (`bool`, *optional*, defaults to `False`):
            Whether or not to fuse matrix multiplications in the Flax Wav2Vec2 transformer layers.
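
    As a worked example of the time-axis masking arithmetic above, using only the defaults already listed: for a
    feature sequence of 100 time steps, `mask_time_prob * len(time_axis) / mask_time_length = 0.05 * 100 / 10 = 0.5`
    independent mask spans would be sampled, which falls below `mask_time_min_masks = 2`, so the minimum of 2 spans of
    `mask_time_length = 10` time steps each applies instead.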

    Example:

    ```python
    >>> from transformers import Wav2Vec2Model, Wav2Vec2Config

    >>> # Initializing a Wav2Vec2 facebook/wav2vec2-base-960h style configuration
    >>> configuration = Wav2Vec2Config()

    >>> # Initializing a model from the facebook/wav2vec2-base-960h style configuration
    >>> model = Wav2Vec2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
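
    >>> # The feature encoder downsamples raw audio by the product of the conv strides
    >>> configuration.inputs_to_logits_ratio
    320

    >>> # Defaults can be overridden at instantiation; the argument names below are real
    >>> # `Wav2Vec2Config` parameters, the values are purely illustrative
    >>> custom_configuration = Wav2Vec2Config(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)

    >>> # Configurations round-trip to disk through the `PretrainedConfig` API
    >>> custom_configuration.save_pretrained("./wav2vec2-custom")
    >>> reloaded_configuration = Wav2Vec2Config.from_pretrained("./wav2vec2-custom")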
| | ```""" |

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        use_scan=False,
        fuse_matmuls=False,
        **kwargs
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.use_scan = use_scan
        self.fuse_matmuls = fuse_matmuls

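        # the feature encoder requires one dim, one stride and one kernel size per conv layer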
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
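        """The number of raw input samples that map to a single encoder output frame, i.e. the product of all
        convolutional strides of the feature encoder. For the default `conv_stride` this is 5 * 2**6 = 320,
        one output frame per 20 ms of 16 kHz audio."""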
        return functools.reduce(operator.mul, self.conv_stride, 1)