Dataset columns, shown per record below in this order:
method_name: string, 3 to 45 characters
method_body: string, 9 to 6.25k characters
full_code: string, 35 to 7.02k characters
docstring: string, 18 to 4.7k characters (null when the method has no docstring)
load_weights
stacked_params_mapping = [('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if name == 'lm_head.weight': is_baichuan2 = self.config.vocab_size == 125696 if is_baichuan2: loaded_weight = torch.nn.functional.normalize(loaded_weight) for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('gate_up_proj', 'gate_proj', 0), ( 'gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if name == 'lm_head.weight': is_baichuan2 = self.config.vocab_size == 125696 if is_baichuan2: loaded_weight = torch.nn.functional.normalize(loaded_weight) for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
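In the load_weights above, stacked_params_mapping routes the checkpoint's separate gate_proj and up_proj tensors into the model's single fused gate_up_proj parameter; the shard id (0 or 1) selects which half of the fused weight receives each tensor. A minimal sketch of that routing, with hypothetical parameter names and sizes and a plain narrow/copy_ standing in for the real weight_loader:

import torch

# Hypothetical sizes; real models take these from their config.
hidden_size, intermediate_size = 8, 16
gate_up_proj = torch.empty(2 * intermediate_size, hidden_size)   # fused parameter
checkpoint = {
    'mlp.gate_proj.weight': torch.randn(intermediate_size, hidden_size),
    'mlp.up_proj.weight': torch.randn(intermediate_size, hidden_size),
}
stacked_params_mapping = [('gate_up_proj', 'gate_proj', 0),
                          ('gate_up_proj', 'up_proj', 1)]

for name, loaded_weight in checkpoint.items():
    for param_name, weight_name, shard_id in stacked_params_mapping:
        if weight_name not in name:
            continue
        # e.g. 'mlp.gate_proj.weight' fills shard 0 of 'mlp.gate_up_proj.weight'
        shard = gate_up_proj.narrow(0, shard_id * intermediate_size, intermediate_size)
        shard.copy_(loaded_weight)
        break

assert torch.equal(gate_up_proj[:intermediate_size], checkpoint['mlp.gate_proj.weight'])
assert torch.equal(gate_up_proj[intermediate_size:], checkpoint['mlp.up_proj.weight'])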
get_min_capability
return 70
def get_min_capability(self) ->int: return 70
null
in_wsl
return 'microsoft' in ' '.join(uname()).lower()
def in_wsl() ->bool: return 'microsoft' in ' '.join(uname()).lower()
null
convert_pyslice_to_tensor
"""convert PySafeSlice object from safetensors to torch.Tensor PySafeSlice object supports indexing, which is done before loading the actual tensor and can reduce the amount of memory being read into the memory. However, it does not support more advanced functionalities like `.view()` or `.t()`. Therefore, if we need to modify the loaded tensor with these more complicated operators, we need to convert to tensor first. """ if not isinstance(x, torch.Tensor): x = x[:] return x
def convert_pyslice_to_tensor(x: Any) ->torch.Tensor: """convert PySafeSlice object from safetensors to torch.Tensor PySafeSlice object supports indexing, which is done before loading the actual tensor and can reduce the amount of memory being read into the memory. However, it does not support more advanced functionalities like `.view()` or `.t()`. Therefore, if we need to modify the loaded tensor with these more complicated operators, we need to convert to tensor first. """ if not isinstance(x, torch.Tensor): x = x[:] return x
convert PySafeSlice object from safetensors to torch.Tensor PySafeSlice object supports indexing, which is done before loading the actual tensor and can reduce the amount of memory being read into the memory. However, it does not support more advanced functionalities like `.view()` or `.t()`. Therefore, if we need to modify the loaded tensor with these more complicated operators, we need to convert to tensor first.
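convert_pyslice_to_tensor deals with safetensors' lazy PySafeSlice objects, which support partial reads but must be materialized before operations like .view() or .t(). A small illustration using the safetensors API (the file name and tensor name here are arbitrary):

import torch
from safetensors.torch import save_file
from safetensors import safe_open

save_file({'w': torch.randn(4, 4)}, 'example.safetensors')
with safe_open('example.safetensors', framework='pt') as f:
    sl = f.get_slice('w')   # PySafeSlice: indexing reads only the requested region
    part = sl[:, :2]        # partial read, returned as a torch.Tensor
    full = sl[:]            # materialize everything before using .view() / .t()
assert isinstance(full, torch.Tensor) and part.shape == (4, 2)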
_forward
"""PyTorch-native implementation equivalent to forward().""" d = x.shape[-1] // 2 return F.silu(x[..., :d]) * x[..., d:]
def _forward(self, x: torch.Tensor) ->torch.Tensor: """PyTorch-native implementation equivalent to forward().""" d = x.shape[-1] // 2 return F.silu(x[..., :d]) * x[..., d:]
PyTorch-native implementation equivalent to forward().
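This _forward is the reference SiluAndMul: the last dimension holds the gate and up projections concatenated, and the output is silu(gate) * up, halving that dimension. A quick standalone check with plain torch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 10)                 # last dim is 2 * d
d = x.shape[-1] // 2
out = F.silu(x[..., :d]) * x[..., d:]
assert out.shape == (2, 4, 5)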
__init__
self.parent_seq_id = parent_seq_id self.output_token = output_token self.logprobs = logprobs
def __init__(self, parent_seq_id: int, output_token: int, logprobs: Dict[ int, float]) ->None: self.parent_seq_id = parent_seq_id self.output_token = output_token self.logprobs = logprobs
null
_convert_id_to_token
"""Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token
def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token
Converts an index (integer) in a token (str) using the vocab.
__init__
"""The MPT configuration class. Args: d_model (int): The size of the embedding dimension of the model. n_heads (int): The number of attention heads. n_layers (int): The number of layers in the model. expansion_ratio (int): The ratio of the up/down scale in the ffn. max_seq_len (int): The maximum sequence length of the model. vocab_size (int): The size of the vocabulary. resid_pdrop (float): The dropout probability applied to the attention output before combining with residual. emb_pdrop (float): The dropout probability for the embedding layer. learned_pos_emb (bool): Whether to use learned positional embeddings attn_config (Dict): A dictionary used to configure the model's attention module: attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention, grouped_query_attention attn_pdrop (float): The dropout probability for the attention layers. attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'. qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer. clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to this value. softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None, use the default scale of ``1/sqrt(d_keys)``. prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally. Tokens outside the prefix use causal attention. attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id. When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates which sub-sequence each token belongs to. Defaults to ``False`` meaning any provided `sequence_id` will be ignored. alibi (bool): Whether to use the alibi bias instead of position embeddings. alibi_bias_max (int): The maximum value of the alibi bias. kv_n_heads (Optional[int]): For grouped_query_attention only, allow user to specify number of kv heads. ffn_config (Dict): A dictionary used to configure the model's ffn module: ffn_type (str): type of ffn to use. Options: mptmlp, te_ln_mlp init_device (str): The device to use for parameter initialization. logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value. no_bias (bool): Whether to use bias in all layers. verbose (int): The verbosity level. 0 is silent. embedding_fraction (float): The fraction to scale the gradients of the embedding layer by. norm_type (str): choose type of norm to use use_cache (bool): Whether or not the model should return the last key/values attentions init_config (Dict): A dictionary used to configure the model initialization: init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_', 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch. init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True. emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer. 
emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``. init_std (float): The standard deviation of the normal distribution used to initialize the model, if using the baseline_ parameter initialization scheme. init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes. fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes. init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes. --- See llmfoundry.models.utils.param_init_fns.py for info on other param init config options fc_type (str): choose fc layer implementation. Options: torch and te. te layers support fp8 when using H100 GPUs. """ self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.expansion_ratio = expansion_ratio self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = emb_pdrop self.learned_pos_emb = learned_pos_emb self.attn_config = attn_config self.ffn_config = ffn_config self.init_device = init_device self.logit_scale = logit_scale self.no_bias = no_bias self.embedding_fraction = embedding_fraction self.norm_type = norm_type self.use_cache = use_cache self.init_config = init_config self.fc_type = fc_type if verbose is not None: warnings.warn(DeprecationWarning( 'verbose argument for MPTConfig is now ignored and will be removed. Use python_log_level instead.' ), stacklevel=2) if 'name' in kwargs: del kwargs['name'] if 'loss_fn' in kwargs: del kwargs['loss_fn'] if self.attn_config.get('alibi', False): self.learned_pos_emb = False warnings.warn( f'alibi is turned on, setting `learned_pos_emb` to {self.learned_pos_emb}`' , stacklevel=2) super().__init__(**kwargs) self._validate_config()
def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool= True, attn_config: Dict=attn_config_defaults, ffn_config: Dict= ffn_config_defaults, init_device: str='cpu', logit_scale: Optional[ Union[float, str]]=None, no_bias: bool=False, embedding_fraction: float =1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, fc_type: str='torch', verbose: Optional[int]=None, **kwargs: Any): """The MPT configuration class. Args: d_model (int): The size of the embedding dimension of the model. n_heads (int): The number of attention heads. n_layers (int): The number of layers in the model. expansion_ratio (int): The ratio of the up/down scale in the ffn. max_seq_len (int): The maximum sequence length of the model. vocab_size (int): The size of the vocabulary. resid_pdrop (float): The dropout probability applied to the attention output before combining with residual. emb_pdrop (float): The dropout probability for the embedding layer. learned_pos_emb (bool): Whether to use learned positional embeddings attn_config (Dict): A dictionary used to configure the model's attention module: attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention, grouped_query_attention attn_pdrop (float): The dropout probability for the attention layers. attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'. qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer. clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to this value. softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None, use the default scale of ``1/sqrt(d_keys)``. prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally. Tokens outside the prefix use causal attention. attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id. When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates which sub-sequence each token belongs to. Defaults to ``False`` meaning any provided `sequence_id` will be ignored. alibi (bool): Whether to use the alibi bias instead of position embeddings. alibi_bias_max (int): The maximum value of the alibi bias. kv_n_heads (Optional[int]): For grouped_query_attention only, allow user to specify number of kv heads. ffn_config (Dict): A dictionary used to configure the model's ffn module: ffn_type (str): type of ffn to use. Options: mptmlp, te_ln_mlp init_device (str): The device to use for parameter initialization. logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value. no_bias (bool): Whether to use bias in all layers. verbose (int): The verbosity level. 0 is silent. embedding_fraction (float): The fraction to scale the gradients of the embedding layer by. 
norm_type (str): choose type of norm to use use_cache (bool): Whether or not the model should return the last key/values attentions init_config (Dict): A dictionary used to configure the model initialization: init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_', 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch. init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True. emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer. emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``. init_std (float): The standard deviation of the normal distribution used to initialize the model, if using the baseline_ parameter initialization scheme. init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes. fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes. init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes. --- See llmfoundry.models.utils.param_init_fns.py for info on other param init config options fc_type (str): choose fc layer implementation. Options: torch and te. te layers support fp8 when using H100 GPUs. """ self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.expansion_ratio = expansion_ratio self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = emb_pdrop self.learned_pos_emb = learned_pos_emb self.attn_config = attn_config self.ffn_config = ffn_config self.init_device = init_device self.logit_scale = logit_scale self.no_bias = no_bias self.embedding_fraction = embedding_fraction self.norm_type = norm_type self.use_cache = use_cache self.init_config = init_config self.fc_type = fc_type if verbose is not None: warnings.warn(DeprecationWarning( 'verbose argument for MPTConfig is now ignored and will be removed. Use python_log_level instead.' ), stacklevel=2) if 'name' in kwargs: del kwargs['name'] if 'loss_fn' in kwargs: del kwargs['loss_fn'] if self.attn_config.get('alibi', False): self.learned_pos_emb = False warnings.warn( f'alibi is turned on, setting `learned_pos_emb` to {self.learned_pos_emb}`' , stacklevel=2) super().__init__(**kwargs) self._validate_config()
The MPT configuration class. Args: d_model (int): The size of the embedding dimension of the model. n_heads (int): The number of attention heads. n_layers (int): The number of layers in the model. expansion_ratio (int): The ratio of the up/down scale in the ffn. max_seq_len (int): The maximum sequence length of the model. vocab_size (int): The size of the vocabulary. resid_pdrop (float): The dropout probability applied to the attention output before combining with residual. emb_pdrop (float): The dropout probability for the embedding layer. learned_pos_emb (bool): Whether to use learned positional embeddings attn_config (Dict): A dictionary used to configure the model's attention module: attn_type (str): type of attention to use. Options: multihead_attention, multiquery_attention, grouped_query_attention attn_pdrop (float): The dropout probability for the attention layers. attn_impl (str): The attention implementation to use. One of 'torch', 'flash', or 'triton'. qk_ln (bool): Whether to apply layer normalization to the queries and keys in the attention layer. clip_qkv (Optional[float]): If not None, clip the queries, keys, and values in the attention layer to this value. softmax_scale (Optional[float]): If not None, scale the softmax in the attention layer by this value. If None, use the default scale of ``1/sqrt(d_keys)``. prefix_lm (Optional[bool]): Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another bi-directionally. Tokens outside the prefix use causal attention. attn_uses_sequence_id (Optional[bool]): Whether to restrict attention to tokens that have the same sequence_id. When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates which sub-sequence each token belongs to. Defaults to ``False`` meaning any provided `sequence_id` will be ignored. alibi (bool): Whether to use the alibi bias instead of position embeddings. alibi_bias_max (int): The maximum value of the alibi bias. kv_n_heads (Optional[int]): For grouped_query_attention only, allow user to specify number of kv heads. ffn_config (Dict): A dictionary used to configure the model's ffn module: ffn_type (str): type of ffn to use. Options: mptmlp, te_ln_mlp init_device (str): The device to use for parameter initialization. logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value. no_bias (bool): Whether to use bias in all layers. verbose (int): The verbosity level. 0 is silent. embedding_fraction (float): The fraction to scale the gradients of the embedding layer by. norm_type (str): choose type of norm to use use_cache (bool): Whether or not the model should return the last key/values attentions init_config (Dict): A dictionary used to configure the model initialization: init_config.name: The parameter initialization scheme to use. Options: 'default_', 'baseline_', 'kaiming_uniform_', 'kaiming_normal_', 'neox_init_', 'small_init_', 'xavier_uniform_', or 'xavier_normal_'. These mimic the parameter initialization methods in PyTorch. init_div_is_residual (Union[int, float, str, bool]): Value to divide initial weights by if ``module._is_residual`` is True. emb_init_std (Optional[float]): The standard deviation of the normal distribution used to initialize the embedding layer. 
emb_init_uniform_lim (Optional[Union[Tuple[float, float], float]]): The lower and upper limits of the uniform distribution used to initialize the embedding layer. Mutually exclusive with ``emb_init_std``. init_std (float): The standard deviation of the normal distribution used to initialize the model, if using the baseline_ parameter initialization scheme. init_gain (float): The gain to use for parameter initialization with kaiming or xavier initialization schemes. fan_mode (str): The fan mode to use for parameter initialization with kaiming initialization schemes. init_nonlinearity (str): The nonlinearity to use for parameter initialization with kaiming initialization schemes. --- See llmfoundry.models.utils.param_init_fns.py for info on other param init config options fc_type (str): choose fc layer implementation. Options: torch and te. te layers support fp8 when using H100 GPUs.
__init__
super().__init__(vocab_size=vocab_size) self.fake_logits = fake_logits
def __init__(self, vocab_size: int, fake_logits: torch.Tensor): super().__init__(vocab_size=vocab_size) self.fake_logits = fake_logits
null
vocab_range_from_per_partition_vocab_size
index_f = rank * per_partition_vocab_size index_l = index_f + per_partition_vocab_size return index_f, index_l
def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size: int, rank: int) ->Sequence[int]: index_f = rank * per_partition_vocab_size index_l = index_f + per_partition_vocab_size return index_f, index_l
null
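The returned pair is the half-open [first, last) slice of the vocabulary owned by one tensor-parallel rank. For example, with a 16000-token partition, rank 2 owns indices 32000 to 48000:

per_partition_vocab_size, rank = 16000, 2
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
assert (index_f, index_l) == (32000, 48000)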
__repr__
return f'SequenceGroup(request_id={self.request_id}, sampling_params={self.sampling_params}, num_seqs={len(self.seqs_dict)})'
def __repr__(self) ->str: return ( f'SequenceGroup(request_id={self.request_id}, sampling_params={self.sampling_params}, num_seqs={len(self.seqs_dict)})' )
null
apply_weights
qweight = weights['qweight'] out_shape = x.shape[:-1] + (qweight.shape[-1],) reshaped_x = x.reshape(-1, x.shape[-1]) if weights['exllama_state'] == ExllamaState.UNINITIALIZED: if self.quant_config.desc_act: weights['g_idx'] = torch.argsort(weights['g_idx']).to(torch.int) else: weights['g_idx'] = torch.empty((1, 1), device='meta') weights['exllama_state'] = ExllamaState.READY ops.gptq_shuffle(weights['qweight'], weights['g_idx']) output = ops.gptq_gemm(reshaped_x, weights['qweight'], weights['qzeros'], weights['scales'], weights['g_idx'], weights['exllama_state'] == ExllamaState.READY) if bias is not None: output = output + bias return output.reshape(out_shape)
def apply_weights(self, weights: Dict[str, Any], x: torch.Tensor, bias: Optional[torch.Tensor]=None) ->torch.Tensor: qweight = weights['qweight'] out_shape = x.shape[:-1] + (qweight.shape[-1],) reshaped_x = x.reshape(-1, x.shape[-1]) if weights['exllama_state'] == ExllamaState.UNINITIALIZED: if self.quant_config.desc_act: weights['g_idx'] = torch.argsort(weights['g_idx']).to(torch.int) else: weights['g_idx'] = torch.empty((1, 1), device='meta') weights['exllama_state'] = ExllamaState.READY ops.gptq_shuffle(weights['qweight'], weights['g_idx']) output = ops.gptq_gemm(reshaped_x, weights['qweight'], weights['qzeros' ], weights['scales'], weights['g_idx'], weights['exllama_state'] == ExllamaState.READY) if bias is not None: output = output + bias return output.reshape(out_shape)
null
forward
qkv, _ = self.W_pack(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) if self.postion_embedding != 'ALIBI': q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.W_pack(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) if self.postion_embedding != 'ALIBI': q, k = self.rotary_emb(positions, q, k) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.o_proj(attn_output) return output
null
__init__
self.request_id = request_id self.is_prompt = is_prompt self.seq_data = seq_data self.sampling_params = sampling_params self.block_tables = block_tables
def __init__(self, request_id: str, is_prompt: bool, seq_data: Dict[int, SequenceData], sampling_params: SamplingParams, block_tables: Dict[int, List[int]]) ->None: self.request_id = request_id self.is_prompt = is_prompt self.seq_data = seq_data self.sampling_params = sampling_params self.block_tables = block_tables
null
post_http_request
headers = {'User-Agent': 'Test Client'} pload = {'prompt': prompt, 'n': n, 'use_beam_search': True, 'temperature': 0.0, 'max_tokens': 16, 'stream': stream} response = requests.post(api_url, headers=headers, json=pload, stream=True) return response
def post_http_request(prompt: str, api_url: str, n: int=1, stream: bool=False ) ->requests.Response: headers = {'User-Agent': 'Test Client'} pload = {'prompt': prompt, 'n': n, 'use_beam_search': True, 'temperature': 0.0, 'max_tokens': 16, 'stream': stream} response = requests.post(api_url, headers=headers, json=pload, stream=True) return response
null
load_weights
params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
get_torch_arch_list
env_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None) if env_arch_list is None: return set() torch_arch_list = set(env_arch_list.replace(' ', ';').split(';')) if not torch_arch_list: return set() valid_archs = NVIDIA_SUPPORTED_ARCHS.union({(s + '+PTX') for s in NVIDIA_SUPPORTED_ARCHS}) arch_list = torch_arch_list.intersection(valid_archs) if not arch_list: raise RuntimeError( f'None of the CUDA/ROCM architectures in `TORCH_CUDA_ARCH_LIST` env variable ({env_arch_list}) is supported. Supported CUDA/ROCM architectures are: {valid_archs}.' ) invalid_arch_list = torch_arch_list - valid_archs if invalid_arch_list: warnings.warn( f'Unsupported CUDA/ROCM architectures ({invalid_arch_list}) are excluded from the `TORCH_CUDA_ARCH_LIST` env variable ({env_arch_list}). Supported CUDA/ROCM architectures are: {valid_archs}.' , stacklevel=2) return arch_list
def get_torch_arch_list() ->Set[str]: env_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None) if env_arch_list is None: return set() torch_arch_list = set(env_arch_list.replace(' ', ';').split(';')) if not torch_arch_list: return set() valid_archs = NVIDIA_SUPPORTED_ARCHS.union({(s + '+PTX') for s in NVIDIA_SUPPORTED_ARCHS}) arch_list = torch_arch_list.intersection(valid_archs) if not arch_list: raise RuntimeError( f'None of the CUDA/ROCM architectures in `TORCH_CUDA_ARCH_LIST` env variable ({env_arch_list}) is supported. Supported CUDA/ROCM architectures are: {valid_archs}.' ) invalid_arch_list = torch_arch_list - valid_archs if invalid_arch_list: warnings.warn( f'Unsupported CUDA/ROCM architectures ({invalid_arch_list}) are excluded from the `TORCH_CUDA_ARCH_LIST` env variable ({env_arch_list}). Supported CUDA/ROCM architectures are: {valid_archs}.' , stacklevel=2) return arch_list
null
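get_torch_arch_list accepts TORCH_CUDA_ARCH_LIST entries separated by spaces or semicolons, optionally suffixed with '+PTX', and intersects them with the supported set. A sketch of just that parsing step, with an illustrative stand-in for NVIDIA_SUPPORTED_ARCHS:

import os

NVIDIA_SUPPORTED_ARCHS = {'7.0', '7.5', '8.0', '8.6', '8.9', '9.0'}   # stand-in set
os.environ['TORCH_CUDA_ARCH_LIST'] = '7.5 8.0;8.6+PTX'

requested = set(os.environ['TORCH_CUDA_ARCH_LIST'].replace(' ', ';').split(';'))
valid_archs = NVIDIA_SUPPORTED_ARCHS | {s + '+PTX' for s in NVIDIA_SUPPORTED_ARCHS}
assert requested & valid_archs == {'7.5', '8.0', '8.6+PTX'}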
__init__
super().__init__() self.config = config self.linear_method = linear_method self.model = OPTModel(config, linear_method) self.lm_head_weight = self.model.decoder.embed_tokens.weight self.sampler = Sampler(config.vocab_size)
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.model = OPTModel(config, linear_method) self.lm_head_weight = self.model.decoder.embed_tokens.weight self.sampler = Sampler(config.vocab_size)
null
run_hf
assert not use_beam_search llm = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.float16, trust_remote_code=trust_remote_code) if llm.config.model_type == 'llama': tokenizer.pad_token = tokenizer.eos_token llm = llm.cuda() pbar = tqdm(total=len(requests)) start = time.perf_counter() batch: List[str] = [] max_prompt_len = 0 max_output_len = 0 for i in range(len(requests)): prompt, prompt_len, output_len = requests[i] batch.append(prompt) max_prompt_len = max(max_prompt_len, prompt_len) max_output_len = max(max_output_len, output_len) if len(batch) < max_batch_size and i != len(requests) - 1: _, next_prompt_len, next_output_len = requests[i + 1] if max(max_prompt_len, next_prompt_len) + max(max_output_len, next_output_len) <= 2048: continue input_ids = tokenizer(batch, return_tensors='pt', padding=True).input_ids llm_outputs = llm.generate(input_ids=input_ids.cuda(), do_sample=not use_beam_search, num_return_sequences=n, temperature=1.0, top_p=1.0, use_cache=True, max_new_tokens=max_output_len) tokenizer.batch_decode(llm_outputs, skip_special_tokens=True) pbar.update(len(batch)) batch = [] max_prompt_len = 0 max_output_len = 0 end = time.perf_counter() return end - start
def run_hf(requests: List[Tuple[str, int, int]], model: str, tokenizer: PreTrainedTokenizerBase, n: int, use_beam_search: bool, max_batch_size: int, trust_remote_code: bool) ->float: assert not use_beam_search llm = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch. float16, trust_remote_code=trust_remote_code) if llm.config.model_type == 'llama': tokenizer.pad_token = tokenizer.eos_token llm = llm.cuda() pbar = tqdm(total=len(requests)) start = time.perf_counter() batch: List[str] = [] max_prompt_len = 0 max_output_len = 0 for i in range(len(requests)): prompt, prompt_len, output_len = requests[i] batch.append(prompt) max_prompt_len = max(max_prompt_len, prompt_len) max_output_len = max(max_output_len, output_len) if len(batch) < max_batch_size and i != len(requests) - 1: _, next_prompt_len, next_output_len = requests[i + 1] if max(max_prompt_len, next_prompt_len) + max(max_output_len, next_output_len) <= 2048: continue input_ids = tokenizer(batch, return_tensors='pt', padding=True ).input_ids llm_outputs = llm.generate(input_ids=input_ids.cuda(), do_sample= not use_beam_search, num_return_sequences=n, temperature=1.0, top_p=1.0, use_cache=True, max_new_tokens=max_output_len) tokenizer.batch_decode(llm_outputs, skip_special_tokens=True) pbar.update(len(batch)) batch = [] max_prompt_len = 0 max_output_len = 0 end = time.perf_counter() return end - start
null
forward
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
_prepare_prompt
assert len(seq_group_metadata_list) > 0 input_tokens: List[List[int]] = [] input_positions: List[List[int]] = [] slot_mapping: List[List[int]] = [] prompt_lens: List[int] = [] for seq_group_metadata in seq_group_metadata_list: assert seq_group_metadata.is_prompt seq_ids = list(seq_group_metadata.seq_data.keys()) assert len(seq_ids) == 1 seq_id = seq_ids[0] seq_data = seq_group_metadata.seq_data[seq_id] prompt_tokens = seq_data.get_token_ids() prompt_len = len(prompt_tokens) prompt_lens.append(prompt_len) input_tokens.append(prompt_tokens) input_positions.append(list(range(prompt_len))) if seq_group_metadata.block_tables is None: slot_mapping.append([_PAD_SLOT_ID] * prompt_len) continue slot_mapping.append([]) block_table = seq_group_metadata.block_tables[seq_id] start_idx = 0 if self.sliding_window is not None: start_idx = max(0, prompt_len - self.sliding_window) for i in range(prompt_len): if i < start_idx: slot_mapping[-1].append(_PAD_SLOT_ID) continue block_number = block_table[i // self.block_size] block_offset = i % self.block_size slot = block_number * self.block_size + block_offset slot_mapping[-1].append(slot) max_prompt_len = max(prompt_lens) input_tokens = _make_tensor_with_pad(input_tokens, max_prompt_len, pad=0, dtype=torch.long) input_positions = _make_tensor_with_pad(input_positions, max_prompt_len, pad=0, dtype=torch.long) slot_mapping = _make_tensor_with_pad(slot_mapping, max_prompt_len, pad= _PAD_SLOT_ID, dtype=torch.long) input_metadata = InputMetadata(is_prompt=True, slot_mapping=slot_mapping, max_context_len=None, context_lens=None, block_tables=None, use_cuda_graph=False) return input_tokens, input_positions, input_metadata, prompt_lens
def _prepare_prompt(self, seq_group_metadata_list: List[SequenceGroupMetadata] ) ->Tuple[torch.Tensor, torch.Tensor, InputMetadata, List[int]]: assert len(seq_group_metadata_list) > 0 input_tokens: List[List[int]] = [] input_positions: List[List[int]] = [] slot_mapping: List[List[int]] = [] prompt_lens: List[int] = [] for seq_group_metadata in seq_group_metadata_list: assert seq_group_metadata.is_prompt seq_ids = list(seq_group_metadata.seq_data.keys()) assert len(seq_ids) == 1 seq_id = seq_ids[0] seq_data = seq_group_metadata.seq_data[seq_id] prompt_tokens = seq_data.get_token_ids() prompt_len = len(prompt_tokens) prompt_lens.append(prompt_len) input_tokens.append(prompt_tokens) input_positions.append(list(range(prompt_len))) if seq_group_metadata.block_tables is None: slot_mapping.append([_PAD_SLOT_ID] * prompt_len) continue slot_mapping.append([]) block_table = seq_group_metadata.block_tables[seq_id] start_idx = 0 if self.sliding_window is not None: start_idx = max(0, prompt_len - self.sliding_window) for i in range(prompt_len): if i < start_idx: slot_mapping[-1].append(_PAD_SLOT_ID) continue block_number = block_table[i // self.block_size] block_offset = i % self.block_size slot = block_number * self.block_size + block_offset slot_mapping[-1].append(slot) max_prompt_len = max(prompt_lens) input_tokens = _make_tensor_with_pad(input_tokens, max_prompt_len, pad= 0, dtype=torch.long) input_positions = _make_tensor_with_pad(input_positions, max_prompt_len, pad=0, dtype=torch.long) slot_mapping = _make_tensor_with_pad(slot_mapping, max_prompt_len, pad= _PAD_SLOT_ID, dtype=torch.long) input_metadata = InputMetadata(is_prompt=True, slot_mapping= slot_mapping, max_context_len=None, context_lens=None, block_tables =None, use_cuda_graph=False) return input_tokens, input_positions, input_metadata, prompt_lens
null
get_config_filenames
return ['quant_config.json']
@staticmethod def get_config_filenames() ->List[str]: return ['quant_config.json']
null
_paged_attention
output = torch.empty_like(query) block_size = value_cache.shape[3] num_seqs, num_heads, head_size = query.shape max_num_partitions = (input_metadata.max_context_len + _PARTITION_SIZE - 1 ) // _PARTITION_SIZE use_v1 = input_metadata.max_context_len <= 8192 and (max_num_partitions == 1 or num_seqs * num_heads > 512) if use_v1: ops.paged_attention_v1(output, query, key_cache, value_cache, num_kv_heads, scale, input_metadata.block_tables, input_metadata. context_lens, block_size, input_metadata.max_context_len, alibi_slopes) else: assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty(size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=output.dtype, device=output.device) exp_sums = torch.empty(size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=output.device) max_logits = torch.empty_like(exp_sums) ops.paged_attention_v2(output, exp_sums, max_logits, tmp_output, query, key_cache, value_cache, num_kv_heads, scale, input_metadata. block_tables, input_metadata.context_lens, block_size, input_metadata.max_context_len, alibi_slopes) return output
def _paged_attention(query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, input_metadata: InputMetadata, num_kv_heads: int, scale: float, alibi_slopes: Optional[torch.Tensor]) ->torch.Tensor: output = torch.empty_like(query) block_size = value_cache.shape[3] num_seqs, num_heads, head_size = query.shape max_num_partitions = (input_metadata.max_context_len + _PARTITION_SIZE - 1 ) // _PARTITION_SIZE use_v1 = input_metadata.max_context_len <= 8192 and (max_num_partitions == 1 or num_seqs * num_heads > 512) if use_v1: ops.paged_attention_v1(output, query, key_cache, value_cache, num_kv_heads, scale, input_metadata.block_tables, input_metadata.context_lens, block_size, input_metadata. max_context_len, alibi_slopes) else: assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty(size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=output.dtype, device= output.device) exp_sums = torch.empty(size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=output.device) max_logits = torch.empty_like(exp_sums) ops.paged_attention_v2(output, exp_sums, max_logits, tmp_output, query, key_cache, value_cache, num_kv_heads, scale, input_metadata.block_tables, input_metadata.context_lens, block_size, input_metadata.max_context_len, alibi_slopes) return output
null
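The v1/v2 choice in _paged_attention is pure arithmetic over the context length and batch shape; the kernels themselves are vLLM CUDA ops. Assuming a partition size of 512 (the constant itself is not shown here), a 1024-token context with 8 sequences of 32 heads falls through to the v2 kernel:

# Dispatch arithmetic only; ops.paged_attention_v1/v2 are CUDA kernels.
_PARTITION_SIZE = 512                      # assumed value, for illustration
max_context_len, num_seqs, num_heads = 1024, 8, 32

max_num_partitions = (max_context_len + _PARTITION_SIZE - 1) // _PARTITION_SIZE
use_v1 = max_context_len <= 8192 and (max_num_partitions == 1
                                      or num_seqs * num_heads > 512)
assert max_num_partitions == 2 and not use_v1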
_apply_logits_processors
logits_row_idx = 0 found_logits_processors = False for seq_ids, sampling_params in sampling_metadata.seq_groups: logits_processors = sampling_params.logits_processors if logits_processors: found_logits_processors = True for seq_id in seq_ids: logits_row = logits[logits_row_idx] token_ids = sampling_metadata.seq_data[seq_id].output_token_ids for logits_processor in logits_processors: logits_row = logits_processor(token_ids, logits_row) logits[logits_row_idx] = logits_row logits_row_idx += 1 else: logits_row_idx += len(seq_ids) if found_logits_processors: assert logits_row_idx == logits.shape[0] return logits
def _apply_logits_processors(logits: torch.Tensor, sampling_metadata: SamplingMetadata) ->torch.Tensor: logits_row_idx = 0 found_logits_processors = False for seq_ids, sampling_params in sampling_metadata.seq_groups: logits_processors = sampling_params.logits_processors if logits_processors: found_logits_processors = True for seq_id in seq_ids: logits_row = logits[logits_row_idx] token_ids = sampling_metadata.seq_data[seq_id].output_token_ids for logits_processor in logits_processors: logits_row = logits_processor(token_ids, logits_row) logits[logits_row_idx] = logits_row logits_row_idx += 1 else: logits_row_idx += len(seq_ids) if found_logits_processors: assert logits_row_idx == logits.shape[0] return logits
null
forward
qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) key_cache, value_cache = kv_cache attn_output = self.attn(q, k, v, key_cache, value_cache, input_metadata) output, _ = self.out_proj(attn_output) return output
def forward(self, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) key_cache, value_cache = kv_cache attn_output = self.attn(q, k, v, key_cache, value_cache, input_metadata) output, _ = self.out_proj(attn_output) return output
null
__init__
super().__init__() self.config = config self.linear_method = linear_method self.model = LlamaModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: LlamaConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.config = config self.linear_method = linear_method self.model = LlamaModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
null
record_metrics
gauge_avg_prompt_throughput.set(labels, avg_prompt_throughput) gauge_avg_generation_throughput.set(labels, avg_generation_throughput) gauge_scheduler_running.set(labels, scheduler_running) gauge_scheduler_swapped.set(labels, scheduler_swapped) gauge_scheduler_waiting.set(labels, scheduler_waiting) gauge_gpu_cache_usage.set(labels, gpu_cache_usage) gauge_cpu_cache_usage.set(labels, cpu_cache_usage)
def record_metrics(avg_prompt_throughput: float, avg_generation_throughput: float, scheduler_running: int, scheduler_swapped: int, scheduler_waiting: int, gpu_cache_usage: float, cpu_cache_usage: float): gauge_avg_prompt_throughput.set(labels, avg_prompt_throughput) gauge_avg_generation_throughput.set(labels, avg_generation_throughput) gauge_scheduler_running.set(labels, scheduler_running) gauge_scheduler_swapped.set(labels, scheduler_swapped) gauge_scheduler_waiting.set(labels, scheduler_waiting) gauge_gpu_cache_usage.set(labels, gpu_cache_usage) gauge_cpu_cache_usage.set(labels, cpu_cache_usage)
null
get_config_filenames
"""List of filenames to search for in the model directory.""" raise NotImplementedError
@staticmethod @abstractmethod def get_config_filenames() ->List[str]: """List of filenames to search for in the model directory.""" raise NotImplementedError
List of filenames to search for in the model directory.
get_num_empty_slots
return self.block_size - self.num_tokens
def get_num_empty_slots(self) ->int: return self.block_size - self.num_tokens
null
get_beam_search_score
"""Calculate the beam search score with length penalty. Adapted from https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938 """ if seq_len is None: seq_len = self.get_len() if eos_token_id is not None and self.get_last_token_id() == eos_token_id: seq_len -= 1 return self.get_cumulative_logprob() / seq_len ** length_penalty
def get_beam_search_score(self, length_penalty: float=0.0, seq_len: Optional[int]=None, eos_token_id: Optional[int]=None) ->float: """Calculate the beam search score with length penalty. Adapted from https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938 """ if seq_len is None: seq_len = self.get_len() if eos_token_id is not None and self.get_last_token_id( ) == eos_token_id: seq_len -= 1 return self.get_cumulative_logprob() / seq_len ** length_penalty
Calculate the beam search score with length penalty. Adapted from https://github.com/huggingface/transformers/blob/ccb92be23def445f2afdea94c31286f84b89eb5b/src/transformers/generation/beam_search.py#L938
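Concretely, the beam search score is cumulative_logprob / seq_len ** length_penalty, with a trailing EOS token excluded from the length. For a sequence of 7 non-EOS tokens with cumulative logprob -6.0 and length_penalty 1.0:

cumulative_logprob, seq_len, length_penalty = -6.0, 7, 1.0   # EOS already excluded
score = cumulative_logprob / seq_len ** length_penalty
assert round(score, 4) == -0.8571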
__init__
self.model = model self.tokenizer = tokenizer self.tokenizer_mode = tokenizer_mode self.trust_remote_code = trust_remote_code self.download_dir = download_dir self.load_format = load_format self.seed = seed self.revision = revision self.tokenizer_revision = tokenizer_revision self.quantization = quantization self.enforce_eager = enforce_eager self.max_context_len_to_capture = max_context_len_to_capture if os.environ.get('VLLM_USE_MODELSCOPE', 'False').lower() == 'true': from modelscope.hub.snapshot_download import snapshot_download model_path = snapshot_download(model_id=model, cache_dir=download_dir, revision=revision) self.model = model_path self.download_dir = model_path self.tokenizer = model_path self.hf_config = get_config(self.model, trust_remote_code, revision) self.dtype = _get_and_verify_dtype(self.hf_config, dtype) self.max_model_len = _get_and_verify_max_len(self.hf_config, max_model_len) self._verify_load_format() self._verify_tokenizer_mode() self._verify_quantization() self._verify_cuda_graph()
def __init__(self, model: str, tokenizer: str, tokenizer_mode: str, trust_remote_code: bool, download_dir: Optional[str], load_format: str, dtype: Union[str, torch.dtype], seed: int, revision: Optional[str]=None, tokenizer_revision: Optional[str]=None, max_model_len: Optional[int]= None, quantization: Optional[str]=None, enforce_eager: bool=False, max_context_len_to_capture: Optional[int]=None) ->None: self.model = model self.tokenizer = tokenizer self.tokenizer_mode = tokenizer_mode self.trust_remote_code = trust_remote_code self.download_dir = download_dir self.load_format = load_format self.seed = seed self.revision = revision self.tokenizer_revision = tokenizer_revision self.quantization = quantization self.enforce_eager = enforce_eager self.max_context_len_to_capture = max_context_len_to_capture if os.environ.get('VLLM_USE_MODELSCOPE', 'False').lower() == 'true': from modelscope.hub.snapshot_download import snapshot_download model_path = snapshot_download(model_id=model, cache_dir= download_dir, revision=revision) self.model = model_path self.download_dir = model_path self.tokenizer = model_path self.hf_config = get_config(self.model, trust_remote_code, revision) self.dtype = _get_and_verify_dtype(self.hf_config, dtype) self.max_model_len = _get_and_verify_max_len(self.hf_config, max_model_len) self._verify_load_format() self._verify_tokenizer_mode() self._verify_quantization() self._verify_cuda_graph()
null
forward
bias = self.bias if not self.skip_bias_add else None output = self.linear_method.apply_weights(self.linear_weights, x, bias) output_bias = self.bias if self.skip_bias_add else None return output, output_bias
def forward(self, x: torch.Tensor) ->torch.Tensor: bias = self.bias if not self.skip_bias_add else None output = self.linear_method.apply_weights(self.linear_weights, x, bias) output_bias = self.bias if self.skip_bias_add else None return output, output_bias
null
_degroup_weight
hidden_size = self.config.hidden_size head_size = self.config.hidden_size // self.config.num_attention_heads target_num_kv_heads = self.config.num_key_value_heads num_kv_heads = loaded_weight.shape[0] // head_size n_repeats = target_num_kv_heads / num_kv_heads assert n_repeats == int(n_repeats) n_repeats = int(n_repeats) loaded_weight = loaded_weight.view(num_kv_heads, head_size, hidden_size) loaded_weight = torch.repeat_interleave(loaded_weight, repeats=n_repeats, dim=0 ) loaded_weight = loaded_weight.reshape(target_num_kv_heads * head_size, hidden_size) return loaded_weight
def _degroup_weight(self, loaded_weight: torch.Tensor) ->torch.Tensor: hidden_size = self.config.hidden_size head_size = self.config.hidden_size // self.config.num_attention_heads target_num_kv_heads = self.config.num_key_value_heads num_kv_heads = loaded_weight.shape[0] // head_size n_repeats = target_num_kv_heads / num_kv_heads assert n_repeats == int(n_repeats) n_repeats = int(n_repeats) loaded_weight = loaded_weight.view(num_kv_heads, head_size, hidden_size) loaded_weight = torch.repeat_interleave(loaded_weight, repeats= n_repeats, dim=0) loaded_weight = loaded_weight.reshape(target_num_kv_heads * head_size, hidden_size) return loaded_weight
null
forward
qkv, _ = self.c_attn(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) key_cache, value_cache = kv_cache attn_output = self.attn(q, k, v, key_cache, value_cache, input_metadata) attn_output, _ = self.c_proj(attn_output) return attn_output
def forward(self, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: qkv, _ = self.c_attn(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) key_cache, value_cache = kv_cache attn_output = self.attn(q, k, v, key_cache, value_cache, input_metadata) attn_output, _ = self.c_proj(attn_output) return attn_output
null
get_cache_block_size
head_size = model_config.get_head_size() num_heads = model_config.get_num_kv_heads(parallel_config) num_layers = model_config.get_num_layers(parallel_config) key_cache_block = block_size * num_heads * head_size value_cache_block = key_cache_block total = num_layers * (key_cache_block + value_cache_block) dtype_size = _get_dtype_size(model_config.dtype) return dtype_size * total
@staticmethod def get_cache_block_size(block_size: int, model_config: ModelConfig, parallel_config: ParallelConfig) ->int: head_size = model_config.get_head_size() num_heads = model_config.get_num_kv_heads(parallel_config) num_layers = model_config.get_num_layers(parallel_config) key_cache_block = block_size * num_heads * head_size value_cache_block = key_cache_block total = num_layers * (key_cache_block + value_cache_block) dtype_size = _get_dtype_size(model_config.dtype) return dtype_size * total
null
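As a worked example of get_cache_block_size, a model with 32 layers, 32 KV heads of head size 128, a block size of 16 tokens, and 2-byte (fp16) elements needs 8 MiB of cache per block:

block_size, num_heads, head_size, num_layers, dtype_size = 16, 32, 128, 32, 2  # fp16
key_cache_block = block_size * num_heads * head_size
value_cache_block = key_cache_block
total = num_layers * (key_cache_block + value_cache_block)
assert dtype_size * total == 8 * 1024 * 1024          # 8 MiB per KV-cache block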
_get_alibi_slopes
closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads)) base = torch.tensor(2 ** -2 ** -(math.log2(closest_power_of_2) - 3), dtype= torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != total_num_heads: extra_base = torch.tensor(2 ** -2 ** -(math.log2(2 * closest_power_of_2 ) - 3), dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, total_num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, dtype= torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) return slopes
def _get_alibi_slopes(total_num_heads: int) ->torch.Tensor: closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads)) base = torch.tensor(2 ** -2 ** -(math.log2(closest_power_of_2) - 3), dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != total_num_heads: extra_base = torch.tensor(2 ** -2 ** -(math.log2(2 * closest_power_of_2) - 3), dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, total_num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0 ) return slopes
null
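For a power-of-two head count the ALiBi slopes form a simple geometric series; with 8 heads the base works out to 0.5, giving slopes 1/2, 1/4, ..., 1/256. A standalone check of that case:

import math
import torch

total_num_heads = 8
closest = 2 ** math.floor(math.log2(total_num_heads))
base = torch.tensor(2 ** -2 ** -(math.log2(closest) - 3), dtype=torch.float32)
slopes = torch.pow(base, torch.arange(1, 1 + closest, dtype=torch.int32))
assert torch.allclose(slopes, torch.tensor([0.5 ** i for i in range(1, 9)]))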
sample
next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata) return next_tokens
null
allocate
seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0] block_table: BlockTable = [] for logical_idx in range(len(seq.logical_token_blocks)): if (self.block_sliding_window is not None and logical_idx >= self. block_sliding_window): block = block_table[logical_idx % self.block_sliding_window] else: block = self.gpu_allocator.allocate() block.ref_count = seq_group.num_seqs() block_table.append(block) for seq in seq_group.get_seqs(status=SequenceStatus.WAITING): self.block_tables[seq.seq_id] = block_table.copy()
def allocate(self, seq_group: SequenceGroup) ->None: seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0] block_table: BlockTable = [] for logical_idx in range(len(seq.logical_token_blocks)): if (self.block_sliding_window is not None and logical_idx >= self. block_sliding_window): block = block_table[logical_idx % self.block_sliding_window] else: block = self.gpu_allocator.allocate() block.ref_count = seq_group.num_seqs() block_table.append(block) for seq in seq_group.get_seqs(status=SequenceStatus.WAITING): self.block_tables[seq.seq_id] = block_table.copy()
null
load_weights
params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if ('attention.bias' in name or 'attention.masked_bias' in name or 'rotary_emb.inv_freq' in name): continue param = params_dict[name] if 'query_key_value' in name: output_dim = getattr(param, 'output_dim', None) num_heads = self.config.num_attention_heads if output_dim is not None: loaded_weight_shape = loaded_weight.shape loaded_weight = loaded_weight.view(loaded_weight_shape[: output_dim] + (num_heads, 3, -1) + loaded_weight_shape[ output_dim + 1:]) loaded_weight = loaded_weight.transpose(output_dim, output_dim + 1) loaded_weight = loaded_weight.reshape(loaded_weight_shape) weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if ('attention.bias' in name or 'attention.masked_bias' in name or 'rotary_emb.inv_freq' in name): continue param = params_dict[name] if 'query_key_value' in name: output_dim = getattr(param, 'output_dim', None) num_heads = self.config.num_attention_heads if output_dim is not None: loaded_weight_shape = loaded_weight.shape loaded_weight = loaded_weight.view(loaded_weight_shape[: output_dim] + (num_heads, 3, -1) + loaded_weight_shape[ output_dim + 1:]) loaded_weight = loaded_weight.transpose(output_dim, output_dim + 1) loaded_weight = loaded_weight.reshape(loaded_weight_shape) weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
reset
self.counter = 0
def reset(self) ->None: self.counter = 0
null
__init__
super().__init__() self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mixer = PhiAttention(config, linear_method) self.mlp = PhiMLP(config, linear_method)
def __init__(self, config: PretrainedConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.mixer = PhiAttention(config, linear_method) self.mlp = PhiMLP(config, linear_method)
null
__init__
super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias= False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase]=None) ->None: super().__init__() self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [ intermediate_size] * 2, bias=False, linear_method=linear_method) self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias =False, linear_method=linear_method) if hidden_act != 'silu': raise ValueError( f'Unsupported activation: {hidden_act}. Only silu is supported for now.' ) self.act_fn = SiluAndMul()
null
_compute_inv_freq
pos_freqs = self.base ** (torch.arange(0, self.rotary_dim, 2, dtype=torch. float, device='cuda') / self.rotary_dim) inv_freq_extrapolation = 1.0 / pos_freqs inv_freq_interpolation = 1.0 / (scaling_factor * pos_freqs) low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, self.rotary_dim, self.base, self.max_position_embeddings) inv_freq_mask = (1 - _yarn_linear_ramp_mask(low, high, self.rotary_dim // 2, dtype=torch.float, device='cuda')) * self.extrapolation_factor inv_freq = inv_freq_interpolation * (1 - inv_freq_mask ) + inv_freq_extrapolation * inv_freq_mask return inv_freq
def _compute_inv_freq(self, scaling_factor: float) ->torch.Tensor: pos_freqs = self.base ** (torch.arange(0, self.rotary_dim, 2, dtype= torch.float, device='cuda') / self.rotary_dim) inv_freq_extrapolation = 1.0 / pos_freqs inv_freq_interpolation = 1.0 / (scaling_factor * pos_freqs) low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, self.rotary_dim, self.base, self.max_position_embeddings) inv_freq_mask = (1 - _yarn_linear_ramp_mask(low, high, self.rotary_dim // 2, dtype=torch.float, device='cuda')) * self.extrapolation_factor inv_freq = inv_freq_interpolation * (1 - inv_freq_mask ) + inv_freq_extrapolation * inv_freq_mask return inv_freq
null
broadcast
"""Broadcast the input tensor.""" world_size = torch.distributed.get_world_size() assert 0 <= src < world_size, f'Invalid src rank ({src})' if world_size == 1: return input_ torch.distributed.broadcast(input_, src=src) return input_
def broadcast(input_, src=0): """Broadcast the input tensor.""" world_size = torch.distributed.get_world_size() assert 0 <= src < world_size, f'Invalid src rank ({src})' if world_size == 1: return input_ torch.distributed.broadcast(input_, src=src) return input_
Broadcast the input tensor.
tensor_model_parallel_all_gather
"""All-gather the input tensor across model parallel group.""" world_size = get_tensor_model_parallel_world_size() if world_size == 1: return input_ assert -input_.dim() <= dim < input_.dim( ), f'Invalid dim ({dim}) for input tensor with shape {input_.size()}' if dim < 0: dim += input_.dim() input_size = input_.size() output_tensor = torch.empty((world_size,) + input_size, dtype=input_.dtype, device=input_.device) torch.distributed.all_gather_into_tensor(output_tensor, input_, group= get_tensor_model_parallel_group()) output_tensor = output_tensor.movedim(0, dim) output_tensor = output_tensor.reshape(input_size[:dim] + (world_size * input_size[dim],) + input_size[dim + 1:]) return output_tensor
def tensor_model_parallel_all_gather(input_, dim=-1): """All-gather the input tensor across model parallel group.""" world_size = get_tensor_model_parallel_world_size() if world_size == 1: return input_ assert -input_.dim() <= dim < input_.dim( ), f'Invalid dim ({dim}) for input tensor with shape {input_.size()}' if dim < 0: dim += input_.dim() input_size = input_.size() output_tensor = torch.empty((world_size,) + input_size, dtype=input_. dtype, device=input_.device) torch.distributed.all_gather_into_tensor(output_tensor, input_, group= get_tensor_model_parallel_group()) output_tensor = output_tensor.movedim(0, dim) output_tensor = output_tensor.reshape(input_size[:dim] + (world_size * input_size[dim],) + input_size[dim + 1:]) return output_tensor
All-gather the input tensor across model parallel group.
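To see how the gather reshapes its result along `dim`, here is a CPU-only illustration that emulates the collective by stacking world_size copies of the input; world_size = 2 and dim = -1 are assumed for the example:

import torch

world_size, dim = 2, 1  # dim=-1 normalizes to 1 for a 2-D input
input_ = torch.randn(4, 8)
gathered = torch.stack([input_] * world_size, dim=0)  # (world_size, 4, 8)
gathered = gathered.movedim(0, dim)                   # (4, world_size, 8)
gathered = gathered.reshape(4, world_size * 8)        # (4, 16)
assert gathered.shape == (4, 16)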
_forward
"""PyTorch-native implementation equivalent to forward().""" c = math.sqrt(2.0 / math.pi) return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0))))
def _forward(self, x: torch.Tensor) ->torch.Tensor: """PyTorch-native implementation equivalent to forward().""" c = math.sqrt(2.0 / math.pi) return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0))))
PyTorch-native implementation equivalent to forward().
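For reference, this tanh formula matches PyTorch's built-in approximate GELU (assuming PyTorch 1.12 or newer, where the approximate='tanh' argument exists):

import math
import torch
import torch.nn.functional as F

x = torch.randn(8)
c = math.sqrt(2.0 / math.pi)
manual = 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3.0))))
assert torch.allclose(manual, F.gelu(x, approximate='tanh'), atol=1e-6)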
is_running
return self.background_loop is not None and not self.background_loop.done()
@property def is_running(self) ->bool: return self.background_loop is not None and not self.background_loop.done()
null
is_finished
return all(seq.is_finished() for seq in self.get_seqs())
def is_finished(self) ->bool: return all(seq.is_finished() for seq in self.get_seqs())
null
__init__
super().__init__(*args, **kwargs) self._num_aborts = 0
def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._num_aborts = 0
null
get_num_unfinished_requests
"""Gets the number of unfinished requests.""" return self.scheduler.get_num_unfinished_seq_groups()
def get_num_unfinished_requests(self) ->int: """Gets the number of unfinished requests.""" return self.scheduler.get_num_unfinished_seq_groups()
Gets the number of unfinished requests.
sample
next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head_weight, hidden_states, sampling_metadata) return next_tokens
null
__init__
super().__init__() self.config = config self.linear_method = linear_method self.transformer = BloomModel(config, linear_method) self.lm_head_weight = self.transformer.word_embeddings.weight self.sampler = Sampler(config.vocab_size)
def __init__(self, config: BloomConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.transformer = BloomModel(config, linear_method) self.lm_head_weight = self.transformer.word_embeddings.weight self.sampler = Sampler(config.vocab_size)
null
test_health_endpoint
response = client.get('/health') assert response.status_code == 200
def test_health_endpoint(): response = client.get('/health') assert response.status_code == 200
null
get_config_filenames
return ['quant_config.json', 'quantize_config.json']
@staticmethod def get_config_filenames() ->List[str]: return ['quant_config.json', 'quantize_config.json']
null
init_cache_engine
self.cache_config = cache_config self.cache_engine = CacheEngine(self.cache_config, self.model_config, self.parallel_config) self.cache_events = self.cache_engine.events self.gpu_cache = self.cache_engine.gpu_cache self.model_runner.set_block_size(self.cache_engine.block_size)
def init_cache_engine(self, cache_config: CacheConfig) ->None: self.cache_config = cache_config self.cache_engine = CacheEngine(self.cache_config, self.model_config, self.parallel_config) self.cache_events = self.cache_engine.events self.gpu_cache = self.cache_engine.gpu_cache self.model_runner.set_block_size(self.cache_engine.block_size)
null
__init__
super().__init__() self.config = config self.linear_method = linear_method self.transformer = QWenModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: QWenConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.transformer = QWenModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
null
get_max_num_running_seqs
"""The maximum number of sequences running in parallel in the remaining lifetime of the request.""" if self.sampling_params.use_beam_search: return self.sampling_params.best_of else: if self.sampling_params.best_of > self.num_seqs(): return self.sampling_params.best_of return self.num_unfinished_seqs()
def get_max_num_running_seqs(self) ->int: """The maximum number of sequences running in parallel in the remaining lifetime of the request.""" if self.sampling_params.use_beam_search: return self.sampling_params.best_of else: if self.sampling_params.best_of > self.num_seqs(): return self.sampling_params.best_of return self.num_unfinished_seqs()
The maximum number of sequences running in parallel in the remaining lifetime of the request.
_decode_sequence
"""Decodes the new token for a sequence.""" new_tokens, new_output_text, prefix_offset, read_offset = ( detokenize_incrementally(self.tokenizer, all_input_ids=seq. get_token_ids(), prev_tokens=seq.tokens, prefix_offset=seq. prefix_offset, read_offset=seq.read_offset, skip_special_tokens=prms. skip_special_tokens, spaces_between_special_tokens=prms. spaces_between_special_tokens)) if seq.tokens is None: seq.tokens = new_tokens else: seq.tokens.extend(new_tokens) seq.prefix_offset = prefix_offset seq.read_offset = read_offset seq.output_text += new_output_text
def _decode_sequence(self, seq: Sequence, prms: SamplingParams) ->None: """Decodes the new token for a sequence.""" new_tokens, new_output_text, prefix_offset, read_offset = ( detokenize_incrementally(self.tokenizer, all_input_ids=seq. get_token_ids(), prev_tokens=seq.tokens, prefix_offset=seq. prefix_offset, read_offset=seq.read_offset, skip_special_tokens= prms.skip_special_tokens, spaces_between_special_tokens=prms. spaces_between_special_tokens)) if seq.tokens is None: seq.tokens = new_tokens else: seq.tokens.extend(new_tokens) seq.prefix_offset = prefix_offset seq.read_offset = read_offset seq.output_text += new_output_text
Decodes the new token for a sequence.
get_token_ids
return self.token_ids[:self.num_tokens]
def get_token_ids(self) ->List[int]: return self.token_ids[:self.num_tokens]
null
load_weights
params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if name == 'lm_head.weight': continue if not name.startswith('transformer.'): name = 'transformer.' + name param = params_dict[name] if 'query_key_value' in name: output_dim = getattr(param, 'output_dim', None) num_heads = self.config.num_attention_heads if output_dim is not None: loaded_weight_shape = loaded_weight.shape loaded_weight = loaded_weight.view(loaded_weight_shape[: output_dim] + (num_heads, 3, -1) + loaded_weight_shape[ output_dim + 1:]) loaded_weight = loaded_weight.transpose(output_dim, output_dim + 1) loaded_weight = loaded_weight.reshape(loaded_weight_shape) weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if name == 'lm_head.weight': continue if not name.startswith('transformer.'): name = 'transformer.' + name param = params_dict[name] if 'query_key_value' in name: output_dim = getattr(param, 'output_dim', None) num_heads = self.config.num_attention_heads if output_dim is not None: loaded_weight_shape = loaded_weight.shape loaded_weight = loaded_weight.view(loaded_weight_shape[: output_dim] + (num_heads, 3, -1) + loaded_weight_shape[ output_dim + 1:]) loaded_weight = loaded_weight.transpose(output_dim, output_dim + 1) loaded_weight = loaded_weight.reshape(loaded_weight_shape) weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
get_name
"""Name of the quantization method.""" raise NotImplementedError
@abstractmethod def get_name(self) ->str: """Name of the quantization method.""" raise NotImplementedError
Name of the quantization method.
get_requirements
"""Get Python package dependencies from requirements.txt.""" if _is_hip(): with open(get_path('requirements-rocm.txt')) as f: requirements = f.read().strip().split('\n') else: with open(get_path('requirements.txt')) as f: requirements = f.read().strip().split('\n') return requirements
def get_requirements() ->List[str]: """Get Python package dependencies from requirements.txt.""" if _is_hip(): with open(get_path('requirements-rocm.txt')) as f: requirements = f.read().strip().split('\n') else: with open(get_path('requirements.txt')) as f: requirements = f.read().strip().split('\n') return requirements
Get Python package dependencies from requirements.txt.
load_weights
params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]=None, load_format: str='auto', revision: Optional[str]=None): params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
__init__
self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.emb_dropout_prob = emb_dropout_prob self.attn_dropout_prob = attn_dropout_prob self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.bf16 = bf16 self.fp16 = fp16 self.fp32 = fp32 self.kv_channels = kv_channels self.rotary_pct = rotary_pct self.rotary_emb_base = rotary_emb_base self.use_dynamic_ntk = use_dynamic_ntk self.use_logn_attn = use_logn_attn self.use_flash_attn = use_flash_attn self.no_bias = no_bias super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
def __init__(self, vocab_size=151936, hidden_size=4096, num_hidden_layers= 32, num_attention_heads=32, emb_dropout_prob=0.0, attn_dropout_prob=0.0, layer_norm_epsilon=1e-06, initializer_range=0.02, max_position_embeddings=8192, scale_attn_weights=True, use_cache=True, bf16=False, fp16=False, fp32=False, kv_channels=128, rotary_pct=1.0, rotary_emb_base=10000, use_dynamic_ntk=True, use_logn_attn=True, use_flash_attn='auto', intermediate_size=22016, no_bias=True, tie_word_embeddings=False, **kwargs): self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.emb_dropout_prob = emb_dropout_prob self.attn_dropout_prob = attn_dropout_prob self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.scale_attn_weights = scale_attn_weights self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.bf16 = bf16 self.fp16 = fp16 self.fp32 = fp32 self.kv_channels = kv_channels self.rotary_pct = rotary_pct self.rotary_emb_base = rotary_emb_base self.use_dynamic_ntk = use_dynamic_ntk self.use_logn_attn = use_logn_attn self.use_flash_attn = use_flash_attn self.no_bias = no_bias super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
null
clear
self.flag = False
def clear(self): self.flag = False
null
__init__
super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) rope_scaling = getattr(config, 'rope_scaling', None) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.self_attn = LlamaAttention(hidden_size=self.hidden_size, num_heads= config.num_attention_heads, num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, rope_scaling=rope_scaling, max_position_embeddings=max_position_embeddings, linear_method= linear_method) self.mlp = LlamaMLP(hidden_size=self.hidden_size, intermediate_size=config. intermediate_size, hidden_act=config.hidden_act, linear_method= linear_method) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config. rms_norm_eps)
def __init__(self, config: LlamaConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, 'rope_theta', 10000) rope_scaling = getattr(config, 'rope_scaling', None) max_position_embeddings = getattr(config, 'max_position_embeddings', 8192) self.self_attn = LlamaAttention(hidden_size=self.hidden_size, num_heads =config.num_attention_heads, num_kv_heads=config. num_key_value_heads, rope_theta=rope_theta, rope_scaling= rope_scaling, max_position_embeddings=max_position_embeddings, linear_method=linear_method) self.mlp = LlamaMLP(hidden_size=self.hidden_size, intermediate_size= config.intermediate_size, hidden_act=config.hidden_act, linear_method=linear_method) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config. rms_norm_eps)
null
_is_cuda
return torch.version.cuda is not None
def _is_cuda() ->bool: return torch.version.cuda is not None
null
test_beam_search_single_input
hf_model = hf_runner(model, dtype=dtype) hf_outputs = hf_model.generate_beam_search(example_prompts, beam_width, max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_outputs = vllm_model.generate_beam_search(example_prompts, beam_width, max_tokens) del vllm_model for i in range(len(example_prompts)): hf_output_ids, _ = hf_outputs[i] vllm_output_ids, _ = vllm_outputs[i] assert len(hf_output_ids) == len(vllm_output_ids) for j in range(len(hf_output_ids)): assert hf_output_ids[j] == vllm_output_ids[j], f"""Test{i} output{j}: HF: {hf_output_ids} vLLM: {vllm_output_ids}"""
@pytest.mark.parametrize('model', MODELS) @pytest.mark.parametrize('dtype', ['half']) @pytest.mark.parametrize('max_tokens', MAX_TOKENS) @pytest.mark.parametrize('beam_width', BEAM_WIDTHS) def test_beam_search_single_input(hf_runner, vllm_runner, example_prompts, model: str, dtype: str, max_tokens: int, beam_width: int) ->None: hf_model = hf_runner(model, dtype=dtype) hf_outputs = hf_model.generate_beam_search(example_prompts, beam_width, max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_outputs = vllm_model.generate_beam_search(example_prompts, beam_width, max_tokens) del vllm_model for i in range(len(example_prompts)): hf_output_ids, _ = hf_outputs[i] vllm_output_ids, _ = vllm_outputs[i] assert len(hf_output_ids) == len(vllm_output_ids) for j in range(len(hf_output_ids)): assert hf_output_ids[j] == vllm_output_ids[j], f"""Test{i} output{j}: HF: {hf_output_ids} vLLM: {vllm_output_ids}"""
null
forward
if residual is None: residual = hidden_states hidden_states = self.ln_1(hidden_states) else: hidden_states, residual = self.ln_1(hidden_states, residual) hidden_states = self.attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.ln_2(hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, residual: Optional[torch.Tensor]) ->Tuple[torch.Tensor, torch.Tensor]: if residual is None: residual = hidden_states hidden_states = self.ln_1(hidden_states) else: hidden_states, residual = self.ln_1(hidden_states, residual) hidden_states = self.attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states, residual = self.ln_2(hidden_states, residual) hidden_states = self.mlp(hidden_states) return hidden_states, residual
null
forward
hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
null
get_last_token_id
return self.data.get_last_token_id()
def get_last_token_id(self) ->int: return self.data.get_last_token_id()
null
stop_generating
self.request_id = None
def stop_generating(self): self.request_id = None
null
_rotate_gptj
x1 = x[..., ::2] x2 = x[..., 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2)
def _rotate_gptj(x: torch.Tensor) ->torch.Tensor: x1 = x[..., ::2] x2 = x[..., 1::2] x = torch.stack((-x2, x1), dim=-1) return x.flatten(-2)
null
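On a concrete input, the interleaved (GPT-J style) rotation pairs each even-indexed element with the following odd-indexed one and maps (a, b) to (-b, a). A small standalone check:

import torch

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
x1 = x[..., ::2]   # tensor([1., 3.])
x2 = x[..., 1::2]  # tensor([2., 4.])
rotated = torch.stack((-x2, x1), dim=-1).flatten(-2)
print(rotated)     # tensor([-2., 1., -4., 3.])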
apply_weights
"""Apply the weights to the input tensor.""" raise NotImplementedError
@abstractmethod def apply_weights(self, weights: Dict[str, torch.Tensor], x: torch.Tensor, bias: Optional[torch.Tensor]=None) ->torch.Tensor: """Apply the weights to the input tensor.""" raise NotImplementedError
Apply the weights to the input tensor.
__init__
super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([LlamaDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase]=None) ->None: super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([LlamaDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
__init__
super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([MistralDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: MistralConfig, linear_method: Optional[LinearMethodBase]=None) ->None: super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([MistralDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
swap_out
self._swap(self.gpu_cache, self.cpu_cache, src_to_dst)
def swap_out(self, src_to_dst: Dict[int, int]) ->None: self._swap(self.gpu_cache, self.cpu_cache, src_to_dst)
null
forward
for i in range(self.num_layers): layer = self.layers[i] hidden_states = layer(hidden_states=hidden_states, position_ids=position_ids, kv_cache=kv_caches[i], input_metadata=input_metadata) if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return hidden_states
def forward(self, hidden_states: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: for i in range(self.num_layers): layer = self.layers[i] hidden_states = layer(hidden_states=hidden_states, position_ids=position_ids, kv_cache=kv_caches[i], input_metadata=input_metadata) if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return hidden_states
null
load_model_cls
if model_arch not in _MODELS: return None if is_hip(): if model_arch in _ROCM_UNSUPPORTED_MODELS: raise ValueError( f'Model architecture {model_arch} is not supported by ROCm for now.' ) if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS: logger.warning( f'Model architecture {model_arch} is partially supported by ROCm: ' + _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]) module_name, model_cls_name = _MODELS[model_arch] module = importlib.import_module(f'vllm.model_executor.models.{module_name}') return getattr(module, model_cls_name, None)
@staticmethod def load_model_cls(model_arch: str) ->Optional[Type[nn.Module]]: if model_arch not in _MODELS: return None if is_hip(): if model_arch in _ROCM_UNSUPPORTED_MODELS: raise ValueError( f'Model architecture {model_arch} is not supported by ROCm for now.' ) if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS: logger.warning( f'Model architecture {model_arch} is partially supported by ROCm: ' + _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]) module_name, model_cls_name = _MODELS[model_arch] module = importlib.import_module( f'vllm.model_executor.models.{module_name}') return getattr(module, model_cls_name, None)
null
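As a rough sketch of how the registry lookup resolves a class (the mapping entry below is hypothetical and vLLM is assumed to be installed; the real _MODELS table lives alongside this method):

import importlib

_MODELS = {'LlamaForCausalLM': ('llama', 'LlamaForCausalLM')}  # illustrative entry only

module_name, model_cls_name = _MODELS['LlamaForCausalLM']
module = importlib.import_module(f'vllm.model_executor.models.{module_name}')
model_cls = getattr(module, model_cls_name, None)
print(model_cls)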
finish
self._queue.put_nowait(StopIteration) self._finished = True
def finish(self) ->None: self._queue.put_nowait(StopIteration) self._finished = True
null
__init__
super().__init__() self.d_model = config.d_model self.total_num_heads = config.n_heads self.head_dim = self.d_model // self.total_num_heads self.clip_qkv = config.attn_config['clip_qkv'] self.qk_ln = config.attn_config['qk_ln'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] if 'kv_n_heads' in config.attn_config: self.total_num_kv_heads = config.attn_config['kv_n_heads'] else: self.total_num_kv_heads = self.total_num_heads assert not config.attn_config['prefix_lm'] assert config.attn_config['alibi'] self.Wqkv = QKVParallelLinear(self.d_model, self.d_model // self. total_num_heads, self.total_num_heads, self.total_num_kv_heads, bias= not config.no_bias, linear_method=linear_method) if self.qk_ln: self.q_ln = nn.LayerNorm(self.d_model) self.k_ln = nn.LayerNorm(self.d_model) self.out_proj = RowParallelLinear(self.d_model, self.d_model, bias=not config.no_bias, linear_method=linear_method) tp_world_size = get_tensor_model_parallel_world_size() assert self.total_num_heads % tp_world_size == 0 self.num_heads = self.total_num_heads // tp_world_size if self.total_num_kv_heads >= tp_world_size: assert self.total_num_kv_heads % tp_world_size == 0 else: assert tp_world_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_world_size) self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads head_end = (tp_rank + 1) * self.num_heads alibi_slopes = _get_alibi_slopes(self.total_num_heads, self.alibi_bias_max) alibi_slopes = alibi_slopes[head_start:head_end].tolist() self.head_dim = self.d_model // self.total_num_heads scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, scaling, alibi_slopes=alibi_slopes, num_kv_heads=self.num_kv_heads)
def __init__(self, config: MPTConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.d_model = config.d_model self.total_num_heads = config.n_heads self.head_dim = self.d_model // self.total_num_heads self.clip_qkv = config.attn_config['clip_qkv'] self.qk_ln = config.attn_config['qk_ln'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] if 'kv_n_heads' in config.attn_config: self.total_num_kv_heads = config.attn_config['kv_n_heads'] else: self.total_num_kv_heads = self.total_num_heads assert not config.attn_config['prefix_lm'] assert config.attn_config['alibi'] self.Wqkv = QKVParallelLinear(self.d_model, self.d_model // self. total_num_heads, self.total_num_heads, self.total_num_kv_heads, bias=not config.no_bias, linear_method=linear_method) if self.qk_ln: self.q_ln = nn.LayerNorm(self.d_model) self.k_ln = nn.LayerNorm(self.d_model) self.out_proj = RowParallelLinear(self.d_model, self.d_model, bias=not config.no_bias, linear_method=linear_method) tp_world_size = get_tensor_model_parallel_world_size() assert self.total_num_heads % tp_world_size == 0 self.num_heads = self.total_num_heads // tp_world_size if self.total_num_kv_heads >= tp_world_size: assert self.total_num_kv_heads % tp_world_size == 0 else: assert tp_world_size % self.total_num_kv_heads == 0 self.num_kv_heads = max(1, self.total_num_kv_heads // tp_world_size) self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads head_end = (tp_rank + 1) * self.num_heads alibi_slopes = _get_alibi_slopes(self.total_num_heads, self.alibi_bias_max) alibi_slopes = alibi_slopes[head_start:head_end].tolist() self.head_dim = self.d_model // self.total_num_heads scaling = self.head_dim ** -0.5 self.attn = PagedAttention(self.num_heads, self.head_dim, scaling, alibi_slopes=alibi_slopes, num_kv_heads=self.num_kv_heads)
null
get_pipeline_model_parallel_first_rank
"""Return the global rank of the first process in the pipeline for the current tensor parallel group""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' return _PIPELINE_GLOBAL_RANKS[0]
def get_pipeline_model_parallel_first_rank(): """Return the global rank of the first process in the pipeline for the current tensor parallel group""" assert _PIPELINE_GLOBAL_RANKS is not None, 'Pipeline parallel group is not initialized' return _PIPELINE_GLOBAL_RANKS[0]
Return the global rank of the first process in the pipeline for the current tensor parallel group
_preempt_by_swap
self._swap_out(seq_group, blocks_to_swap_out) self.swapped.append(seq_group)
def _preempt_by_swap(self, seq_group: SequenceGroup, blocks_to_swap_out: Dict[int, int]) ->None: self._swap_out(seq_group, blocks_to_swap_out) self.swapped.append(seq_group)
null
forward
residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states return hidden_states
def forward(self, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states return hidden_states
null
_verify_args
self.model_config.verify_with_parallel_config(self.parallel_config) self.cache_config.verify_with_parallel_config(self.parallel_config)
def _verify_args(self) ->None: self.model_config.verify_with_parallel_config(self.parallel_config) self.cache_config.verify_with_parallel_config(self.parallel_config)
null
__setstate__
self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file)
def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file)
null
_yarn_get_mscale
if scale <= 1: return 1.0 return 0.1 * math.log(scale) + 1.0
def _yarn_get_mscale(scale: float=1) ->float: if scale <= 1: return 1.0 return 0.1 * math.log(scale) + 1.0
null
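A quick numeric check of this attention-scale correction (the function below mirrors the one above; the value for scale=16 is just 0.1 * ln(16) + 1):

import math

def yarn_get_mscale(scale: float = 1) -> float:
    if scale <= 1:
        return 1.0
    return 0.1 * math.log(scale) + 1.0

print(yarn_get_mscale(1))    # 1.0 -- no correction at or below a scale of 1
print(yarn_get_mscale(16))   # ~1.2773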
test_prepare_prompt
model_runner = ModelRunner(None, None, None) model_runner.set_block_size(16) batch_size = random.randint(1, 256) prompt_lens = [] seq_group_metadata_list = [] for i in range(batch_size): prompt_len = i % (model_runner.block_size - 1) + 1 prompt_lens.append(prompt_len) seq_data = list(range(prompt_len)) seq_group_metadata_list.append(SequenceGroupMetadata(request_id= f'test_{i}', is_prompt=True, seq_data={(0): SequenceData(seq_data)}, sampling_params=SamplingParams(temperature=0), block_tables={(0): [1]}) ) expected_selected_token_indices = [] selected_token_start_idx = 0 max_seq_len = max(prompt_lens) for prompt_len in prompt_lens: expected_selected_token_indices.append(selected_token_start_idx + prompt_len - 1) selected_token_start_idx += max_seq_len input_tokens, input_positions, _, return_prompt_lens = (model_runner. _prepare_prompt(seq_group_metadata_list)) assert return_prompt_lens == prompt_lens sampling_metadata = model_runner._prepare_sample(seq_group_metadata_list, prompt_lens) assert input_tokens.shape == (batch_size, max_seq_len) assert input_positions.shape == (batch_size, max_seq_len) torch.testing.assert_close(input_tokens, input_positions) actual = sampling_metadata.selected_token_indices expected = torch.tensor(expected_selected_token_indices, device=actual. device, dtype=actual.dtype) torch.testing.assert_close(actual, expected)
def test_prepare_prompt(): model_runner = ModelRunner(None, None, None) model_runner.set_block_size(16) batch_size = random.randint(1, 256) prompt_lens = [] seq_group_metadata_list = [] for i in range(batch_size): prompt_len = i % (model_runner.block_size - 1) + 1 prompt_lens.append(prompt_len) seq_data = list(range(prompt_len)) seq_group_metadata_list.append(SequenceGroupMetadata(request_id= f'test_{i}', is_prompt=True, seq_data={(0): SequenceData( seq_data)}, sampling_params=SamplingParams(temperature=0), block_tables={(0): [1]})) expected_selected_token_indices = [] selected_token_start_idx = 0 max_seq_len = max(prompt_lens) for prompt_len in prompt_lens: expected_selected_token_indices.append(selected_token_start_idx + prompt_len - 1) selected_token_start_idx += max_seq_len input_tokens, input_positions, _, return_prompt_lens = (model_runner. _prepare_prompt(seq_group_metadata_list)) assert return_prompt_lens == prompt_lens sampling_metadata = model_runner._prepare_sample(seq_group_metadata_list, prompt_lens) assert input_tokens.shape == (batch_size, max_seq_len) assert input_positions.shape == (batch_size, max_seq_len) torch.testing.assert_close(input_tokens, input_positions) actual = sampling_metadata.selected_token_indices expected = torch.tensor(expected_selected_token_indices, device=actual. device, dtype=actual.dtype) torch.testing.assert_close(actual, expected)
null
__init__
super().__init__(config, 'ROPE', linear_method)
def __init__(self, config, linear_method: Optional[LinearMethodBase]=None): super().__init__(config, 'ROPE', linear_method)
null
get_model
model_class = _get_model_architecture(model_config.hf_config) linear_method = None if model_config.quantization is not None: quant_config = get_quant_config(model_config.quantization, model_config .model, model_config.hf_config, model_config.download_dir) capability = torch.cuda.get_device_capability() capability = capability[0] * 10 + capability[1] if capability < quant_config.get_min_capability(): raise ValueError( f'The quantization method {model_config.quantization} is not supported for the current GPU. Minimum capability: {quant_config.get_min_capability()}. Current capability: {capability}.' ) supported_dtypes = quant_config.get_supported_act_dtypes() if model_config.dtype not in supported_dtypes: raise ValueError( f'{model_config.dtype} is not supported for quantization method {model_config.quantization}. Supported dtypes: {supported_dtypes}' ) linear_method = quant_config.get_linear_method() with _set_default_torch_dtype(model_config.dtype): with torch.device('cuda'): model = model_class(model_config.hf_config, linear_method) if model_config.load_format == 'dummy': initialize_dummy_weights(model) else: model.load_weights(model_config.model, model_config.download_dir, model_config.load_format, model_config.revision) return model.eval()
def get_model(model_config: ModelConfig) ->nn.Module: model_class = _get_model_architecture(model_config.hf_config) linear_method = None if model_config.quantization is not None: quant_config = get_quant_config(model_config.quantization, model_config.model, model_config.hf_config, model_config. download_dir) capability = torch.cuda.get_device_capability() capability = capability[0] * 10 + capability[1] if capability < quant_config.get_min_capability(): raise ValueError( f'The quantization method {model_config.quantization} is not supported for the current GPU. Minimum capability: {quant_config.get_min_capability()}. Current capability: {capability}.' ) supported_dtypes = quant_config.get_supported_act_dtypes() if model_config.dtype not in supported_dtypes: raise ValueError( f'{model_config.dtype} is not supported for quantization method {model_config.quantization}. Supported dtypes: {supported_dtypes}' ) linear_method = quant_config.get_linear_method() with _set_default_torch_dtype(model_config.dtype): with torch.device('cuda'): model = model_class(model_config.hf_config, linear_method) if model_config.load_format == 'dummy': initialize_dummy_weights(model) else: model.load_weights(model_config.model, model_config. download_dir, model_config.load_format, model_config.revision) return model.eval()
null
vllm_runner
return VllmRunner
@pytest.fixture def vllm_runner(): return VllmRunner
null
forward
gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x
def forward(self, x): gate_up, _ = self.gate_up_proj(x) x = self.act_fn(gate_up) x, _ = self.down_proj(x) return x
null
forward
hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embed_tokens(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, residual) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states
null
__init__
super().__init__() hidden_size = config.d_model self.norm_1 = nn.LayerNorm(hidden_size) self.attn = MPTAttention(config, linear_method) self.norm_2 = nn.LayerNorm(hidden_size) self.ffn = MPTMLP(config, linear_method)
def __init__(self, config: MPTConfig, linear_method: Optional[LinearMethodBase]=None): super().__init__() hidden_size = config.d_model self.norm_1 = nn.LayerNorm(hidden_size) self.attn = MPTAttention(config, linear_method) self.norm_2 = nn.LayerNorm(hidden_size) self.ffn = MPTMLP(config, linear_method)
null
_verify_args
if self.gpu_memory_utilization > 1.0: raise ValueError( f'GPU memory utilization must be less than 1.0. Got {self.gpu_memory_utilization}.' )
def _verify_args(self) ->None: if self.gpu_memory_utilization > 1.0: raise ValueError( f'GPU memory utilization must be less than 1.0. Got {self.gpu_memory_utilization}.' )
null
_prepare_sample
seq_groups: List[Tuple[List[int], SamplingParams]] = [] selected_token_indices: List[int] = [] selected_token_start_idx = 0 categorized_sample_indices = {t: [] for t in SamplingType} categorized_sample_indices_start_idx = 0 max_prompt_len = max(prompt_lens) if prompt_lens else 1 for i, seq_group_metadata in enumerate(seq_group_metadata_list): seq_ids = list(seq_group_metadata.seq_data.keys()) sampling_params = seq_group_metadata.sampling_params seq_groups.append((seq_ids, sampling_params)) if seq_group_metadata.is_prompt: assert len(seq_ids) == 1 prompt_len = prompt_lens[i] if sampling_params.prompt_logprobs is not None: categorized_sample_indices_start_idx += prompt_len - 1 categorized_sample_indices[sampling_params.sampling_type].append( categorized_sample_indices_start_idx) categorized_sample_indices_start_idx += 1 if sampling_params.prompt_logprobs is not None: selected_token_indices.extend(range(selected_token_start_idx, selected_token_start_idx + prompt_len - 1)) selected_token_indices.append(selected_token_start_idx + prompt_len - 1 ) selected_token_start_idx += max_prompt_len else: num_seqs = len(seq_ids) selected_token_indices.extend(range(selected_token_start_idx, selected_token_start_idx + num_seqs)) selected_token_start_idx += num_seqs categorized_sample_indices[sampling_params.sampling_type].extend(range (categorized_sample_indices_start_idx, categorized_sample_indices_start_idx + num_seqs)) categorized_sample_indices_start_idx += num_seqs selected_token_indices = _async_h2d(selected_token_indices, dtype=torch. long, pin_memory=not self.in_wsl) categorized_sample_indices = {t: _async_h2d(seq_ids, dtype=torch.int, pin_memory=not self.in_wsl) for t, seq_ids in categorized_sample_indices.items()} seq_data: Dict[int, SequenceData] = {} for seq_group_metadata in seq_group_metadata_list: seq_data.update(seq_group_metadata.seq_data) sampling_metadata = SamplingMetadata(seq_groups=seq_groups, seq_data= seq_data, prompt_lens=prompt_lens, selected_token_indices= selected_token_indices, categorized_sample_indices= categorized_sample_indices) return sampling_metadata
def _prepare_sample(self, seq_group_metadata_list: List[ SequenceGroupMetadata], prompt_lens: List[int]) ->SamplingMetadata: seq_groups: List[Tuple[List[int], SamplingParams]] = [] selected_token_indices: List[int] = [] selected_token_start_idx = 0 categorized_sample_indices = {t: [] for t in SamplingType} categorized_sample_indices_start_idx = 0 max_prompt_len = max(prompt_lens) if prompt_lens else 1 for i, seq_group_metadata in enumerate(seq_group_metadata_list): seq_ids = list(seq_group_metadata.seq_data.keys()) sampling_params = seq_group_metadata.sampling_params seq_groups.append((seq_ids, sampling_params)) if seq_group_metadata.is_prompt: assert len(seq_ids) == 1 prompt_len = prompt_lens[i] if sampling_params.prompt_logprobs is not None: categorized_sample_indices_start_idx += prompt_len - 1 categorized_sample_indices[sampling_params.sampling_type].append( categorized_sample_indices_start_idx) categorized_sample_indices_start_idx += 1 if sampling_params.prompt_logprobs is not None: selected_token_indices.extend(range( selected_token_start_idx, selected_token_start_idx + prompt_len - 1)) selected_token_indices.append(selected_token_start_idx + prompt_len - 1) selected_token_start_idx += max_prompt_len else: num_seqs = len(seq_ids) selected_token_indices.extend(range(selected_token_start_idx, selected_token_start_idx + num_seqs)) selected_token_start_idx += num_seqs categorized_sample_indices[sampling_params.sampling_type].extend( range(categorized_sample_indices_start_idx, categorized_sample_indices_start_idx + num_seqs)) categorized_sample_indices_start_idx += num_seqs selected_token_indices = _async_h2d(selected_token_indices, dtype=torch .long, pin_memory=not self.in_wsl) categorized_sample_indices = {t: _async_h2d(seq_ids, dtype=torch.int, pin_memory=not self.in_wsl) for t, seq_ids in categorized_sample_indices.items()} seq_data: Dict[int, SequenceData] = {} for seq_group_metadata in seq_group_metadata_list: seq_data.update(seq_group_metadata.seq_data) sampling_metadata = SamplingMetadata(seq_groups=seq_groups, seq_data= seq_data, prompt_lens=prompt_lens, selected_token_indices= selected_token_indices, categorized_sample_indices= categorized_sample_indices) return sampling_metadata
null
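To make the index bookkeeping concrete, here is a worked, self-contained version of the prompt branch for two prompts without prompt_logprobs (prompt_lens = [3, 5] is an assumed example): each prompt contributes the index of its last token, and the running offset advances by max_prompt_len because prompts are padded to a common length.

prompt_lens = [3, 5]
max_prompt_len = max(prompt_lens)
selected_token_indices, selected_token_start_idx = [], 0
for prompt_len in prompt_lens:
    selected_token_indices.append(selected_token_start_idx + prompt_len - 1)
    selected_token_start_idx += max_prompt_len
print(selected_token_indices)  # [2, 9]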
get_tensor_model_parallel_rank
"""Return my rank for the tensor model parallel group.""" return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_rank(): """Return my rank for the tensor model parallel group.""" return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
Return my rank for the tensor model parallel group.
__init__
super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([YiDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: YiConfig, linear_method: Optional[LinearMethodBase]=None) ->None: super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([YiDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
__init__
super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([BaiChuanDecoderLayer(config, position_embedding, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: BaiChuanConfig, position_embedding: str, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.config = config self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config.hidden_size) self.layers = nn.ModuleList([BaiChuanDecoderLayer(config, position_embedding, linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
__init__
super().__init__() self.input_size = input_size self.output_size = output_size self.skip_bias_add = skip_bias_add if params_dtype is None: params_dtype = torch.get_default_dtype() self.params_dtype = params_dtype if linear_method is None: linear_method = UnquantizedLinearMethod() self.linear_method = linear_method self.linear_weights = self.linear_method.create_weights(self.input_size, self.output_size, self.input_size, self.output_size, self.params_dtype) for name, weight in self.linear_weights.items(): if isinstance(weight, torch.Tensor): self.register_parameter(name, weight) if bias: self.bias = Parameter(torch.empty(self.output_size, device=torch.cuda. current_device(), dtype=self.params_dtype)) set_weight_attrs(self.bias, {'output_dim': 0}) else: self.register_parameter('bias', None)
def __init__(self, input_size: int, output_size: int, bias: bool=True, skip_bias_add: bool=False, params_dtype: Optional[torch.dtype]=None, linear_method: Optional[LinearMethodBase]=None): super().__init__() self.input_size = input_size self.output_size = output_size self.skip_bias_add = skip_bias_add if params_dtype is None: params_dtype = torch.get_default_dtype() self.params_dtype = params_dtype if linear_method is None: linear_method = UnquantizedLinearMethod() self.linear_method = linear_method self.linear_weights = self.linear_method.create_weights(self.input_size, self.output_size, self.input_size, self.output_size, self.params_dtype) for name, weight in self.linear_weights.items(): if isinstance(weight, torch.Tensor): self.register_parameter(name, weight) if bias: self.bias = Parameter(torch.empty(self.output_size, device=torch. cuda.current_device(), dtype=self.params_dtype)) set_weight_attrs(self.bias, {'output_dim': 0}) else: self.register_parameter('bias', None)
null