Dataset columns:
method_name: string, 3 to 45 characters
method_body: string, 9 to 6.25k characters
full_code: string, 35 to 7.02k characters
docstring: string, 18 to 4.7k characters
forward
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor,
            kv_cache: KVCache, input_metadata: InputMetadata,
            residual: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
    if residual is None:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
    else:
        hidden_states, residual = self.input_layernorm(hidden_states, residual)
    hidden_states = self.self_attn(positions=positions,
                                   hidden_states=hidden_states,
                                   kv_cache=kv_cache,
                                   input_metadata=input_metadata)
    hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
    hidden_states = self.mlp(hidden_states)
    return hidden_states, residual
null
__init__
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int,
             max_position: int = 4096 * 32, rope_theta: float = 10000,
             linear_method: Optional[LinearMethodBase] = None,
             sliding_window: Optional[int] = None) -> None:
    super().__init__()
    self.hidden_size = hidden_size
    tp_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = num_heads
    assert self.total_num_heads % tp_size == 0
    self.num_heads = self.total_num_heads // tp_size
    self.total_num_kv_heads = num_kv_heads
    if self.total_num_kv_heads >= tp_size:
        assert self.total_num_kv_heads % tp_size == 0
    else:
        assert tp_size % self.total_num_kv_heads == 0
    self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
    self.head_dim = hidden_size // self.total_num_heads
    self.q_size = self.num_heads * self.head_dim
    self.kv_size = self.num_kv_heads * self.head_dim
    self.scaling = self.head_dim ** -0.5
    self.rope_theta = rope_theta
    self.sliding_window = sliding_window
    self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim,
                                      self.total_num_heads,
                                      self.total_num_kv_heads, bias=False,
                                      linear_method=linear_method)
    self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
                                    hidden_size, bias=False,
                                    linear_method=linear_method)
    self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim,
                               max_position=max_position, base=self.rope_theta)
    self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling,
                               num_kv_heads=self.num_kv_heads,
                               sliding_window=self.sliding_window)
null
__init__
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int,
             rope_theta: float = 10000, max_position_embeddings: int = 8192,
             rope_scaling: Optional[Dict[str, Any]] = None,
             linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.hidden_size = hidden_size
    tp_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = num_heads
    assert self.total_num_heads % tp_size == 0
    self.num_heads = self.total_num_heads // tp_size
    self.total_num_kv_heads = num_kv_heads
    assert self.total_num_kv_heads % tp_size == 0
    self.num_kv_heads = self.total_num_kv_heads // tp_size
    self.head_dim = hidden_size // self.total_num_heads
    self.q_size = self.num_heads * self.head_dim
    self.kv_size = self.num_kv_heads * self.head_dim
    self.scaling = self.head_dim ** -0.5
    self.rope_theta = rope_theta
    self.max_position_embeddings = max_position_embeddings
    self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim,
                                      self.total_num_heads,
                                      self.total_num_kv_heads, bias=False,
                                      linear_method=linear_method)
    self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
                                    hidden_size, bias=False,
                                    linear_method=linear_method)
    self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim,
                               max_position=self.max_position_embeddings,
                               base=self.rope_theta, rope_scaling=rope_scaling)
    self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling,
                               num_kv_heads=self.num_kv_heads)
null
__init__
def __init__(self, config: GPTNeoXConfig,
             linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.use_parallel_residual = config.use_parallel_residual
    self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                        eps=config.layer_norm_eps)
    self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                 eps=config.layer_norm_eps)
    self.attention = GPTNeoXAttention(config, linear_method)
    self.mlp = GPTNeoXMLP(config, linear_method)
null
forward
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
            kv_caches: List[KVCache], input_metadata: InputMetadata) -> torch.Tensor:
    hidden_states = self.gpt_neox(input_ids, positions, kv_caches, input_metadata)
    return hidden_states
null
forward
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x, _ = self.up_proj(x)
    x = self.act(x)
    x, _ = self.down_proj(x)
    return x
null
forward
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
            kv_caches: List[KVCache], input_metadata: InputMetadata) -> torch.Tensor:
    hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata)
    return hidden_states
null
can_swap_in
def can_swap_in(self, seq_group: SequenceGroup) -> bool:
    blocks = self._get_physical_blocks(seq_group)
    num_swapped_seqs = seq_group.num_seqs(status=SequenceStatus.SWAPPED)
    num_free_blocks = self.gpu_allocator.get_num_free_blocks()
    num_required_blocks = len(blocks) + num_swapped_seqs
    return num_free_blocks - num_required_blocks >= self.watermark_blocks
null
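A hedged, illustrative calculation of the swap-in check above; the block counts and watermark are made-up numbers, not values from the source.

# Hypothetical numbers: 100 free GPU blocks, 90 physical blocks to swap in,
# 2 swapped sequences, and a watermark of 5 blocks.
num_free_blocks = 100
num_required_blocks = 90 + 2  # physical blocks plus one per swapped sequence
watermark_blocks = 5
print(num_free_blocks - num_required_blocks >= watermark_blocks)  # True: swap-in allowed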
__next__
def __next__(self) -> int:
    i = self.counter
    self.counter += 1
    return i
null
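A minimal sketch of how this iterator method might be used; the surrounding Counter class and its starting value are assumptions, not shown in the source.

class Counter:
    """Hypothetical wrapper around the __next__ shown above."""

    def __init__(self, start: int = 0) -> None:
        self.counter = start

    def __next__(self) -> int:
        i = self.counter
        self.counter += 1
        return i

request_counter = Counter()
print(next(request_counter))  # 0
print(next(request_counter))  # 1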
forward
def forward(self, input_):
    bias = self.bias if not self.skip_bias_add else None
    output_parallel = self.linear_method.apply_weights(self.linear_weights,
                                                       input_, bias)
    if self.gather_output:
        output = tensor_model_parallel_all_gather(output_parallel)
    else:
        output = output_parallel
    output_bias = self.bias if self.skip_bias_add else None
    return output, output_bias
null
has_unfinished_requests
"""Returns True if there are unfinished requests.""" return self.scheduler.has_unfinished_seqs()
def has_unfinished_requests(self) ->bool: """Returns True if there are unfinished requests.""" return self.scheduler.has_unfinished_seqs()
Returns True if there are unfinished requests.
run_benchmark
def run_benchmark(num_iters: int, profile: bool = False) -> float:
    torch.cuda.synchronize()
    if profile:
        torch.cuda.cudart().cudaProfilerStart()
    start_time = time.perf_counter()
    for _ in range(num_iters):
        if version == 'v1':
            ops.paged_attention_v1(output, query, key_cache, value_cache,
                                   num_kv_heads, scale, block_tables,
                                   context_lens, block_size, max_context_len,
                                   alibi_slopes)
        elif version == 'v2':
            ops.paged_attention_v2(output, exp_sums, max_logits, tmp_output,
                                   query, key_cache, value_cache, num_kv_heads,
                                   scale, block_tables, context_lens,
                                   block_size, max_context_len, alibi_slopes)
        else:
            raise ValueError(f'Invalid version: {version}')
    torch.cuda.synchronize()
    end_time = time.perf_counter()
    if profile:
        # Stop (not start) profiling once the timed loop has finished.
        torch.cuda.cudart().cudaProfilerStop()
    return (end_time - start_time) / num_iters
null
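A self-contained sketch of the same GPU timing pattern on a stand-in workload (a matmul, not the paged-attention kernels), assuming a CUDA device is available; it illustrates the synchronize-before-and-after pattern, nothing more.

import time
import torch

def time_gpu_op(num_iters: int = 100) -> float:
    a = torch.randn(1024, 1024, device='cuda')
    b = torch.randn(1024, 1024, device='cuda')
    torch.cuda.synchronize()  # make sure setup work has finished
    start = time.perf_counter()
    for _ in range(num_iters):
        _ = a @ b             # stand-in for the kernel under test
    torch.cuda.synchronize()  # wait for all queued kernels before stopping the clock
    return (time.perf_counter() - start) / num_iters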
__init__
def __init__(self, config: GPTJConfig,
             linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.total_num_heads = config.num_attention_heads
    self.hidden_size = config.hidden_size
    self.head_size = self.hidden_size // self.total_num_heads
    self.qkv_proj = QKVParallelLinear(config.hidden_size, self.head_size,
                                      self.total_num_heads, bias=False,
                                      linear_method=linear_method)
    self.out_proj = RowParallelLinear(config.hidden_size, config.hidden_size,
                                      bias=False, linear_method=linear_method)
    tp_world_size = get_tensor_model_parallel_world_size()
    assert self.total_num_heads % tp_world_size == 0
    self.num_heads = self.total_num_heads // tp_world_size
    scaling = self.head_size ** -0.5
    assert getattr(config, 'rotary', True)
    assert config.rotary_dim % 2 == 0
    rope_theta = getattr(config, 'rope_theta', 10000)
    max_position_embeddings = getattr(config, 'max_position_embeddings', 8192)
    self.rotary_emb = get_rope(self.head_size, rotary_dim=config.rotary_dim,
                               max_position=max_position_embeddings,
                               base=rope_theta, is_neox_style=False)
    self.attn = PagedAttention(self.num_heads, self.head_size, scaling)
null
abort_request
"""Abort a request during next background loop iteration.""" if verbose: logger.info(f'Aborted request {request_id}.') self._finished_requests.put_nowait(request_id) if request_id not in self._request_streams or self._request_streams[request_id ].finished: return self._request_streams[request_id].finish()
def abort_request(self, request_id: str, *, verbose: bool=False) ->None: """Abort a request during next background loop iteration.""" if verbose: logger.info(f'Aborted request {request_id}.') self._finished_requests.put_nowait(request_id) if request_id not in self._request_streams or self._request_streams[ request_id].finished: return self._request_streams[request_id].finish()
Abort a request during next background loop iteration.
__init__
def __init__(self, request_id: str, seqs: List[Sequence],
             sampling_params: SamplingParams, arrival_time: float) -> None:
    self.request_id = request_id
    self.seqs_dict = {seq.seq_id: seq for seq in seqs}
    self.sampling_params = sampling_params
    self.arrival_time = arrival_time
    self.prompt_logprobs: Optional[PromptLogprobs] = None
null
forward
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor,
            kv_caches: List[KVCache], input_metadata: InputMetadata) -> torch.Tensor:
    inputs_embeds = self.wte(input_ids)
    position_embeds = self.wpe(position_ids)
    hidden_states = inputs_embeds + position_embeds
    for i in range(len(self.h)):
        layer = self.h[i]
        hidden_states = layer(hidden_states, kv_caches[i], input_metadata)
    hidden_states = self.ln_f(hidden_states)
    return hidden_states
null
_rotate_neox
def _rotate_neox(x: torch.Tensor) -> torch.Tensor:
    x1 = x[..., :x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)
null
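A small, hedged sketch of what this rotate-half helper does on a toy tensor (torch only, reproduced standalone); applying it twice negates the input, which is the property rotary embeddings rely on.

import torch

def rotate_neox(x: torch.Tensor) -> torch.Tensor:
    # Same logic as _rotate_neox above, copied so the demo runs on its own.
    x1 = x[..., :x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
print(rotate_neox(x))               # tensor([-3., -4.,  1.,  2.])
print(rotate_neox(rotate_neox(x)))  # tensor([-1., -2., -3., -4.]) == -x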
forward
def forward(self, x: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    ops.gelu_new(out, x)
    return out
null
forward
def forward(self, x):
    gate_up, _ = self.gate_up_proj(x)
    x = self.act_fn(gate_up)
    x, _ = self.c_proj(x)
    return x
null
create_weights
def create_weights(self, input_size_per_partition: int,
                   output_size_per_partition: int, input_size: int,
                   output_size: int, params_dtype: torch.dtype) -> Dict[str, Any]:
    del output_size
    if input_size_per_partition % self.quant_config.group_size != 0:
        raise ValueError(
            'The input size is not aligned with the quantized weight shape. '
            'This can be caused by too large tensor parallel size.')
    if output_size_per_partition % self.quant_config.pack_factor != 0:
        raise ValueError(
            'The output size is not aligned with the quantized weight shape. '
            'This can be caused by too large tensor parallel size.')
    if self.quant_config.group_size != -1:
        group_size = self.quant_config.group_size
    else:
        group_size = input_size
    exllama_state = ExllamaState.UNINITIALIZED
    scale_and_zero_size = input_size // group_size
    scale_and_zero_input_dim = None
    if (input_size != input_size_per_partition
            and self.quant_config.group_size != -1):
        if self.quant_config.desc_act:
            exllama_state = ExllamaState.UNUSED
        else:
            scale_and_zero_size = input_size_per_partition // group_size
            scale_and_zero_input_dim = 0
    qweight = Parameter(
        torch.empty(input_size_per_partition // self.quant_config.pack_factor,
                    output_size_per_partition, device='cuda', dtype=torch.int32),
        requires_grad=False)
    set_weight_attrs(qweight, {'input_dim': 0, 'output_dim': 1, 'packed_dim': 0,
                               'pack_factor': self.quant_config.pack_factor})
    g_idx = Parameter(
        torch.tensor([(i // self.quant_config.group_size)
                      for i in range(input_size_per_partition)],
                     device='cuda', dtype=torch.int32),
        requires_grad=False)
    set_weight_attrs(g_idx, {'input_dim': 0, 'ignore_warning': True})
    qzeros = Parameter(
        torch.empty(scale_and_zero_size,
                    output_size_per_partition // self.quant_config.pack_factor,
                    device='cuda', dtype=torch.int32),
        requires_grad=False)
    set_weight_attrs(qzeros, {'input_dim': scale_and_zero_input_dim,
                              'output_dim': 1, 'packed_dim': 1,
                              'pack_factor': self.quant_config.pack_factor})
    scales = Parameter(
        torch.empty(scale_and_zero_size, output_size_per_partition,
                    device='cuda', dtype=params_dtype),
        requires_grad=False)
    set_weight_attrs(scales, {'input_dim': scale_and_zero_input_dim,
                              'output_dim': 1})
    return {'qweight': qweight, 'g_idx': g_idx, 'qzeros': qzeros,
            'scales': scales, 'exllama_state': exllama_state}
null
warm_up_model
def warm_up_model(self) -> None:
    if not self.model_config.enforce_eager:
        self.model_runner.capture_model(self.gpu_cache)
    set_random_seed(self.model_config.seed)
null
_setup_logger
def _setup_logger():
    _root_logger.setLevel(logging.DEBUG)
    global _default_handler
    if _default_handler is None:
        _default_handler = logging.StreamHandler(sys.stdout)
        _default_handler.flush = sys.stdout.flush
        _default_handler.setLevel(logging.INFO)
        _root_logger.addHandler(_default_handler)
    fmt = NewLineFormatter(_FORMAT, datefmt=_DATE_FORMAT)
    _default_handler.setFormatter(fmt)
    _root_logger.propagate = False
null
__init__
def __init__(self, hidden_size: int, head_size: int, total_num_heads: int,
             total_num_kv_heads: Optional[int] = None, bias: bool = True,
             skip_bias_add: bool = False,
             params_dtype: Optional[torch.dtype] = None,
             linear_method: Optional[LinearMethodBase] = None):
    self.hidden_size = hidden_size
    self.head_size = head_size
    self.total_num_heads = total_num_heads
    if total_num_kv_heads is None:
        total_num_kv_heads = total_num_heads
    self.total_num_kv_heads = total_num_kv_heads
    tp_size = get_tensor_model_parallel_world_size()
    self.num_heads = divide(self.total_num_heads, tp_size)
    if tp_size >= self.total_num_kv_heads:
        self.num_kv_heads = 1
        self.num_kv_head_replicas = divide(tp_size, self.total_num_kv_heads)
    else:
        self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
        self.num_kv_head_replicas = 1
    input_size = self.hidden_size
    output_size = (self.num_heads + 2 * self.num_kv_heads) * tp_size * self.head_size
    super().__init__(input_size, output_size, bias, False, skip_bias_add,
                     params_dtype, linear_method)
null
__init__
def __init__(self, intermediate_size: int, config: GPT2Config,
             linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    hidden_size = config.hidden_size
    self.c_fc = ColumnParallelLinear(hidden_size, intermediate_size, bias=True,
                                     linear_method=linear_method)
    self.c_proj = RowParallelLinear(intermediate_size, hidden_size, bias=True,
                                    linear_method=linear_method)
    quant_config = getattr(linear_method, 'quant_config', None)
    self.act = get_act_fn(config.activation_function, quant_config,
                          intermediate_size)
null
_preempt_by_recompute
def _preempt_by_recompute(self, seq_group: SequenceGroup) -> None:
    seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
    assert len(seqs) == 1
    for seq in seqs:
        seq.status = SequenceStatus.WAITING
        self.block_manager.free(seq)
    self.waiting.insert(0, seq_group)
null
__init__
def __init__(self, head_size: int, rotary_dim: int,
             max_position_embeddings: int, base: int, is_neox_style: bool,
             scaling_factor: float, *, extrapolation_factor: float = 1,
             attn_factor: float = 1, beta_fast: float = 32,
             beta_slow: float = 1) -> None:
    self.scaling_factor = scaling_factor
    self.extrapolation_factor = extrapolation_factor
    self.attn_factor = attn_factor
    self.beta_fast = beta_fast
    self.beta_slow = beta_slow
    self.mscale = float(_yarn_get_mscale(self.scaling_factor) * attn_factor)
    super().__init__(head_size, rotary_dim, max_position_embeddings, base,
                     is_neox_style)
null
__init__
def __init__(self, config: AquilaConfig,
             linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.config = config
    self.padding_idx = config.pad_token_id
    self.vocab_size = config.vocab_size
    self.embed_tokens = VocabParallelEmbedding(config.vocab_size,
                                               config.hidden_size)
    self.layers = nn.ModuleList([
        AquilaDecoderLayer(config, linear_method)
        for _ in range(config.num_hidden_layers)
    ])
    self.norm = AquilaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
__init__
def __init__(self, config, linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.config = config
    self.linear_method = linear_method
    self.model = AquilaModel(config, linear_method)
    self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
    self.sampler = Sampler(config.vocab_size)
null
__eq__
def __eq__(self, other: object) -> bool:
    if not isinstance(other, SequenceOutput):
        raise NotImplementedError()
    return (self.parent_seq_id == other.parent_seq_id
            and self.output_token == other.output_token
            and self.logprobs == other.logprobs)
null
http_bot
def http_bot(prompt):
    headers = {'User-Agent': 'vLLM Client'}
    pload = {'prompt': prompt, 'stream': True, 'max_tokens': 128}
    response = requests.post(args.model_url, headers=headers, json=pload,
                             stream=True)
    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False,
                                     delimiter=b'\x00'):
        if chunk:
            data = json.loads(chunk.decode('utf-8'))
            output = data['text'][0]
            yield output
null
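A hedged usage sketch, assuming a vLLM api_server is already running and that `args.model_url` points at its /generate endpoint; each yielded value appears to be the full text generated so far, so a simple consumer can just keep the last one.

final_text = ''
for final_text in http_bot('San Francisco is a'):
    pass  # keep only the most recent (longest) completion
print(final_text)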
load_weights
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str] = None,
                 load_format: str = 'auto', revision: Optional[str] = None):
    stacked_params_mapping = [
        ('qkv_proj', 'q_proj', 'q'),
        ('qkv_proj', 'k_proj', 'k'),
        ('qkv_proj', 'v_proj', 'v'),
        ('gate_up_proj', 'gate_proj', 0),
        ('gate_up_proj', 'up_proj', 1),
    ]
    params_dict = dict(self.named_parameters())
    for name, loaded_weight in hf_model_weights_iterator(
            model_name_or_path, cache_dir, load_format, revision):
        if 'attn.bias' in name or 'attn.masked_bias' in name:
            continue
        for param_name, weight_name, shard_id in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            if name.endswith('.bias') and name not in params_dict:
                continue
            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            if name.endswith('.bias') and name not in params_dict:
                continue
            param = params_dict[name]
            weight_loader = getattr(param, 'weight_loader', default_weight_loader)
            weight_loader(param, loaded_weight)
null
__init__
def __init__(self, hidden_size: int, num_heads: int, bias: bool,
             rope_theta: float = 10000, max_position_embeddings: int = 8192,
             linear_method: Optional[LinearMethodBase] = None,
             rope_scaling: Optional[Dict[str, Any]] = None):
    super().__init__()
    self.hidden_size = hidden_size
    tensor_model_parallel_world_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = num_heads
    assert self.total_num_heads % tensor_model_parallel_world_size == 0
    self.num_heads = self.total_num_heads // tensor_model_parallel_world_size
    self.head_dim = hidden_size // self.total_num_heads
    self.scaling = self.head_dim ** -0.5
    self.rope_theta = rope_theta
    self.max_position_embeddings = max_position_embeddings
    self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim,
                                      self.total_num_heads, bias=bias,
                                      linear_method=linear_method)
    self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
                                    hidden_size, bias=bias,
                                    linear_method=linear_method)
    self.rotary_emb = get_rope(self.head_dim, rotary_dim=self.head_dim,
                               max_position=self.max_position_embeddings,
                               base=self.rope_theta, rope_scaling=rope_scaling)
    self.attn = PagedAttention(self.num_heads, self.head_dim, self.scaling)
null
__init__
def __init__(self, num_experts: int, hidden_size: int, intermediate_size: int,
             linear_method: Optional[LinearMethodBase] = None) -> None:
    super().__init__()
    self.num_experts = num_experts
    self.ffn_dim = intermediate_size
    self.hidden_dim = hidden_size
    self.w1 = ReplicatedLinear(self.hidden_dim, self.ffn_dim, bias=False,
                               linear_method=linear_method)
    self.w2 = ReplicatedLinear(self.ffn_dim, self.hidden_dim, bias=False,
                               linear_method=linear_method)
    self.w3 = ReplicatedLinear(self.hidden_dim, self.ffn_dim, bias=False,
                               linear_method=linear_method)
    self.act_fn = nn.SiLU()
null
_allocate
def _allocate(self, seq_group: SequenceGroup) -> None:
    self.block_manager.allocate(seq_group)
    for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
        seq.status = SequenceStatus.RUNNING
null
set_random_seed
def set_random_seed(seed: int) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
null
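A small, self-contained check of the idea behind this helper (standard library, NumPy, and torch only): re-seeding the generators reproduces the same draws. The CUDA branch is omitted so the snippet runs on CPU-only machines.

import random
import numpy as np
import torch

def seed_everything(seed: int) -> None:
    # Same recipe as set_random_seed above, minus the CUDA branch.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

seed_everything(42)
a = (random.random(), np.random.rand(), torch.rand(1).item())
seed_everything(42)
b = (random.random(), np.random.rand(), torch.rand(1).item())
assert a == b  # identical draws after re-seeding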
forward
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor,
            kv_cache: KVCache, input_metadata: InputMetadata) -> torch.Tensor:
    qkv, _ = self.qkv_proj(hidden_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
    q, k = self.rotary_emb(positions, q, k)
    k_cache, v_cache = kv_cache
    attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
    output, _ = self.o_proj(attn_output)
    return output
null
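A hedged toy illustration of the fused-QKV split above (torch only; the sizes are made up, not from the source): one projection output of width q_size + 2 * kv_size is sliced into query, key, and value tensors.

import torch

num_tokens, head_dim = 4, 8
num_heads, num_kv_heads = 4, 2      # hypothetical grouped-query setup
q_size = num_heads * head_dim       # 32
kv_size = num_kv_heads * head_dim   # 16

qkv = torch.randn(num_tokens, q_size + 2 * kv_size)
q, k, v = qkv.split([q_size, kv_size, kv_size], dim=-1)
print(q.shape, k.shape, v.shape)    # torch.Size([4, 32]) torch.Size([4, 16]) torch.Size([4, 16])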
_check_stop
"""Stop the finished sequences.""" for stop_str in sampling_params.stop: if seq.output_text.endswith(stop_str): if not sampling_params.include_stop_str_in_output: seq.output_text = seq.output_text[:-len(stop_str)] seq.status = SequenceStatus.FINISHED_STOPPED return if seq.get_last_token_id() in sampling_params.stop_token_ids: seq.status = SequenceStatus.FINISHED_STOPPED return if seq.get_len() > self.scheduler_config.max_model_len: seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED return if seq.get_output_len() == sampling_params.max_tokens: seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED return if not sampling_params.ignore_eos and seq.get_last_token_id( ) == self.tokenizer.eos_token_id: seq.status = SequenceStatus.FINISHED_STOPPED return
def _check_stop(self, seq: Sequence, sampling_params: SamplingParams) ->None: """Stop the finished sequences.""" for stop_str in sampling_params.stop: if seq.output_text.endswith(stop_str): if not sampling_params.include_stop_str_in_output: seq.output_text = seq.output_text[:-len(stop_str)] seq.status = SequenceStatus.FINISHED_STOPPED return if seq.get_last_token_id() in sampling_params.stop_token_ids: seq.status = SequenceStatus.FINISHED_STOPPED return if seq.get_len() > self.scheduler_config.max_model_len: seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED return if seq.get_output_len() == sampling_params.max_tokens: seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED return if not sampling_params.ignore_eos and seq.get_last_token_id( ) == self.tokenizer.eos_token_id: seq.status = SequenceStatus.FINISHED_STOPPED return
Stop the finished sequences.
__repr__
def __repr__(self) -> str:
    return (f'CompletionOutput(index={self.index}, '
            f'text={self.text!r}, '
            f'token_ids={self.token_ids}, '
            f'cumulative_logprob={self.cumulative_logprob}, '
            f'logprobs={self.logprobs}, '
            f'finish_reason={self.finish_reason})')
null
create_weights
def create_weights(self, input_size_per_partition: int,
                   output_size_per_partition: int, input_size: int,
                   output_size: int, params_dtype: torch.dtype) -> Dict[str, Any]:
    if input_size_per_partition % self.quant_config.pack_factor != 0:
        raise ValueError(
            'The input size is not aligned with the quantized weight shape. '
            'This can be caused by too large tensor parallel size.')
    qweight = Parameter(
        torch.empty(input_size_per_partition // self.quant_config.pack_factor,
                    output_size_per_partition, device='cuda', dtype=torch.int32),
        requires_grad=False)
    set_weight_attrs(qweight, {'input_dim': 0, 'output_dim': 1, 'packed_dim': 0,
                               'pack_factor': self.quant_config.pack_factor})
    lookup_table = Parameter(
        torch.empty(output_size, self.quant_config.weight_bits ** 2,
                    device='cuda', dtype=params_dtype),
        requires_grad=False)
    set_weight_attrs(lookup_table, {'output_dim': 0})
    return {'qweight': qweight, 'lookup_table': lookup_table}
null
forward
def forward(self, positions: torch.Tensor):
    return super().forward(positions + self.offset)
null
__init__
def __init__(self, config: GPTBigCodeConfig,
             linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.config = config
    self.linear_method = linear_method
    self.transformer = GPTBigCodeModel(config, linear_method)
    self.lm_head_weight = self.transformer.wte.weight
    self.sampler = Sampler(config.vocab_size)
null
test_paged_attention
@pytest.mark.parametrize('version', ['v1', 'v2'])
@pytest.mark.parametrize('num_seqs', NUM_GEN_SEQS)
@pytest.mark.parametrize('num_heads', NUM_HEADS)
@pytest.mark.parametrize('head_size', HEAD_SIZES)
@pytest.mark.parametrize('use_alibi', USE_ALIBI)
@pytest.mark.parametrize('block_size', BLOCK_SIZES)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('seed', SEEDS)
@pytest.mark.parametrize('device', DEVICES)
def test_paged_attention(kv_cache_factory, version: str, num_seqs: int,
                         num_heads: Tuple[int, int], head_size: int,
                         use_alibi: bool, block_size: int, dtype: torch.dtype,
                         seed: int, device: int) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    gpu_id = f'cuda:{device}'
    scale = float(1.0 / head_size ** 0.5)
    num_query_heads, num_kv_heads = num_heads
    query = torch.empty(num_seqs, num_query_heads, head_size, dtype=dtype,
                        device=gpu_id)
    query.uniform_(-scale, scale)
    assert num_query_heads % num_kv_heads == 0
    num_queries_per_kv = num_query_heads // num_kv_heads
    alibi_slopes = None
    if use_alibi:
        alibi_slopes = torch.randn(num_query_heads, dtype=torch.float,
                                   device=gpu_id)
    context_lens = [random.randint(1, MAX_SEQ_LEN) for _ in range(num_seqs)]
    context_lens[-1] = MAX_SEQ_LEN
    max_context_len = max(context_lens)
    context_lens = torch.tensor(context_lens, dtype=torch.int, device=gpu_id)
    max_num_blocks_per_seq = (max_context_len + block_size - 1) // block_size
    block_tables = []
    for _ in range(num_seqs):
        block_table = [random.randint(0, NUM_BLOCKS - 1)
                       for _ in range(max_num_blocks_per_seq)]
        block_tables.append(block_table)
    block_tables = torch.tensor(block_tables, dtype=torch.int, device=gpu_id)
    key_caches, value_caches = kv_cache_factory(NUM_BLOCKS, block_size, 1,
                                                num_kv_heads, head_size, dtype,
                                                seed, gpu_id)
    key_cache, value_cache = key_caches[0], value_caches[0]
    output = torch.empty_like(query)
    if version == 'v1':
        ops.paged_attention_v1(output, query, key_cache, value_cache,
                               num_kv_heads, scale, block_tables, context_lens,
                               block_size, max_context_len, alibi_slopes)
    elif version == 'v2':
        num_partitions = (max_context_len + PARTITION_SIZE - 1) // PARTITION_SIZE
        assert PARTITION_SIZE % block_size == 0
        num_seqs, num_heads, head_size = output.shape
        tmp_output = torch.empty(size=(num_seqs, num_heads, num_partitions,
                                       head_size),
                                 dtype=output.dtype, device=output.device)
        exp_sums = torch.empty(size=(num_seqs, num_heads, num_partitions),
                               dtype=torch.float32, device=output.device)
        max_logits = torch.empty_like(exp_sums)
        ops.paged_attention_v2(output, exp_sums, max_logits, tmp_output, query,
                               key_cache, value_cache, num_kv_heads, scale,
                               block_tables, context_lens, block_size,
                               max_context_len, alibi_slopes)
    else:
        raise AssertionError(f'Unknown version: {version}')
    ref_output = torch.empty_like(query)
    ref_single_query_cached_kv_attention(ref_output, query, num_queries_per_kv,
                                         key_cache, value_cache, block_tables,
                                         context_lens, scale, alibi_slopes)
    assert torch.allclose(output, ref_output, atol=0.001, rtol=1e-05)
null
__init__
def __init__(self, n: int = 1, best_of: Optional[int] = None,
             presence_penalty: float = 0.0, frequency_penalty: float = 0.0,
             repetition_penalty: float = 1.0, temperature: float = 1.0,
             top_p: float = 1.0, top_k: int = -1, min_p: float = 0.0,
             use_beam_search: bool = False, length_penalty: float = 1.0,
             early_stopping: Union[bool, str] = False,
             stop: Optional[Union[str, List[str]]] = None,
             stop_token_ids: Optional[List[int]] = None,
             include_stop_str_in_output: bool = False,
             ignore_eos: bool = False, max_tokens: int = 16,
             logprobs: Optional[int] = None,
             prompt_logprobs: Optional[int] = None,
             skip_special_tokens: bool = True,
             spaces_between_special_tokens: bool = True,
             logits_processors: Optional[List[LogitsProcessor]] = None) -> None:
    self.n = n
    self.best_of = best_of if best_of is not None else n
    self.presence_penalty = presence_penalty
    self.frequency_penalty = frequency_penalty
    self.repetition_penalty = repetition_penalty
    self.temperature = temperature
    self.top_p = top_p
    self.top_k = top_k
    self.min_p = min_p
    self.use_beam_search = use_beam_search
    self.length_penalty = length_penalty
    self.early_stopping = early_stopping
    if stop is None:
        self.stop = []
    elif isinstance(stop, str):
        self.stop = [stop]
    else:
        self.stop = list(stop)
    if stop_token_ids is None:
        self.stop_token_ids = []
    else:
        self.stop_token_ids = list(stop_token_ids)
    self.ignore_eos = ignore_eos
    self.max_tokens = max_tokens
    self.logprobs = logprobs
    self.prompt_logprobs = prompt_logprobs
    self.skip_special_tokens = skip_special_tokens
    self.spaces_between_special_tokens = spaces_between_special_tokens
    self.logits_processors = logits_processors
    self.include_stop_str_in_output = include_stop_str_in_output
    self._verify_args()
    if self.use_beam_search:
        self._verify_beam_search()
    else:
        self._verify_non_beam_search()
        if self.temperature < _SAMPLING_EPS:
            self.top_p = 1.0
            self.top_k = -1
            self.min_p = 0.0
            self._verify_greedy_sampling()
null
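A hedged usage sketch of the public vLLM API these parameters back; the keyword names match the signature above, and the import path assumes the installed vllm package.

from vllm import SamplingParams

# Greedy decoding: near-zero temperature collapses top_p/top_k/min_p internally.
greedy = SamplingParams(temperature=0.0, max_tokens=64)

# Nucleus sampling with an explicit stop string.
sampled = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=128,
                         stop=['\n\n'], ignore_eos=False)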
_apply_top_p_top_k
def _apply_top_p_top_k(logits: torch.Tensor, p: torch.Tensor,
                       k: torch.Tensor) -> torch.Tensor:
    logits_sort, logits_idx = logits.sort(dim=-1, descending=True)
    probs_sort = logits_sort.softmax(dim=-1)
    probs_sum = probs_sort.cumsum(dim=-1).sub_(probs_sort)
    top_p_mask = probs_sum > p.unsqueeze_(dim=1)
    top_k_mask = torch.arange(logits_idx.shape[-1], device=logits_idx.device)
    top_k_mask = top_k_mask.expand(logits_idx.shape[0], -1)
    top_k_mask = top_k_mask >= k.unsqueeze_(dim=1)
    mask = top_p_mask | top_k_mask
    logits_sort.masked_fill_(mask, -float('inf'))
    src = torch.arange(logits_idx.shape[-1],
                       device=logits_idx.device).expand_as(logits_idx)
    logits_idx_inv = torch.empty_like(logits_idx).scatter_(dim=-1,
                                                           index=logits_idx,
                                                           src=src)
    logits = torch.gather(logits_sort, dim=-1, index=logits_idx_inv)
    return logits
null
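A hedged toy check of what this filtering achieves, written as a simpler per-row formulation (torch.topk for the k cut-off, a cumulative-probability cut-off for p). It mirrors the idea, not the exact batched implementation above; all numbers are made up.

import torch

logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
top_p, top_k = 0.9, 3

# Top-k: keep only the k largest logits.
kth = torch.topk(logits, top_k).values[-1]
filtered = torch.where(logits >= kth, logits, torch.tensor(float('-inf')))

# Top-p: in descending order, drop a token once the cumulative probability of
# the tokens before it already exceeds p.
sorted_logits, order = filtered.sort(descending=True)
probs = sorted_logits.softmax(dim=-1)
cum_excl = probs.cumsum(dim=-1) - probs
sorted_logits[cum_excl > top_p] = float('-inf')
filtered = torch.empty_like(filtered).scatter_(0, order, sorted_logits)
print(filtered)  # tensor([2.0000, 1.0000, 0.5000,   -inf,   -inf])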
__init__
def __init__(self, weight_bits: int) -> None:
    self.weight_bits = weight_bits
    if self.weight_bits != 4:
        raise ValueError(
            'Currently, only 4-bit weight quantization is supported for '
            f'SqueezeLLM, but got {self.weight_bits} bits.')
    self.pack_factor = 32 // self.weight_bits
null
api_server
@pytest.fixture
def api_server():
    script_path = Path(__file__).parent.joinpath(
        'api_server_async_engine.py').absolute()
    uvicorn_process = subprocess.Popen(
        [sys.executable, '-u', str(script_path), '--model', 'facebook/opt-125m'])
    yield
    uvicorn_process.terminate()
null
set_cuda_visible_devices
def set_cuda_visible_devices(device_ids: List[int]) -> None:
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, device_ids))
null
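A short, hedged usage sketch (standard library only, function reproduced so the snippet runs standalone); note that CUDA_VISIBLE_DEVICES only takes effect if it is set before the CUDA context is initialized.

import os
from typing import List

def set_cuda_visible_devices(device_ids: List[int]) -> None:
    # Same one-liner as above.
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, device_ids))

set_cuda_visible_devices([0, 2])
print(os.environ['CUDA_VISIBLE_DEVICES'])  # '0,2'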
test_no_load_chat_template
def test_no_load_chat_template():
    template = '../../examples/does_not_exist'
    mock_args = Namespace(chat_template=template)
    tokenizer = MockTokenizer()
    load_chat_template(mock_args, tokenizer=tokenizer)
    template_content = tokenizer.chat_template
    assert template_content is not None
    assert template_content == '../../examples/does_not_exist'
null
forward
def forward(self, input_ids: torch.LongTensor):
    return self.wte(input_ids)
null
__init__
def __init__(self, head_size: int, rotary_dim: int,
             max_position_embeddings: int, base: int, is_neox_style: bool,
             scaling_factor: float) -> None:
    self.scaling_factor = scaling_factor
    super().__init__(head_size, rotary_dim, max_position_embeddings, base,
                     is_neox_style)
null
__init__
def __init__(self, config, linear_method: Optional[LinearMethodBase] = None):
    super().__init__()
    self.add_bias = config.add_bias_linear
    self.dense_h_to_4h = MergedColumnParallelLinear(
        config.hidden_size, [config.ffn_hidden_size] * 2,
        bias=config.add_bias_linear, linear_method=linear_method)
    self.activation_func = SiluAndMul()
    self.dense_4h_to_h = RowParallelLinear(
        config.ffn_hidden_size, config.hidden_size,
        bias=config.add_bias_linear, linear_method=linear_method)
null
_forward
"""PyTorch-native implementation equivalent to forward().""" return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)) )
def _forward(self, x: torch.Tensor) ->torch.Tensor: """PyTorch-native implementation equivalent to forward().""" return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
PyTorch-native implementation equivalent to forward().
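A hedged numeric check (torch only, assuming PyTorch 1.12 or newer where approximate='tanh' is available) that this tanh formula matches PyTorch's built-in approximate GELU; 0.7978845608 is sqrt(2/pi).

import torch
import torch.nn.functional as F

def gelu_new(x: torch.Tensor) -> torch.Tensor:
    # Same formula as _forward above, standalone for the comparison.
    return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))

x = torch.linspace(-4, 4, steps=9)
assert torch.allclose(gelu_new(x), F.gelu(x, approximate='tanh'), atol=1e-6)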
__init__
def __init__(self, config: PretrainedConfig):
    super().__init__()
    self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
    self.linear = ParallelLMHead(config.vocab_size, config.hidden_size,
                                 bias=True)
null
_validate_config
def _validate_config(self) -> None:
    self.attn_config = self._set_config_defaults(self.attn_config,
                                                 attn_config_defaults)
    self.ffn_config = self._set_config_defaults(self.ffn_config,
                                                ffn_config_defaults)
    self.init_config = self._set_config_defaults(self.init_config,
                                                 init_config_defaults)
    if self.d_model % self.n_heads != 0:
        raise ValueError('d_model must be divisible by n_heads')
    if any(prob < 0 or prob > 1 for prob in
           [self.attn_config['attn_pdrop'], self.resid_pdrop, self.emb_pdrop]):
        raise ValueError(
            "self.attn_config['attn_pdrop'], resid_pdrop, emb_pdrop are "
            'probabilities and must be between 0 and 1')
    if self.attn_config['attn_impl'] not in ['torch', 'flash', 'triton']:
        raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
    if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in [
            'torch', 'triton']:
        raise NotImplementedError(
            'prefix_lm only implemented with torch and triton attention.')
    if self.attn_config['alibi'] and self.attn_config['attn_impl'] not in [
            'torch', 'triton']:
        raise NotImplementedError(
            'alibi only implemented with torch and triton attention.')
    if self.attn_config['attn_uses_sequence_id'] and self.attn_config[
            'attn_impl'] not in ['torch', 'triton']:
        raise NotImplementedError(
            'attn_uses_sequence_id only implemented with torch and triton '
            'attention.')
    if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
        raise ValueError(
            'model.embedding_fraction must be between 0 (exclusive) and 1 '
            '(inclusive)!')
    if isinstance(self.logit_scale,
                  str) and self.logit_scale != 'inv_sqrt_d_model':
        raise ValueError(
            f"self.logit_scale={self.logit_scale!r} is not recognized as an "
            "option; use numeric value or 'inv_sqrt_d_model'.")
    if self.init_config.get('name', None) is None:
        raise ValueError(
            f"self.init_config={self.init_config!r} 'name' needs to be set.")
    if not self.learned_pos_emb and not self.attn_config['alibi']:
        warnings.warn('Positional information not being provided to the model.',
                      stacklevel=2)
    if self.fc_type == 'te' or self.ffn_config['ffn_type'] == 'te_ln_mlp':
        try:
            import transformer_engine.pytorch as te
            del te
        except Exception as exc:
            raise ImportError(
                'TransformerEngine import fail. `fc_type: te` requires '
                'TransformerEngine be installed. '
                + 'The required version of transformer_engine also requires '
                'FlashAttention v1.0.6 is installed: '
                + 'pip install flash-attn==1.0.6 --no-build-isolation \n'
                + 'pip install git+https://github.com/NVIDIA/TransformerEngine.git@144e4888b2cdd60bd52e706d5b7a79cb9c1a7156'
            ) from exc
    if self.ffn_config['ffn_type'] == 'mptmlp':
        self.ffn_config['fc_type'] = self.fc_type
    elif self.ffn_config['ffn_type'] == 'te_ln_mlp':
        self.ffn_config['bias'] = not self.no_bias
null
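A hedged illustration of how these checks surface to callers, assuming the method is invoked from the __init__ of an MPTConfig-style class (as in the MosaicML MPT configuration) and that such a class is importable in the current environment; the kwargs below are illustrative, not taken from this document.

try:
    MPTConfig(d_model=100, n_heads=7)  # 100 is not divisible by 7
except ValueError as err:
    print(err)  # d_model must be divisible by n_heads

try:
    MPTConfig(d_model=128, n_heads=8, embedding_fraction=1.5)
except ValueError as err:
    print(err)  # model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!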
get_output_len
return len(self.output_token_ids)
def get_output_len(self) ->int: return len(self.output_token_ids)
null
get_scaled_act_names
return []
def get_scaled_act_names(self) ->List[str]: return []
null
load_weights
params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'lm_head.weight' in name: continue if '.attn.bias' in name: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): params_dict = dict(self.named_parameters(remove_duplicate=False)) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'lm_head.weight' in name: continue if '.attn.bias' in name: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
get_vocab_size
return self.hf_config.vocab_size
def get_vocab_size(self) ->int: return self.hf_config.vocab_size
null
can_allocate
seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0] num_required_blocks = len(seq.logical_token_blocks) if self.block_sliding_window is not None: num_required_blocks = min(num_required_blocks, self.block_sliding_window) num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks() if self.num_total_gpu_blocks - num_required_blocks < self.watermark_blocks: return AllocStatus.NEVER if num_free_gpu_blocks - num_required_blocks >= self.watermark_blocks: return AllocStatus.OK else: return AllocStatus.LATER
def can_allocate(self, seq_group: SequenceGroup) ->AllocStatus: seq = seq_group.get_seqs(status=SequenceStatus.WAITING)[0] num_required_blocks = len(seq.logical_token_blocks) if self.block_sliding_window is not None: num_required_blocks = min(num_required_blocks, self. block_sliding_window) num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks() if self.num_total_gpu_blocks - num_required_blocks < self.watermark_blocks: return AllocStatus.NEVER if num_free_gpu_blocks - num_required_blocks >= self.watermark_blocks: return AllocStatus.OK else: return AllocStatus.LATER
null
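A self-contained sketch of the admission logic above, with AllocStatus stubbed and made-up block counts; it mirrors the three-way watermark decision rather than vLLM's real block manager.

from enum import Enum

class AllocStatus(Enum):
    OK = 0
    LATER = 1
    NEVER = 2

def can_allocate_sketch(num_required_blocks: int, num_free_gpu_blocks: int,
                        num_total_gpu_blocks: int, watermark_blocks: int) -> AllocStatus:
    if num_total_gpu_blocks - num_required_blocks < watermark_blocks:
        return AllocStatus.NEVER   # cannot fit even on an empty cache
    if num_free_gpu_blocks - num_required_blocks >= watermark_blocks:
        return AllocStatus.OK      # fits now, with the watermark to spare
    return AllocStatus.LATER       # fits in principle; wait for blocks to free up

print(can_allocate_sketch(10, 100, 512, 8))   # AllocStatus.OK
print(can_allocate_sketch(10, 12, 512, 8))    # AllocStatus.LATER
print(can_allocate_sketch(600, 512, 512, 8))  # AllocStatus.NEVER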
forward
del position_ids qkv, _ = self.query_key_value(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.dense(attn_output) return output
def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: del position_ids qkv, _ = self.query_key_value(hidden_states) q, k, v = qkv.chunk(chunks=3, dim=-1) k_cache, v_cache = kv_cache attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata) output, _ = self.dense(attn_output) return output
null
_prepare_test
vocab_size = 32000 input_tensor = torch.rand((batch_size, 1024), device='cuda', dtype=torch. float16) fake_logits = torch.full((batch_size, vocab_size), 0.01, device= input_tensor.device, dtype=input_tensor.dtype) sampler = MockLogitsSampler(32000, fake_logits) model_runner = ModelRunner(None, None, None) return input_tensor, fake_logits, sampler, model_runner
def _prepare_test(batch_size: int) ->Tuple[torch.Tensor, torch.Tensor, MockLogitsSampler, ModelRunner]: vocab_size = 32000 input_tensor = torch.rand((batch_size, 1024), device='cuda', dtype= torch.float16) fake_logits = torch.full((batch_size, vocab_size), 0.01, device= input_tensor.device, dtype=input_tensor.dtype) sampler = MockLogitsSampler(32000, fake_logits) model_runner = ModelRunner(None, None, None) return input_tensor, fake_logits, sampler, model_runner
null
__repr__
return f'SequenceGroupOutput(samples={self.samples}, prompt_logprobs={self.prompt_logprobs})'
def __repr__(self) ->str: return ( f'SequenceGroupOutput(samples={self.samples}, prompt_logprobs={self.prompt_logprobs})' )
null
get_node_ip
return get_ip()
def get_node_ip(self) ->str: return get_ip()
null
get_supported_act_dtypes
"""List of supported activation dtypes.""" raise NotImplementedError
@abstractmethod def get_supported_act_dtypes(self) ->List[torch.dtype]: """List of supported activation dtypes.""" raise NotImplementedError
List of supported activation dtypes.
sample
next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
def sample(self, hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata) ->Optional[SamplerOutput]: next_tokens = self.sampler(self.lm_head.weight, hidden_states, sampling_metadata) return next_tokens
null
forward
inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds for i in range(len(self.h)): layer = self.h[i] hidden_states = layer(hidden_states, kv_caches[i], input_metadata) hidden_states = self.ln_f(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds for i in range(len(self.h)): layer = self.h[i] hidden_states = layer(hidden_states, kv_caches[i], input_metadata) hidden_states = self.ln_f(hidden_states) return hidden_states
null
__getstate__
state = self.__dict__.copy() state['sp_model'] = None return state
def __getstate__(self): state = self.__dict__.copy() state['sp_model'] = None return state
null
create_engine_configs
model_config = ModelConfig(self.model, self.tokenizer, self.tokenizer_mode, self.trust_remote_code, self.download_dir, self.load_format, self.dtype, self.seed, self.revision, self.tokenizer_revision, self.max_model_len, self.quantization, self.enforce_eager, self.max_context_len_to_capture) cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization, self.swap_space, model_config.get_sliding_window()) parallel_config = ParallelConfig(self.pipeline_parallel_size, self. tensor_parallel_size, self.worker_use_ray, self. max_parallel_loading_workers) scheduler_config = SchedulerConfig(self.max_num_batched_tokens, self. max_num_seqs, model_config.max_model_len, self.max_paddings) return model_config, cache_config, parallel_config, scheduler_config
def create_engine_configs(self) ->Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]: model_config = ModelConfig(self.model, self.tokenizer, self. tokenizer_mode, self.trust_remote_code, self.download_dir, self. load_format, self.dtype, self.seed, self.revision, self. tokenizer_revision, self.max_model_len, self.quantization, self. enforce_eager, self.max_context_len_to_capture) cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization, self.swap_space, model_config.get_sliding_window()) parallel_config = ParallelConfig(self.pipeline_parallel_size, self. tensor_parallel_size, self.worker_use_ray, self. max_parallel_loading_workers) scheduler_config = SchedulerConfig(self.max_num_batched_tokens, self. max_num_seqs, model_config.max_model_len, self.max_paddings) return model_config, cache_config, parallel_config, scheduler_config
null
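A hedged usage sketch, assuming this method belongs to vLLM's EngineArgs dataclass (vllm.engine.arg_utils) as it did at the time; constructing ModelConfig fetches the Hugging Face model config, so network access is needed.

from vllm.engine.arg_utils import EngineArgs

engine_args = EngineArgs(model='facebook/opt-125m')
model_config, cache_config, parallel_config, scheduler_config = (
    engine_args.create_engine_configs())
print(parallel_config.tensor_parallel_size, scheduler_config.max_num_seqs)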
_run_incremental_decode
decoded_text = '' offset = 0 token_offset = 0 prev_tokens = None for i in range(len(all_input_ids)): new_tokens, text, offset, token_offset = detokenize_incrementally(tokenizer , all_input_ids[:i + 1], prev_tokens, offset, token_offset, skip_special_tokens=skip_special_tokens) decoded_text += text if prev_tokens is None: prev_tokens = new_tokens else: prev_tokens += new_tokens return decoded_text
def _run_incremental_decode(tokenizer, all_input_ids, skip_special_tokens: bool ): decoded_text = '' offset = 0 token_offset = 0 prev_tokens = None for i in range(len(all_input_ids)): new_tokens, text, offset, token_offset = detokenize_incrementally( tokenizer, all_input_ids[:i + 1], prev_tokens, offset, token_offset, skip_special_tokens=skip_special_tokens) decoded_text += text if prev_tokens is None: prev_tokens = new_tokens else: prev_tokens += new_tokens return decoded_text
null
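A hedged usage sketch with a Hugging Face tokenizer, assuming _run_incremental_decode (and the detokenize_incrementally helper it wraps) is in scope; for well-behaved tokenizers the streamed result should match a one-shot decode.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
token_ids = tokenizer('Incremental detokenization, one token at a time.').input_ids
streamed = _run_incremental_decode(tokenizer, token_ids, skip_special_tokens=True)
print(streamed == tokenizer.decode(token_ids, skip_special_tokens=True))  # typically True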
__init__
self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim)
def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim)
null
__init__
super().__init__() self.config = config self.linear_method = linear_method self.transformer = FalconModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
def __init__(self, config: FalconConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.linear_method = linear_method self.transformer = FalconModel(config, linear_method) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size) self.sampler = Sampler(config.vocab_size)
null
load_weights
stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str]= None, load_format: str='auto', revision: Optional[str]=None): stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v'), ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)] params_dict = dict(self.named_parameters()) for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision): if 'rotary_emb.inv_freq' in name: continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break else: if name.endswith('.bias') and name not in params_dict: continue param = params_dict[name] weight_loader = getattr(param, 'weight_loader', default_weight_loader) weight_loader(param, loaded_weight)
null
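An illustration of the name-rewriting step only (no weights are loaded); the checkpoint key is made up, but the mapping is the one defined above.

stacked_params_mapping = [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'),
                          ('qkv_proj', 'v_proj', 'v'),
                          ('gate_up_proj', 'gate_proj', 0), ('gate_up_proj', 'up_proj', 1)]
checkpoint_name = 'model.layers.0.self_attn.k_proj.weight'
for param_name, weight_name, shard_id in stacked_params_mapping:
    if weight_name in checkpoint_name:
        fused_name = checkpoint_name.replace(weight_name, param_name)
        print(fused_name, shard_id)  # model.layers.0.self_attn.qkv_proj.weight k
        break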
test_rms_norm
torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' layer = RMSNorm(hidden_size).to(dtype=dtype, device=gpu_id) layer.weight.data.normal_(mean=1.0, std=0.1) scale = 1 / (2 * hidden_size) x = torch.randn(num_tokens, hidden_size, dtype=dtype, device=gpu_id) x *= scale residual = torch.randn_like(x) * scale if add_residual else None ref_out = layer._forward(x, residual) out = layer(x, residual) if add_residual: assert torch.allclose(out[0], ref_out[0], atol=0.01, rtol=0.01) assert torch.allclose(out[1], ref_out[1], atol=0.01, rtol=0.01) else: assert torch.allclose(out, ref_out, atol=0.01, rtol=0.01)
@pytest.mark.parametrize('num_tokens', NUM_TOKENS) @pytest.mark.parametrize('hidden_size', HIDDEN_SIZES) @pytest.mark.parametrize('add_residual', ADD_RESIDUAL) @pytest.mark.parametrize('dtype', DTYPES) @pytest.mark.parametrize('seed', SEEDS) @pytest.mark.parametrize('device', DEVICES) @torch.inference_mode() def test_rms_norm(num_tokens: int, hidden_size: int, add_residual: bool, dtype: torch.dtype, seed: int, device: int) ->None: torch.random.manual_seed(seed) torch.cuda.manual_seed(seed) gpu_id = f'cuda:{device}' layer = RMSNorm(hidden_size).to(dtype=dtype, device=gpu_id) layer.weight.data.normal_(mean=1.0, std=0.1) scale = 1 / (2 * hidden_size) x = torch.randn(num_tokens, hidden_size, dtype=dtype, device=gpu_id) x *= scale residual = torch.randn_like(x) * scale if add_residual else None ref_out = layer._forward(x, residual) out = layer(x, residual) if add_residual: assert torch.allclose(out[0], ref_out[0], atol=0.01, rtol=0.01) assert torch.allclose(out[1], ref_out[1], atol=0.01, rtol=0.01) else: assert torch.allclose(out, ref_out, atol=0.01, rtol=0.01)
null
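A hedged plain-PyTorch sketch of the reference computation the test compares against (layer._forward): RMS statistics in float32, an optional fused residual add, and a learned per-channel scale; the eps value and exact dtype handling are assumptions.

import torch

def rms_norm_ref(x, weight, eps=1e-06, residual=None):
    if residual is not None:
        x = x + residual  # fused add; the summed input becomes the new residual
        residual = x
    variance = x.float().pow(2).mean(dim=-1, keepdim=True)
    out = (x.float() * torch.rsqrt(variance + eps)).to(x.dtype) * weight
    return out if residual is None else (out, residual)

x = torch.randn(4, 16, dtype=torch.float16) * (1 / 32)  # same scaling trick as the test
w = torch.ones(16, dtype=torch.float16)
print(rms_norm_ref(x, w).shape)  # torch.Size([4, 16])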
_shared_pointers
ptrs = defaultdict(list) for k, v in tensors.items(): ptrs[v.data_ptr()].append(k) failing = [] for _, names in ptrs.items(): if len(names) > 1: failing.append(names) return failing
def _shared_pointers(tensors): ptrs = defaultdict(list) for k, v in tensors.items(): ptrs[v.data_ptr()].append(k) failing = [] for _, names in ptrs.items(): if len(names) > 1: failing.append(names) return failing
null
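A usage sketch: tensors that share storage (for example tied embedding and LM-head weights) are reported together as one group.

import torch

shared = torch.zeros(8)
tensors = {
    'lm_head.weight': shared,
    'model.embed_tokens.weight': shared,  # tied to lm_head
    'model.norm.weight': torch.ones(8),
}
print(_shared_pointers(tensors))  # [['lm_head.weight', 'model.embed_tokens.weight']]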
forward
hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.transformer(input_ids, positions, kv_caches, input_metadata) return hidden_states
null
get_linear_method
return GPTQLinearMethod(self)
def get_linear_method(self) ->'GPTQLinearMethod': return GPTQLinearMethod(self)
null
__init__
self.seq_groups = seq_groups self.seq_data = seq_data self.prompt_lens = prompt_lens self.selected_token_indices = selected_token_indices self.categorized_sample_indices = categorized_sample_indices self.perform_sampling = perform_sampling self.num_prompts = len(prompt_lens) if prompt_lens is not None else 0
def __init__(self, seq_groups: Optional[List[Tuple[List[int], SamplingParams]]], seq_data: Optional[Dict[int, SequenceData]], prompt_lens: Optional[List[int]], selected_token_indices: torch.Tensor, categorized_sample_indices: Optional[Dict[SamplingType, torch.Tensor]], perform_sampling: bool=True) ->None: self.seq_groups = seq_groups self.seq_data = seq_data self.prompt_lens = prompt_lens self.selected_token_indices = selected_token_indices self.categorized_sample_indices = categorized_sample_indices self.perform_sampling = perform_sampling self.num_prompts = len(prompt_lens) if prompt_lens is not None else 0
null
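A hedged construction sketch for a single greedy prompt of length 4; the import paths for SamplingParams, SamplingType, and SequenceData assume vLLM's layout at the time, and SamplingMetadata itself is assumed to be in scope.

import torch
from vllm.sampling_params import SamplingParams, SamplingType
from vllm.sequence import SequenceData

metadata = SamplingMetadata(
    seq_groups=[([0], SamplingParams(temperature=0.0))],
    seq_data={0: SequenceData([11, 29871, 13, 9038])},
    prompt_lens=[4],
    selected_token_indices=torch.tensor([3], dtype=torch.long),
    categorized_sample_indices={SamplingType.GREEDY: torch.tensor([0], dtype=torch.long)},
)
print(metadata.num_prompts)  # 1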
__init__
self.model = model self.graph = None self.input_buffers: Dict[str, torch.Tensor] = {} self.output_buffers: Dict[str, torch.Tensor] = {}
def __init__(self, model: nn.Module): self.model = model self.graph = None self.input_buffers: Dict[str, torch.Tensor] = {} self.output_buffers: Dict[str, torch.Tensor] = {}
null
__init__
super().__init__() self.decoder = OPTDecoder(config, linear_method)
def __init__(self, config: OPTConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.decoder = OPTDecoder(config, linear_method)
null
from_engine_args
"""Creates an async LLM engine from the engine arguments.""" engine_configs = engine_args.create_engine_configs() parallel_config = engine_configs[2] placement_group = initialize_cluster(parallel_config, engine_args. engine_use_ray) engine = cls(parallel_config.worker_use_ray, engine_args.engine_use_ray, * engine_configs, placement_group, log_requests=not engine_args. disable_log_requests, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, start_engine_loop=start_engine_loop) return engine
@classmethod def from_engine_args(cls, engine_args: AsyncEngineArgs, start_engine_loop: bool=True) ->'AsyncLLMEngine': """Creates an async LLM engine from the engine arguments.""" engine_configs = engine_args.create_engine_configs() parallel_config = engine_configs[2] placement_group = initialize_cluster(parallel_config, engine_args. engine_use_ray) engine = cls(parallel_config.worker_use_ray, engine_args.engine_use_ray, *engine_configs, placement_group, log_requests=not engine_args. disable_log_requests, log_stats=not engine_args.disable_log_stats, max_log_len=engine_args.max_log_len, start_engine_loop= start_engine_loop) return engine
Creates an async LLM engine from the engine arguments.
forward
hidden_states = self.wte(input_ids) for i in range(len(self.blocks)): block = self.blocks[i] hidden_states = block(position_ids, hidden_states, kv_caches[i], input_metadata) hidden_states = self.norm_f(hidden_states) return hidden_states
def forward(self, input_ids: torch.Tensor, position_ids: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.wte(input_ids) for i in range(len(self.blocks)): block = self.blocks[i] hidden_states = block(position_ids, hidden_states, kv_caches[i], input_metadata) hidden_states = self.norm_f(hidden_states) return hidden_states
null
_swap_in
mapping = self.block_manager.swap_in(seq_group) blocks_to_swap_in.update(mapping) for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED): seq.status = SequenceStatus.RUNNING
def _swap_in(self, seq_group: SequenceGroup, blocks_to_swap_in: Dict[int, int] ) ->None: mapping = self.block_manager.swap_in(seq_group) blocks_to_swap_in.update(mapping) for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED): seq.status = SequenceStatus.RUNNING
null
_get_alibi_slopes
closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads)) base = torch.tensor(2 ** -2 ** -(math.log2(closest_power_of_2) - 3), dtype= torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != total_num_heads: extra_base = torch.tensor(2 ** -2 ** -(math.log2(2 * closest_power_of_2 ) - 3), dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, total_num_heads - closest_power_of_2) extra_powers = torch.arange(start=1, end=1 + 2 * num_remaining_heads, step=2, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) return slopes
def _get_alibi_slopes(total_num_heads: int) ->torch.Tensor: closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads)) base = torch.tensor(2 ** -2 ** -(math.log2(closest_power_of_2) - 3), dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != total_num_heads: extra_base = torch.tensor(2 ** -2 ** -(math.log2(2 * closest_power_of_2) - 3), dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, total_num_heads - closest_power_of_2) extra_powers = torch.arange(start=1, end=1 + 2 * num_remaining_heads, step=2, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0 ) return slopes
null
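A usage sketch (assumes the function is in scope): for 8 heads the slopes form the geometric sequence 1/2, 1/4, ..., 1/256 described in the ALiBi paper.

print(_get_alibi_slopes(8))
# tensor([0.5000, 0.2500, 0.1250, 0.0625, 0.0312, 0.0156, 0.0078, 0.0039])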
_verify_tokenizer_mode
tokenizer_mode = self.tokenizer_mode.lower() if tokenizer_mode not in ['auto', 'slow']: raise ValueError( f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be either 'auto' or 'slow'." ) self.tokenizer_mode = tokenizer_mode
def _verify_tokenizer_mode(self) ->None: tokenizer_mode = self.tokenizer_mode.lower() if tokenizer_mode not in ['auto', 'slow']: raise ValueError( f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be either 'auto' or 'slow'." ) self.tokenizer_mode = tokenizer_mode
null
get_num_free_cpu_blocks
return self.cpu_allocator.get_num_free_blocks()
def get_num_free_cpu_blocks(self) ->int: return self.cpu_allocator.get_num_free_blocks()
null
broadcast_object_list
"""Broadcast the input object list.""" world_size = torch.distributed.get_world_size() assert 0 <= src < world_size, f'Invalid src rank ({src})' if world_size == 1: return obj_list torch.distributed.broadcast_object_list(obj_list, src=src) return obj_list
def broadcast_object_list(obj_list, src=0): """Broadcast the input object list.""" world_size = torch.distributed.get_world_size() assert 0 <= src < world_size, f'Invalid src rank ({src})' if world_size == 1: return obj_list torch.distributed.broadcast_object_list(obj_list, src=src) return obj_list
Broadcast the input object list.
get_len
return len(self.output_token_ids) + len(self.prompt_token_ids)
def get_len(self) ->int: return len(self.output_token_ids) + len(self.prompt_token_ids)
null
init_worker
self.worker = worker_init_fn()
def init_worker(self, worker_init_fn): self.worker = worker_init_fn()
null
get_min_capability
return 75
def get_min_capability(self) ->int: return 75
null
__init__
super().__init__() self.config = config self.embed_dim = config.hidden_size self.self_attn = OPTAttention(embed_dim=self.embed_dim, num_heads=config. num_attention_heads, bias=config.enable_bias, linear_method=linear_method) self.do_layer_norm_before = config.do_layer_norm_before self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine =config.layer_norm_elementwise_affine) self.fc1 = ColumnParallelLinear(self.embed_dim, config.ffn_dim, bias=config .enable_bias, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.activation_fn = get_act_fn(config.activation_function, quant_config, config.ffn_dim) self.fc2 = RowParallelLinear(config.ffn_dim, self.embed_dim, bias=config. enable_bias, linear_method=linear_method) self.final_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine= config.layer_norm_elementwise_affine)
def __init__(self, config: OPTConfig, linear_method: Optional[ LinearMethodBase]=None): super().__init__() self.config = config self.embed_dim = config.hidden_size self.self_attn = OPTAttention(embed_dim=self.embed_dim, num_heads= config.num_attention_heads, bias=config.enable_bias, linear_method= linear_method) self.do_layer_norm_before = config.do_layer_norm_before self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine) self.fc1 = ColumnParallelLinear(self.embed_dim, config.ffn_dim, bias= config.enable_bias, linear_method=linear_method) quant_config = getattr(linear_method, 'quant_config', None) self.activation_fn = get_act_fn(config.activation_function, quant_config, config.ffn_dim) self.fc2 = RowParallelLinear(config.ffn_dim, self.embed_dim, bias= config.enable_bias, linear_method=linear_method) self.final_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine =config.layer_norm_elementwise_affine)
null
forward
hidden_states = self.embd(input_ids) for i in range(self.config.num_hidden_layers): layer = self.h[i] hidden_states = layer(positions, hidden_states, kv_caches[i], input_metadata) return hidden_states
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor: hidden_states = self.embd(input_ids) for i in range(self.config.num_hidden_layers): layer = self.h[i] hidden_states = layer(positions, hidden_states, kv_caches[i], input_metadata) return hidden_states
null
test_models
hf_model = hf_runner(model, dtype=dtype) hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) del vllm_model for i in range(len(example_prompts)): hf_output_ids, hf_output_str = hf_outputs[i] vllm_output_ids, vllm_output_str = vllm_outputs[i] assert hf_output_str == vllm_output_str, f"""Test{i}: HF: {hf_output_str!r} vLLM: {vllm_output_str!r}""" assert hf_output_ids == vllm_output_ids, f"""Test{i}: HF: {hf_output_ids} vLLM: {vllm_output_ids}"""
@pytest.mark.parametrize('model', MODELS) @pytest.mark.parametrize('dtype', ['float']) @pytest.mark.parametrize('max_tokens', [128]) def test_models(hf_runner, vllm_runner, example_prompts, model: str, dtype: str, max_tokens: int) ->None: hf_model = hf_runner(model, dtype=dtype) hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) del hf_model vllm_model = vllm_runner(model, dtype=dtype) vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) del vllm_model for i in range(len(example_prompts)): hf_output_ids, hf_output_str = hf_outputs[i] vllm_output_ids, vllm_output_str = vllm_outputs[i] assert hf_output_str == vllm_output_str, f"""Test{i}: HF: {hf_output_str!r} vLLM: {vllm_output_str!r}""" assert hf_output_ids == vllm_output_ids, f"""Test{i}: HF: {hf_output_ids} vLLM: {vllm_output_ids}"""
null
forward
if self.tp_size > 1: input_mask = (input_ < self.vocab_start_index) | (input_ >= self. vocab_end_index) masked_input = input_.clone() - self.vocab_start_index masked_input[input_mask] = 0 else: masked_input = input_ output_parallel = F.embedding(masked_input, self.weight) if self.tp_size > 1: output_parallel[input_mask, :] = 0.0 output = tensor_model_parallel_all_reduce(output_parallel) return output
def forward(self, input_): if self.tp_size > 1: input_mask = (input_ < self.vocab_start_index) | (input_ >= self. vocab_end_index) masked_input = input_.clone() - self.vocab_start_index masked_input[input_mask] = 0 else: masked_input = input_ output_parallel = F.embedding(masked_input, self.weight) if self.tp_size > 1: output_parallel[input_mask, :] = 0.0 output = tensor_model_parallel_all_reduce(output_parallel) return output
null
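A hedged single-rank sketch of the masking above, without the distributed all-reduce: token ids outside this rank's vocabulary shard are clamped to index 0 for the lookup and their output rows are zeroed, so the later all-reduce lets exactly one rank contribute each embedding.

import torch
import torch.nn.functional as F

vocab_start_index, vocab_end_index = 4, 8          # this rank owns token ids [4, 8)
weight = torch.randn(vocab_end_index - vocab_start_index, 3)

input_ = torch.tensor([2, 4, 7, 9])
input_mask = (input_ < vocab_start_index) | (input_ >= vocab_end_index)
masked_input = input_.clone() - vocab_start_index
masked_input[input_mask] = 0                       # clamp out-of-shard ids to a valid index
output_parallel = F.embedding(masked_input, weight)
output_parallel[input_mask, :] = 0.0               # rows for ids 2 and 9 are zeroed here
print(output_parallel)                             # the all-reduce across ranks would fill them in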
get_seqs
if status is None: return list(self.seqs_dict.values()) else: return [seq for seq in self.seqs_dict.values() if seq.status == status]
def get_seqs(self, status: Optional[SequenceStatus]=None) ->List[Sequence]: if status is None: return list(self.seqs_dict.values()) else: return [seq for seq in self.seqs_dict.values() if seq.status == status]
null
execute_model
input_tokens, input_positions, input_metadata, sampling_metadata = (self. prepare_input_tensors(seq_group_metadata_list)) if input_metadata.use_cuda_graph: graph_batch_size = input_tokens.shape[0] model_executable = self.graph_runners[graph_batch_size] else: model_executable = self.model hidden_states = model_executable(input_ids=input_tokens, positions= input_positions, kv_caches=kv_caches, input_metadata=input_metadata) output = self.model.sample(hidden_states=hidden_states, sampling_metadata= sampling_metadata) return output
@torch.inference_mode() def execute_model(self, seq_group_metadata_list: Optional[List[ SequenceGroupMetadata]], kv_caches: List[Tuple[torch.Tensor, torch.Tensor]] ) ->Optional[SamplerOutput]: input_tokens, input_positions, input_metadata, sampling_metadata = (self .prepare_input_tensors(seq_group_metadata_list)) if input_metadata.use_cuda_graph: graph_batch_size = input_tokens.shape[0] model_executable = self.graph_runners[graph_batch_size] else: model_executable = self.model hidden_states = model_executable(input_ids=input_tokens, positions= input_positions, kv_caches=kv_caches, input_metadata=input_metadata) output = self.model.sample(hidden_states=hidden_states, sampling_metadata=sampling_metadata) return output
null
ensure_divisibility
"""Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, '{} is not divisible by {}'.format( numerator, denominator)
def ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, '{} is not divisible by {}'.format( numerator, denominator)
Ensure that numerator is divisible by the denominator.
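Usage sketch:

ensure_divisibility(128, 8)      # passes silently
try:
    ensure_divisibility(100, 8)
except AssertionError as err:
    print(err)                   # 100 is not divisible by 8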
forward
residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states return hidden_states
def forward(self, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_output = self.attn(hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata) hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states return hidden_states
null
__init__
super().__init__() self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config. hidden_size) self.layers = nn.ModuleList([MixtralDecoderLayer(config, linear_method= linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def __init__(self, config: MixtralConfig, linear_method: Optional[ LinearMethodBase]=None) ->None: super().__init__() self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = VocabParallelEmbedding(config.vocab_size, config. hidden_size) self.layers = nn.ModuleList([MixtralDecoderLayer(config, linear_method= linear_method) for _ in range(config.num_hidden_layers)]) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
null
swap_out
mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {} for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): new_block_table: BlockTable = [] block_table = self.block_tables[seq.seq_id] for gpu_block in block_table: if gpu_block in mapping: cpu_block = mapping[gpu_block] cpu_block.ref_count += 1 else: cpu_block = self.cpu_allocator.allocate() mapping[gpu_block] = cpu_block new_block_table.append(cpu_block) self.gpu_allocator.free(gpu_block) self.block_tables[seq.seq_id] = new_block_table block_number_mapping = {gpu_block.block_number: cpu_block.block_number for gpu_block, cpu_block in mapping.items()} return block_number_mapping
def swap_out(self, seq_group: SequenceGroup) ->Dict[int, int]: mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {} for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING): new_block_table: BlockTable = [] block_table = self.block_tables[seq.seq_id] for gpu_block in block_table: if gpu_block in mapping: cpu_block = mapping[gpu_block] cpu_block.ref_count += 1 else: cpu_block = self.cpu_allocator.allocate() mapping[gpu_block] = cpu_block new_block_table.append(cpu_block) self.gpu_allocator.free(gpu_block) self.block_tables[seq.seq_id] = new_block_table block_number_mapping = {gpu_block.block_number: cpu_block.block_number for gpu_block, cpu_block in mapping.items()} return block_number_mapping
null
__init__
super().__init__() self.act = act_module self.input_is_parallel = input_is_parallel if input_is_parallel: tp_size = get_tensor_model_parallel_world_size() intermediate_size_per_partition = divide(intermediate_size, tp_size) else: intermediate_size_per_partition = intermediate_size if params_dtype is None: params_dtype = torch.get_default_dtype() self.scales = nn.Parameter(torch.empty(intermediate_size_per_partition, dtype=params_dtype, device='cuda')) set_weight_attrs(self.scales, {'weight_loader': self.weight_loader})
def __init__(self, act_module: nn.Module, intermediate_size: int, input_is_parallel: bool=True, params_dtype: Optional[torch.dtype]=None): super().__init__() self.act = act_module self.input_is_parallel = input_is_parallel if input_is_parallel: tp_size = get_tensor_model_parallel_world_size() intermediate_size_per_partition = divide(intermediate_size, tp_size) else: intermediate_size_per_partition = intermediate_size if params_dtype is None: params_dtype = torch.get_default_dtype() self.scales = nn.Parameter(torch.empty(intermediate_size_per_partition, dtype=params_dtype, device='cuda')) set_weight_attrs(self.scales, {'weight_loader': self.weight_loader})
null