Skip to content

vllm.v1.engine

Modules:

Name Description
async_llm
coordinator
core
core_client
detokenizer
exceptions
input_processor
llm_engine
logprobs
output_processor
parallel_sampling
utils

FINISH_REASON_STRINGS module-attribute

FINISH_REASON_STRINGS = ("stop", "length", "abort", "error")

EngineCoreOutput

Bases: Struct

Source code in vllm/v1/engine/__init__.py
class EngineCoreOutput(
    msgspec.Struct,
    array_like=True,  # type: ignore[call-arg]
    omit_defaults=True,  # type: ignore[call-arg]
    gc=False,
):  # type: ignore[call-arg]
    """Per-request output emitted by the engine core.

    Serialized with msgspec; `array_like=True` encodes fields positionally,
    so the declaration order below is part of the wire format — do not
    reorder or insert fields in the middle.
    """

    # ID of the request this output belongs to.
    request_id: str
    # Newly produced token IDs for this request.
    new_token_ids: list[int]

    # Logprobs for the newly produced tokens, if requested.
    new_logprobs: LogprobsLists | None = None
    # Prompt logprobs tensors, if requested.
    new_prompt_logprobs_tensors: LogprobsTensors | None = None

    # Pooling result for embedding/pooling requests (None for generation).
    pooling_output: torch.Tensor | None = None

    # Set once the request is done; drives the `finished` property.
    finish_reason: FinishReason | None = None
    # Extra detail on why generation stopped (e.g. a stop token id or
    # stop string) — presumably populated alongside finish_reason; verify
    # against the scheduler.
    stop_reason: int | str | None = None
    # Parameters for KV-cache transfer (disaggregated serving).
    kv_transfer_params: dict[str, Any] | None = None

    # Tracing headers propagated from the request, if any.
    trace_headers: Mapping[str, str] | None = None
    # The number of tokens with prefix cache hits.
    num_cached_tokens: int = 0
    # Expert routing information — NOTE(review): semantics/shape not
    # visible here; confirm against the producer.
    routed_experts: np.ndarray | None = None
    # The number of NaNs in logits.
    # A value greater than 0 indicates that the output is corrupted.
    num_nans_in_logits: int = 0

    @property
    def finished(self) -> bool:
        """True once a finish reason has been assigned."""
        return self.finish_reason is not None

finish_reason class-attribute instance-attribute

finish_reason: FinishReason | None = None

finished property

finished: bool

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = None

new_logprobs class-attribute instance-attribute

new_logprobs: LogprobsLists | None = None

new_prompt_logprobs_tensors class-attribute instance-attribute

new_prompt_logprobs_tensors: LogprobsTensors | None = None

new_token_ids instance-attribute

new_token_ids: list[int]

num_cached_tokens class-attribute instance-attribute

num_cached_tokens: int = 0

num_nans_in_logits class-attribute instance-attribute

num_nans_in_logits: int = 0

pooling_output class-attribute instance-attribute

pooling_output: Tensor | None = None

request_id instance-attribute

request_id: str

routed_experts class-attribute instance-attribute

routed_experts: ndarray | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = None

trace_headers class-attribute instance-attribute

trace_headers: Mapping[str, str] | None = None

EngineCoreOutputs

Bases: Struct

Source code in vllm/v1/engine/__init__.py
class EngineCoreOutputs(
    msgspec.Struct,
    array_like=True,  # type: ignore[call-arg]
    omit_defaults=True,  # type: ignore[call-arg]
    gc=False,
):  # type: ignore[call-arg]
    """Batch of per-request outputs for one engine step.

    Positionally encoded by msgspec (`array_like=True`): field order is
    part of the wire format — do not reorder fields.
    """

    # NOTE(Nick): We could consider ways to make this more compact,
    # e.g. columnwise layout

    # Index of the engine that produced these outputs (data-parallel case).
    engine_index: int = 0

    # [num_reqs]
    # NOTE: mutable `[]` default is safe on a msgspec.Struct — msgspec
    # copies defaults per instance, unlike plain Python class attributes.
    outputs: list[EngineCoreOutput] = []
    # Scheduler statistics for this step, if collected.
    scheduler_stats: SchedulerStats | None = None
    # Creation time; 0.0 means "not set" and is stamped in __post_init__.
    timestamp: float = 0.0

    # Result of a utility (RPC-style) call, if one completed.
    utility_output: UtilityOutput | None = None
    # IDs of requests that finished this step, if any.
    finished_requests: set[str] | None = None
    # Per-request lifecycle events, if event tracking is enabled.
    journey_events: list[RequestJourneyEvent] | None = None

    # In DP case, used to signal that the current wave of requests
    # has finished and the engines are paused.
    wave_complete: int | None = None
    # In DP case, used to signal that a request was received for an
    # "old" wave, so the next wave needs to be started in other engines.
    start_wave: int | None = None

    def __post_init__(self):
        # Stamp creation time only if the sender did not provide one.
        if self.timestamp == 0.0:
            self.timestamp = time.monotonic()

engine_index class-attribute instance-attribute

engine_index: int = 0

finished_requests class-attribute instance-attribute

finished_requests: set[str] | None = None

journey_events class-attribute instance-attribute

journey_events: list[RequestJourneyEvent] | None = None

outputs class-attribute instance-attribute

outputs: list[EngineCoreOutput] = []

scheduler_stats class-attribute instance-attribute

scheduler_stats: SchedulerStats | None = None

start_wave class-attribute instance-attribute

start_wave: int | None = None

timestamp class-attribute instance-attribute

timestamp: float = 0.0

utility_output class-attribute instance-attribute

utility_output: UtilityOutput | None = None

wave_complete class-attribute instance-attribute

wave_complete: int | None = None

__post_init__

__post_init__()
Source code in vllm/v1/engine/__init__.py
def __post_init__(self):
    """Stamp the creation time if the sender did not provide one."""
    if self.timestamp == 0.0:
        self.timestamp = time.monotonic()

EngineCoreRequest

Bases: Struct

Source code in vllm/v1/engine/__init__.py
class EngineCoreRequest(
    msgspec.Struct,
    array_like=True,  # type: ignore[call-arg]
    omit_defaults=True,  # type: ignore[call-arg]
    gc=False,
):  # type: ignore[call-arg]
    """A fully-processed request sent from the front-end to the engine core.

    Positionally encoded by msgspec (`array_like=True`): the field order
    below is part of the wire format — do not reorder fields.
    """

    # Internal ID of the request (see external_req_id below).
    request_id: str
    # Tokenized prompt; presumably None when prompt_embeds is supplied
    # instead — verify against the input processor.
    prompt_token_ids: list[int] | None
    # Multi-modal inputs (images, audio, ...), if any.
    mm_features: list[MultiModalFeatureSpec] | None
    # Exactly one of sampling_params / pooling_params is expected to be
    # set; see the `params` property below.
    sampling_params: SamplingParams | None
    pooling_params: PoolingParams | None
    # EOS token for this request's tokenizer, if known.
    eos_token_id: int | None
    # Time the request arrived at the front-end.
    arrival_time: float
    # LoRA adapter to apply, if any.
    lora_request: LoRARequest | None
    # Salt mixed into prefix-cache hashing, if provided.
    cache_salt: str | None
    # Target data-parallel rank, if pinned by the caller.
    data_parallel_rank: int | None
    # Pre-computed prompt embeddings (alternative to prompt_token_ids).
    prompt_embeds: torch.Tensor | None = None

    # Index of the client, used to ensure outputs are sent back to the same
    # client for this request when scaling out the front-end.
    client_index: int = 0

    # Used in DP case to indicate which wave of requests this is expected to
    # belong to, to cover a race condition where the request is sent before
    # a wave finished notification is received.
    current_wave: int = 0
    # Scheduling priority of the request.
    priority: int = 0

    # Tracing headers to propagate, if any.
    trace_headers: Mapping[str, str] | None = None

    # The user-provided request ID. This field is set internally,
    # copied from the provided request_id that's originally assigned
    # to the request_id field, see InputProcessor.assign_request_id().
    # Used in outputs and to support abort(req_id, internal=False).
    external_req_id: str | None = None

    @property
    def params(self) -> SamplingParams | PoolingParams:
        """Return the processed params (sampling or pooling)."""
        if self.sampling_params is not None:
            return self.sampling_params
        assert self.pooling_params is not None
        return self.pooling_params

arrival_time instance-attribute

arrival_time: float

cache_salt instance-attribute

cache_salt: str | None

client_index class-attribute instance-attribute

client_index: int = 0

current_wave class-attribute instance-attribute

current_wave: int = 0

data_parallel_rank instance-attribute

data_parallel_rank: int | None

eos_token_id instance-attribute

eos_token_id: int | None

external_req_id class-attribute instance-attribute

external_req_id: str | None = None

lora_request instance-attribute

lora_request: LoRARequest | None

mm_features instance-attribute

mm_features: list[MultiModalFeatureSpec] | None

params property

Return the processed params (sampling or pooling).

pooling_params instance-attribute

pooling_params: PoolingParams | None

priority class-attribute instance-attribute

priority: int = 0

prompt_embeds class-attribute instance-attribute

prompt_embeds: Tensor | None = None

prompt_token_ids instance-attribute

prompt_token_ids: list[int] | None

request_id instance-attribute

request_id: str

sampling_params instance-attribute

sampling_params: SamplingParams | None

trace_headers class-attribute instance-attribute

trace_headers: Mapping[str, str] | None = None

EngineCoreRequestType

Bases: Enum

Request types defined as hex byte strings, so they can be sent over sockets without a separate encoding step.

Source code in vllm/v1/engine/__init__.py
class EngineCoreRequestType(enum.Enum):
    """
    Request types defined as hex byte strings, so they can be sent over
    sockets without a separate encoding step.
    """

    # Add a new request to the engine.
    ADD = b"\x00"
    # Abort one or more in-flight requests.
    ABORT = b"\x01"
    # Start a new data-parallel wave.
    START_DP_WAVE = b"\x02"
    # Invoke a utility (RPC-style) method on the engine core.
    UTILITY = b"\x03"
    # Sentinel used within EngineCoreProc.
    EXECUTOR_FAILED = b"\x04"

ABORT class-attribute instance-attribute

ABORT = b'\x01'

ADD class-attribute instance-attribute

ADD = b'\x00'

EXECUTOR_FAILED class-attribute instance-attribute

EXECUTOR_FAILED = b'\x04'

START_DP_WAVE class-attribute instance-attribute

START_DP_WAVE = b'\x02'

UTILITY class-attribute instance-attribute

UTILITY = b'\x03'

FinishReason

Bases: IntEnum

Reason a request finished - stop, length, abort, or error.

Int rather than Str for more compact serialization.

stop — a stop string was emitted.
length — max_tokens was consumed, or max_model_len was reached.
abort — aborted by the client.
error — retryable request-level internal error (e.g., KV load failure). Invariant: always converted to 500 Internal Server Error.

Source code in vllm/v1/engine/__init__.py
class FinishReason(enum.IntEnum):
    """Why a request finished: stop, length, abort, or error.

    Represented as an int rather than a string for more compact
    serialization.

    stop - a stop string was emitted
    length - max_tokens was consumed, or max_model_len was reached
    abort - aborted by client
    error - retryable request-level internal error (e.g., KV load failure).
            Invariant: always converted to 500 Internal Server Error.

    """

    STOP = 0
    LENGTH = 1
    ABORT = 2
    ERROR = 3

    def __str__(self):
        # IntEnum members can index sequences directly; the lookup table
        # is ordered to match the member values.
        return FINISH_REASON_STRINGS[self]

ABORT class-attribute instance-attribute

ABORT = 2

ERROR class-attribute instance-attribute

ERROR = 3

LENGTH class-attribute instance-attribute

LENGTH = 1

STOP class-attribute instance-attribute

STOP = 0

__str__

__str__()
Source code in vllm/v1/engine/__init__.py
def __str__(self):
    # Map the compact int value to its human-readable string.
    return FINISH_REASON_STRINGS[self.value]

ReconfigureDistributedRequest

Bases: Struct

Source code in vllm/v1/engine/__init__.py
class ReconfigureDistributedRequest(msgspec.Struct):
    """Request to reconfigure the data-parallel topology (elastic scaling).

    Rank fields may also carry the ReconfigureRankType sentinels
    (KEEP_CURRENT_RANK / SHUTDOWN_CURRENT_RANK).
    """

    # New total number of data-parallel ranks.
    new_data_parallel_size: int
    # This engine's new global DP rank (or a ReconfigureRankType sentinel).
    new_data_parallel_rank: int
    # This engine's new node-local DP rank (or a sentinel).
    new_data_parallel_rank_local: int
    # Master address/port for re-initializing the DP process group.
    new_data_parallel_master_ip: str
    new_data_parallel_master_port: int

new_data_parallel_master_ip instance-attribute

new_data_parallel_master_ip: str

new_data_parallel_master_port instance-attribute

new_data_parallel_master_port: int

new_data_parallel_rank instance-attribute

new_data_parallel_rank: int

new_data_parallel_rank_local instance-attribute

new_data_parallel_rank_local: int

new_data_parallel_size instance-attribute

new_data_parallel_size: int

ReconfigureRankType

Bases: IntEnum

Rank type for reconfiguring distributed request.

Source code in vllm/v1/engine/__init__.py
class ReconfigureRankType(enum.IntEnum):
    """
    Rank type for reconfiguring distributed request.

    Negative sentinel values, so they can never collide with real
    (non-negative) rank numbers.
    """

    # Keep this engine's current rank unchanged.
    KEEP_CURRENT_RANK = -1
    # Shut this engine's rank down as part of the reconfiguration.
    SHUTDOWN_CURRENT_RANK = -2

KEEP_CURRENT_RANK class-attribute instance-attribute

KEEP_CURRENT_RANK = -1

SHUTDOWN_CURRENT_RANK class-attribute instance-attribute

SHUTDOWN_CURRENT_RANK = -2

UtilityOutput

Bases: Struct

Source code in vllm/v1/engine/__init__.py
class UtilityOutput(
    msgspec.Struct,
    array_like=True,  # type: ignore[call-arg]
    gc=False,
):  # type: ignore[call-arg]
    """Result of a utility (RPC-style) call made on the engine core.

    Positionally encoded by msgspec; field order is part of the wire
    format — do not reorder fields.
    """

    # Correlates this output with the originating utility call.
    call_id: int

    # Non-None implies the call failed, result should be None.
    failure_message: str | None = None
    # The call's return value on success.
    result: UtilityResult | None = None

call_id instance-attribute

call_id: int

failure_message class-attribute instance-attribute

failure_message: str | None = None

result class-attribute instance-attribute

result: UtilityResult | None = None