vllm.reasoning.holo2_reasoning_parser

logger module-attribute

logger = init_logger(__name__)

Holo2ReasoningParser

Bases: ReasoningParser

Reasoning parser for the Holo2 models, which are based on Qwen3.

The Holo2 model uses <think>...</think> tokens to denote reasoning text, but <think> is part of the chat template. This parser extracts the reasoning content until </think> in the model's output.

The model provides a switch to enable or disable reasoning output via the 'thinking=False' parameter.

Chat template args:
  • thinking: Whether to enable reasoning output (default: True)

Parsing rules on model output:
  • thinking == False -> Model output is treated as purely the content |content|
  • thinking == True -> Model output is |reasoning_content|</think>|content|
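For illustration, the 'thinking' switch above is typically toggled per request through 'chat_template_kwargs'. The sketch below assumes a running vLLM OpenAI-compatible server and the openai Python client; the model name is a placeholder, not a confirmed checkpoint.

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="Hcompany/Holo2-4B",  # placeholder model name (assumption)
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    # Disable the reasoning span; the parser then treats the whole
    # output as plain content (no reasoning_content).
    extra_body={"chat_template_kwargs": {"thinking": False}},
)
print(response.choices[0].message.content)
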
Source code in vllm/reasoning/holo2_reasoning_parser.py
class Holo2ReasoningParser(ReasoningParser):
    """
    Reasoning parser for the Holo2 models, which are based on Qwen3.

    The Holo2 model uses <think>...</think> tokens to denote reasoning text but <think>
    is part of the chat template. This parser extracts the reasoning content until
    </think> in the model's output.

    The model provides a switch to enable or disable reasoning
    output via the 'thinking=False' parameter.

    Chat template args:
    - thinking: Whether to enable reasoning output (default: True)


    Parsing rules on model output:
        - thinking == False
            -> Model output is treated as purely the content |content|
        - thinking == True
            -> Model output is |reasoning_content|</think>|content|
    """

    def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)

        chat_kwargs = kwargs.get("chat_template_kwargs", {}) or {}
        # DeepSeek V3 and Holo2 are similar; however, Holo2 models think by
        # default. Without user-specified chat template args, this parser is
        # instantiated once for all requests in the structured output manager,
        # so it is important that the default for thinking is True.

        enable_thinking = bool(chat_kwargs.get("thinking", True))

        if enable_thinking:
            self._parser = DeepSeekR1ReasoningParser(tokenizer, *args, **kwargs)
        else:
            self._parser = IdentityReasoningParser(tokenizer, *args, **kwargs)

    def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
        return self._parser.is_reasoning_end(input_ids)

    def extract_content_ids(self, input_ids: list[int]) -> list[int]:
        return self._parser.extract_content_ids(input_ids)

    def extract_reasoning(
        self, model_output: str, request: ChatCompletionRequest
    ) -> tuple[str | None, str | None]:
        return self._parser.extract_reasoning(model_output, request)

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        return self._parser.extract_reasoning_streaming(
            previous_text,
            current_text,
            delta_text,
            previous_token_ids,
            current_token_ids,
            delta_token_ids,
        )

_parser instance-attribute

_parser = DeepSeekR1ReasoningParser(
    tokenizer, *args, **kwargs
)

__init__

__init__(tokenizer: TokenizerLike, *args, **kwargs)
Source code in vllm/reasoning/holo2_reasoning_parser.py
def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
    super().__init__(tokenizer, *args, **kwargs)

    chat_kwargs = kwargs.get("chat_template_kwargs", {}) or {}
    # DeepSeek V3 and Holo2 are similar; however, Holo2 models think by
    # default. Without user-specified chat template args, this parser is
    # instantiated once for all requests in the structured output manager,
    # so it is important that the default for thinking is True.

    enable_thinking = bool(chat_kwargs.get("thinking", True))

    if enable_thinking:
        self._parser = DeepSeekR1ReasoningParser(tokenizer, *args, **kwargs)
    else:
        self._parser = IdentityReasoningParser(tokenizer, *args, **kwargs)
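
A hedged construction sketch showing how the chat template args select the delegate parser. The tokenizer checkpoint is an assumption; any Qwen3-family tokenizer that defines the think tokens should behave the same.

from transformers import AutoTokenizer

from vllm.reasoning.holo2_reasoning_parser import Holo2ReasoningParser

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")  # assumed checkpoint

# No chat template args: thinking defaults to True, so parsing is
# delegated to DeepSeekR1ReasoningParser.
parser = Holo2ReasoningParser(tokenizer)

# Thinking explicitly disabled: everything is delegated to
# IdentityReasoningParser, which passes output through as content.
no_think_parser = Holo2ReasoningParser(
    tokenizer, chat_template_kwargs={"thinking": False}
)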

extract_content_ids

extract_content_ids(input_ids: list[int]) -> list[int]
Source code in vllm/reasoning/holo2_reasoning_parser.py
def extract_content_ids(self, input_ids: list[int]) -> list[int]:
    return self._parser.extract_content_ids(input_ids)
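
A token-level sketch reusing 'tokenizer' and 'parser' from the construction example above. That the Qwen3 vocabulary encodes </think> as a single special token is an assumption here.

# Everything after the </think> token is returned as content ids.
ids = tokenizer.encode("Some reasoning</think>The answer", add_special_tokens=False)
content_ids = parser.extract_content_ids(ids)
print(tokenizer.decode(content_ids))  # expected: "The answer"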

extract_reasoning

extract_reasoning(
    model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]
Source code in vllm/reasoning/holo2_reasoning_parser.py
def extract_reasoning(
    self, model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]:
    return self._parser.extract_reasoning(model_output, request)
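
A minimal non-streaming sketch, again reusing 'parser' from above. The output string is illustrative, and construction of the ChatCompletionRequest is elided.

# request: a ChatCompletionRequest (construction elided for brevity)
model_output = "Let me add 2 and 2 step by step.</think>The answer is 4."
reasoning, content = parser.extract_reasoning(model_output, request)
# expected: reasoning == "Let me add 2 and 2 step by step."
# expected: content == "The answer is 4."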

extract_reasoning_streaming

extract_reasoning_streaming(
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None
Source code in vllm/reasoning/holo2_reasoning_parser.py
def extract_reasoning_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
    return self._parser.extract_reasoning_streaming(
        previous_text,
        current_text,
        delta_text,
        previous_token_ids,
        current_token_ids,
        delta_token_ids,
    )
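
A hedged sketch of driving the streaming path, reusing 'tokenizer' and 'parser' from above. The delta boundaries are illustrative; a real caller would pass the exact token ids produced at each generation step.

previous_text = ""
previous_ids: list[int] = []
for delta_text in ["Let me think.", "</think>", "The answer is 4."]:
    delta_ids = tokenizer.encode(delta_text, add_special_tokens=False)
    current_text = previous_text + delta_text
    current_ids = previous_ids + delta_ids
    message = parser.extract_reasoning_streaming(
        previous_text,
        current_text,
        delta_text,
        previous_ids,
        current_ids,
        delta_ids,
    )
    if message is not None:
        # A DeltaMessage carries reasoning_content before </think>
        # and content after it.
        print(message.reasoning_content, message.content)
    previous_text, previous_ids = current_text, current_ids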

is_reasoning_end

is_reasoning_end(input_ids: Sequence[int]) -> bool
Source code in vllm/reasoning/holo2_reasoning_parser.py
def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
    return self._parser.is_reasoning_end(input_ids)
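
Finally, a sketch of the end-of-reasoning check with the same 'tokenizer' and 'parser'. With thinking enabled, the delegate looks for the </think> token id among the generated ids (the same assumption about Qwen3 special tokens applies as above).

ids_open = tokenizer.encode("Still reasoning about it", add_special_tokens=False)
ids_closed = tokenizer.encode("Done.</think>", add_special_tokens=False)
print(parser.is_reasoning_end(ids_open))    # expected: False
print(parser.is_reasoning_end(ids_closed))  # expected: True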