vllm.entrypoints.openai.parser.responses_parser

logger module-attribute

logger = getLogger(__name__)

ResponsesParser

Incremental parser over completion tokens with reasoning support.

Source code in vllm/entrypoints/openai/parser/responses_parser.py
class ResponsesParser:
    """Incremental parser over completion tokens with reasoning support."""

    def __init__(
        self,
        *,
        tokenizer: AnyTokenizer,
        reasoning_parser_cls: Callable[[AnyTokenizer], ReasoningParser],
        response_messages: list[ResponseInputOutputItem],
        request: ResponsesRequest,
        tool_parser_cls: Callable[[TokenizerLike], ToolParser] | None,
    ):
        self.response_messages: list[ResponseInputOutputItem] = (
            # TODO: initial messages may not be properly typed
            response_messages
        )
        self.num_init_messages = len(response_messages)
        self.tokenizer = tokenizer
        self.request = request

        self.reasoning_parser_instance = reasoning_parser_cls(tokenizer)
        self.tool_parser_instance = None
        if tool_parser_cls is not None:
            self.tool_parser_instance = tool_parser_cls(tokenizer)

    def process(self, output: CompletionOutput) -> "ResponsesParser":
        reasoning_content, content = self.reasoning_parser_instance.extract_reasoning(
            output.text, request=self.request
        )
        if reasoning_content:
            self.response_messages.append(
                ResponseReasoningItem(
                    type="reasoning",
                    id=f"rs_{random_uuid()}",
                    summary=[],
                    content=[
                        Content(
                            type="reasoning_text",
                            text=reasoning_content,
                        )
                    ],
                )
            )

        function_calls: list[ResponseFunctionToolCall] = []
        if self.tool_parser_instance is not None:
            tool_call_info = self.tool_parser_instance.extract_tool_calls(
                content if content is not None else "",
                request=self.request,  # type: ignore
            )
            if tool_call_info is not None and tool_call_info.tools_called:
                # extract_tool_calls() returns a list of tool calls.
                function_calls.extend(
                    ResponseFunctionToolCall(
                        id=f"fc_{random_uuid()}",
                        call_id=f"call_{random_uuid()}",
                        type="function_call",
                        status="completed",
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in tool_call_info.tool_calls
                )
                content = tool_call_info.content
                if content and content.strip() == "":
                    content = None

        if content:
            self.response_messages.append(
                ResponseOutputMessage(
                    type="message",
                    id=f"msg_{random_uuid()}",
                    status="completed",
                    role="assistant",
                    content=[
                        ResponseOutputText(
                            annotations=[],  # TODO
                            type="output_text",
                            text=content,
                            logprobs=None,  # TODO
                        )
                    ],
                )
            )
        if len(function_calls) > 0:
            self.response_messages.extend(function_calls)

        return self
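
Below is a minimal usage sketch (not part of the vLLM source). It assumes a Hugging Face tokenizer, that the reasoning parser registry exposes a parser under the name used here, and a simplified ResponsesRequest; exact field names and registered parser names may differ between vLLM versions.

from transformers import AutoTokenizer

from vllm.entrypoints.openai.parser.responses_parser import ResponsesParser
from vllm.entrypoints.openai.protocol import ResponsesRequest
from vllm.outputs import CompletionOutput
from vllm.reasoning import ReasoningParserManager

model = "Qwen/Qwen3-4B"
tokenizer = AutoTokenizer.from_pretrained(model)

# Assumption: "qwen3" is registered in this build's reasoning parser registry.
reasoning_parser_cls = ReasoningParserManager.get_reasoning_parser("qwen3")

# Simplified request; real requests typically carry more fields.
request = ResponsesRequest(model=model, input="What is 2 + 2?")

parser = ResponsesParser(
    tokenizer=tokenizer,
    reasoning_parser_cls=reasoning_parser_cls,
    response_messages=[],  # no prior conversation items
    request=request,
    tool_parser_cls=None,  # skip tool-call extraction
)

# A CompletionOutput normally comes from the engine; built by hand here.
output = CompletionOutput(
    index=0,
    text="<think>The user asks for 2 + 2.</think>The answer is 4.",
    token_ids=[],
    cumulative_logprob=None,
    logprobs=None,
)

parser.process(output)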

num_init_messages instance-attribute

num_init_messages = len(response_messages)

reasoning_parser_instance instance-attribute

reasoning_parser_instance = reasoning_parser_cls(tokenizer)

request instance-attribute

request = request

response_messages instance-attribute

response_messages: list[ResponseInputOutputItem] = (
    response_messages
)

tokenizer instance-attribute

tokenizer = tokenizer

tool_parser_instance instance-attribute

tool_parser_instance = None

__init__

__init__(
    *,
    tokenizer: AnyTokenizer,
    reasoning_parser_cls: Callable[
        [AnyTokenizer], ReasoningParser
    ],
    response_messages: list[ResponseInputOutputItem],
    request: ResponsesRequest,
    tool_parser_cls: Callable[[TokenizerLike], ToolParser]
    | None,
)
Source code in vllm/entrypoints/openai/parser/responses_parser.py
def __init__(
    self,
    *,
    tokenizer: AnyTokenizer,
    reasoning_parser_cls: Callable[[AnyTokenizer], ReasoningParser],
    response_messages: list[ResponseInputOutputItem],
    request: ResponsesRequest,
    tool_parser_cls: Callable[[TokenizerLike], ToolParser] | None,
):
    self.response_messages: list[ResponseInputOutputItem] = (
        # TODO: initial messages may not be properly typed
        response_messages
    )
    self.num_init_messages = len(response_messages)
    self.tokenizer = tokenizer
    self.request = request

    self.reasoning_parser_instance = reasoning_parser_cls(tokenizer)
    self.tool_parser_instance = None
    if tool_parser_cls is not None:
        self.tool_parser_instance = tool_parser_cls(tokenizer)
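
Passing a tool parser class enables the extract_tool_calls() branch in process(). A hedged sketch, reusing tokenizer, reasoning_parser_cls, and request from the example above and assuming the tool parser registry and the "hermes" parser name exist in this vLLM build:

from vllm.entrypoints.openai.tool_parsers import ToolParserManager

# Assumption: "hermes" is a registered tool parser name in this build.
tool_parser_cls = ToolParserManager.get_tool_parser("hermes")

parser = ResponsesParser(
    tokenizer=tokenizer,
    reasoning_parser_cls=reasoning_parser_cls,
    response_messages=[],
    request=request,
    tool_parser_cls=tool_parser_cls,  # process() will now extract tool calls
)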

process

process(output: CompletionOutput) -> ResponsesParser
Source code in vllm/entrypoints/openai/parser/responses_parser.py
def process(self, output: CompletionOutput) -> "ResponsesParser":
    reasoning_content, content = self.reasoning_parser_instance.extract_reasoning(
        output.text, request=self.request
    )
    if reasoning_content:
        self.response_messages.append(
            ResponseReasoningItem(
                type="reasoning",
                id=f"rs_{random_uuid()}",
                summary=[],
                content=[
                    Content(
                        type="reasoning_text",
                        text=reasoning_content,
                    )
                ],
            )
        )

    function_calls: list[ResponseFunctionToolCall] = []
    if self.tool_parser_instance is not None:
        tool_call_info = self.tool_parser_instance.extract_tool_calls(
            content if content is not None else "",
            request=self.request,  # type: ignore
        )
        if tool_call_info is not None and tool_call_info.tools_called:
            # extract_tool_calls() returns a list of tool calls.
            function_calls.extend(
                ResponseFunctionToolCall(
                    id=f"fc_{random_uuid()}",
                    call_id=f"call_{random_uuid()}",
                    type="function_call",
                    status="completed",
                    name=tool_call.function.name,
                    arguments=tool_call.function.arguments,
                )
                for tool_call in tool_call_info.tool_calls
            )
            content = tool_call_info.content
            if content and content.strip() == "":
                content = None

    if content:
        self.response_messages.append(
            ResponseOutputMessage(
                type="message",
                id=f"msg_{random_uuid()}",
                status="completed",
                role="assistant",
                content=[
                    ResponseOutputText(
                        annotations=[],  # TODO
                        type="output_text",
                        text=content,
                        logprobs=None,  # TODO
                    )
                ],
            )
        )
    if len(function_calls) > 0:
        self.response_messages.extend(function_calls)

    return self
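
The items appended by process() land after the initial messages and can be told apart by their type field. A small sketch continuing from the construction example above:

# Items appended by process() start at index num_init_messages.
new_items = parser.response_messages[parser.num_init_messages:]

for item in new_items:
    if item.type == "reasoning":
        print("reasoning:", item.content[0].text)
    elif item.type == "message":
        print("assistant:", item.content[0].text)
    elif item.type == "function_call":
        print("tool call:", item.name, item.arguments)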

get_responses_parser_for_simple_context

get_responses_parser_for_simple_context(
    *,
    tokenizer: AnyTokenizer,
    reasoning_parser_cls: Callable[
        [AnyTokenizer], ReasoningParser
    ],
    response_messages: list[ResponseInputOutputItem],
    request: ResponsesRequest,
    tool_parser_cls,
) -> ResponsesParser

Factory function to create a ResponsesParser with a reasoning parser and an optional tool parser.

Returns:

Type             Description
ResponsesParser  ResponsesParser instance configured with the provided parser

Source code in vllm/entrypoints/openai/parser/responses_parser.py
def get_responses_parser_for_simple_context(
    *,
    tokenizer: AnyTokenizer,
    reasoning_parser_cls: Callable[[AnyTokenizer], ReasoningParser],
    response_messages: list[ResponseInputOutputItem],
    request: ResponsesRequest,
    tool_parser_cls,
) -> ResponsesParser:
    """Factory function to create a ResponsesParser with
    optional reasoning parser.

    Returns:
        ResponsesParser instance configured with the provided parser
    """
    return ResponsesParser(
        tokenizer=tokenizer,
        reasoning_parser_cls=reasoning_parser_cls,
        response_messages=response_messages,
        request=request,
        tool_parser_cls=tool_parser_cls,
    )
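
The factory forwards its keyword arguments straight to the ResponsesParser constructor, so it is called the same way. A hedged sketch reusing the names from the earlier examples:

from vllm.entrypoints.openai.parser.responses_parser import (
    get_responses_parser_for_simple_context,
)

parser = get_responses_parser_for_simple_context(
    tokenizer=tokenizer,
    reasoning_parser_cls=reasoning_parser_cls,
    response_messages=[],
    request=request,
    tool_parser_cls=None,  # or a ToolParser class to enable tool-call extraction
)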