gen_ai_hub.orchestration.models.response
/home/jenkins/agent/workspace/ation_generative-ai-hub-sdk_main/gen_ai_hub/orchestration/models/response.py

 
Modules
       
json

 
Classes
       
builtins.object
BaseLLMResult
LLMResult
LLMResultStreaming
BaseModuleResults
ModuleResults
ModuleResultsStreaming
ChatDelta
FunctionCall
GenericModuleResult
LLMChoice
LLMChoiceStreaming
LLMUsage
MessageToolCall
OrchestrationResponse
OrchestrationResponseStreaming
ResponseChatMessage
ToolCallChunk

 
class BaseLLMResult(builtins.object)
    BaseLLMResult(id: str, object: str, created: int, model: str) -> None
 
Base class for LLM results containing common attributes.
 
Attributes:
    id: Unique identifier for the LLM operation.
    object: Type of object returned (e.g., "chat.completion").
    created: Timestamp when this result was created.
    model: Name or identifier of the model used.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, id: str, object: str, created: int, model: str) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'created': <class 'int'>, 'id': <class 'str'>, 'model': <class 'str'>, 'object': <class 'str'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('id', 'object', 'created', 'model')

 
class BaseModuleResults(builtins.object)
    BaseModuleResults(input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None) -> None
 
Base class for module results, containing the common grounding, filtering, masking, and translation attributes.
 
Attributes:
    input_filtering: Results from the input filtering module.
    output_filtering: Results from the output filtering module.
    input_masking: Results from the input masking module.
    grounding: Results from the grounding module, including the text extracted and provided as grounding context.
    input_translation: Results from the input translation module.
    output_translation: Results from the output translation module.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'grounding': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'input_filtering': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'input_masking': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'input_translation': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'output_filtering': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult], 'output_translation': typing.Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('input_filtering', 'output_filtering', 'input_masking', 'grounding', 'input_translation', 'output_translation')
grounding = None
input_filtering = None
input_masking = None
input_translation = None
output_filtering = None
output_translation = None

 
class ChatDelta(builtins.object)
    ChatDelta(content: str, role: Optional[str] = None, refusal: Optional[str] = None, tool_calls: Optional[List[gen_ai_hub.orchestration.models.response.ToolCallChunk]] = None) -> None
 
Represents a partial update in a streaming chat response.
 
Attributes:
    content: The text content of the chat delta.
    role: Optional role identifier (e.g., 'assistant', 'user') for the message delta.
    refusal: Optional refusal message produced by the model.
    tool_calls: Optional list of tool-call fragments (ToolCallChunk) carried by this delta.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, content: str, role: Optional[str] = None, refusal: Optional[str] = None, tool_calls: Optional[List[gen_ai_hub.orchestration.models.response.ToolCallChunk]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'content': <class 'str'>, 'refusal': typing.Optional[str], 'role': typing.Optional[str], 'tool_calls': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.ToolCallChunk]]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('content', 'role', 'refusal', 'tool_calls')
refusal = None
role = None
tool_calls = None

 
class FunctionCall(builtins.object)
    FunctionCall(name: Optional[str] = None, arguments: Optional[str] = None) -> None
 
The function that the model called.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, name: Optional[str] = None, arguments: Optional[str] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).
parse_arguments(self) -> dict
Attempts to parse the arguments string as JSON.

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'arguments': typing.Optional[str], 'name': typing.Optional[str]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('name', 'arguments')
arguments = None
name = None
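
Example: the model delivers arguments as a JSON-encoded string, so parse_arguments() is the usual way to turn them into keyword arguments. A minimal sketch (the FunctionCall instance, tool name, and arguments are made up here purely for illustration):

    from gen_ai_hub.orchestration.models.response import FunctionCall

    # Arguments arrive as a JSON string, not as a Python dict.
    call = FunctionCall(name="get_weather", arguments='{"city": "Berlin"}')

    args = call.parse_arguments()  # {'city': 'Berlin'}
    print(call.name, args["city"])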

 
class GenericModuleResult(builtins.object)
    GenericModuleResult(message: str, data: Optional[Dict[str, Any]] = None) -> None
 
Represents a generic module result in the orchestration process.
 
Attributes:
    message: A message or description generated by the module.
    data: Additional data relevant to the module result.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, message: str, data: Optional[Dict[str, Any]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'data': typing.Optional[typing.Dict[str, typing.Any]], 'message': <class 'str'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('message', 'data')
data = None

 
class LLMChoice(builtins.object)
    LLMChoice(index: int, message: gen_ai_hub.orchestration.models.response.ResponseChatMessage, finish_reason: str, logprobs: Optional[Dict[str, float]] = None) -> None
 
Represents an individual choice or response generated by the LLM.
 
Attributes:
    index: The index of this particular choice in the list of possible choices.
    message: The message object containing the role and content of the response.
    finish_reason: The reason why the model stopped generating tokens.
    logprobs: Optional dictionary containing token log probabilities.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, index: int, message: gen_ai_hub.orchestration.models.response.ResponseChatMessage, finish_reason: str, logprobs: Optional[Dict[str, float]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'finish_reason': <class 'str'>, 'index': <class 'int'>, 'logprobs': typing.Optional[typing.Dict[str, float]], 'message': <class 'gen_ai_hub.orchestration.models.response.ResponseChatMessage'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('index', 'message', 'finish_reason', 'logprobs')
logprobs = None

 
class LLMChoiceStreaming(builtins.object)
    LLMChoiceStreaming(index: int, delta: gen_ai_hub.orchestration.models.response.ChatDelta, finish_reason: Optional[str] = None, logprobs: Optional[Dict[str, float]] = None) -> None
 
Represents a streaming choice or partial response generated by the LLM.
 
Attributes:
    index: The index of this particular choice in the list of possible choices.
    delta: The partial update (ChatDelta) for this choice.
    finish_reason: Optional reason why generation stopped; remains None until the final chunk for this choice arrives.
    logprobs: Optional dictionary containing token log probabilities.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, index: int, delta: gen_ai_hub.orchestration.models.response.ChatDelta, finish_reason: Optional[str] = None, logprobs: Optional[Dict[str, float]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'delta': <class 'gen_ai_hub.orchestration.models.response.ChatDelta'>, 'finish_reason': typing.Optional[str], 'index': <class 'int'>, 'logprobs': typing.Optional[typing.Dict[str, float]]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('index', 'delta', 'finish_reason', 'logprobs')
finish_reason = None
logprobs = None

 
class LLMResult(BaseLLMResult)
    LLMResult(id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoice], usage: gen_ai_hub.orchestration.models.response.LLMUsage, system_fingerprint: Optional[str] = None) -> None
 
Represents the complete result from an LLM operation.
 
Attributes:
    id: The unique identifier for this LLM operation.
    object: The type of object returned (typically "chat.completion").
    created: The timestamp when this result was created.
    model: The name or identifier of the model used for generating the result.
    choices: A list of possible choices generated by the LLM.
    usage: The token usage statistics for this operation.
    system_fingerprint: An optional system fingerprint for tracking the model used.
 
 
Method resolution order:
LLMResult
BaseLLMResult
builtins.object

Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoice], usage: gen_ai_hub.orchestration.models.response.LLMUsage, system_fingerprint: Optional[str] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data and other attributes defined here:
__annotations__ = {'choices': typing.List[gen_ai_hub.orchestration.models.response.LLMChoice], 'system_fingerprint': typing.Optional[str], 'usage': <class 'gen_ai_hub.orchestration.models.response.LLMUsage'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('id', 'object', 'created', 'model', 'choices', 'usage', 'system_fingerprint')
system_fingerprint = None

Data descriptors inherited from BaseLLMResult:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

 
class LLMResultStreaming(BaseLLMResult)
    LLMResultStreaming(id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming], system_fingerprint: Optional[str] = None) -> None
 
Represents a streaming result from an LLM operation.
 
Attributes:
    id: The unique identifier for this LLM operation.
    object: The type of object returned (typically "chat.completion.chunk").
    created: The timestamp when this result was created.
    model: The name or identifier of the model used.
    choices: A list of streaming choices generated by the LLM.
    system_fingerprint: An optional system fingerprint for tracking the model used.
 
 
Method resolution order:
LLMResultStreaming
BaseLLMResult
builtins.object

Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, id: str, object: str, created: int, model: str, choices: List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming], system_fingerprint: Optional[str] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data and other attributes defined here:
__annotations__ = {'choices': typing.List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming], 'system_fingerprint': typing.Optional[str]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('id', 'object', 'created', 'model', 'choices', 'system_fingerprint')
system_fingerprint = None

Data descriptors inherited from BaseLLMResult:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

 
class LLMUsage(builtins.object)
    LLMUsage(completion_tokens: int, prompt_tokens: int, total_tokens: int) -> None
 
Represents the token usage statistics for an LLM (Large Language Model) operation.
 
Attributes:
    completion_tokens: The number of tokens generated by the model in the response.
    prompt_tokens: The number of tokens in the input prompt.
    total_tokens: The total number of tokens used, including both prompt and completion tokens.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, completion_tokens: int, prompt_tokens: int, total_tokens: int) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'completion_tokens': <class 'int'>, 'prompt_tokens': <class 'int'>, 'total_tokens': <class 'int'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('completion_tokens', 'prompt_tokens', 'total_tokens')
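
Example: a short sketch of reading usage statistics, assuming result is an LLMResult obtained from a completed orchestration call (the call itself is not shown):

    usage = result.usage
    # total_tokens is typically the sum of prompt and completion tokens.
    print(f"prompt={usage.prompt_tokens} "
          f"completion={usage.completion_tokens} "
          f"total={usage.total_tokens}")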

 
class MessageToolCall(builtins.object)
    MessageToolCall(id: str, type: Literal['function'], function: gen_ai_hub.orchestration.models.response.FunctionCall) -> None
 
Represents a tool call within a message, specifically a function call.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, id: str, type: Literal['function'], function: gen_ai_hub.orchestration.models.response.FunctionCall) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).
to_dict(self)

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'function': <class 'gen_ai_hub.orchestration.models.response.FunctionCall'>, 'id': <class 'str'>, 'type': typing.Literal['function']}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('id', 'type', 'function')
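
Example: a minimal dispatch sketch, assuming message is a ResponseChatMessage whose tool_calls field is populated, and TOOLS is a hypothetical registry mapping tool names to Python callables (get_weather is made up):

    TOOLS = {"get_weather": get_weather}  # hypothetical registry

    for tool_call in message.tool_calls or []:
        if tool_call.type == "function":  # currently the only supported type
            handler = TOOLS[tool_call.function.name]
            result = handler(**tool_call.function.parse_arguments())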

 
class ModuleResults(BaseModuleResults)
    ModuleResults(input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResult] = None, templating: Optional[List[gen_ai_hub.orchestration.models.response.ResponseChatMessage]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoice]] = None) -> None
 
Represents the results of various modules used in processing an orchestration request.
 
Attributes:
    templating: A list of messages that define the conversation's context or template.
    llm: The result from the LLM operation.
    input_filtering: The result of input filtering, if applicable.
    output_filtering: The result of output filtering, if applicable.
    input_masking: The result of input masking, if applicable.
    output_unmasking: The result of output unmasking, if applicable.
    grounding: The result of the grounding module, if applicable.
    input_translation: The result of input translation, if applicable.
    output_translation: The result of output translation, if applicable.
 
 
Method resolution order:
ModuleResults
BaseModuleResults
builtins.object

Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResult] = None, templating: Optional[List[gen_ai_hub.orchestration.models.response.ResponseChatMessage]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoice]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data and other attributes defined here:
__annotations__ = {'llm': typing.Optional[gen_ai_hub.orchestration.models.response.LLMResult], 'output_unmasking': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.LLMChoice]], 'templating': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.ResponseChatMessage]]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('input_filtering', 'output_filtering', 'input_masking', 'grounding', 'input_translation', 'output_translation', 'llm', 'templating', 'output_unmasking')
llm = None
output_unmasking = None
templating = None

Data descriptors inherited from BaseModuleResults:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes inherited from BaseModuleResults:
grounding = None
input_filtering = None
input_masking = None
input_translation = None
output_filtering = None
output_translation = None
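
Example: fields for modules that did not run stay None, so check before reading. A sketch of inspecting module results, assuming response is an OrchestrationResponse returned by the orchestration service (the call itself is not shown):

    results = response.module_results
    if results.input_filtering is not None:
        print("input filtering:", results.input_filtering.message)
        if results.input_filtering.data:
            print("details:", results.input_filtering.data)
    if results.llm is not None:
        print("model used:", results.llm.model)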

 
class ModuleResultsStreaming(BaseModuleResults)
    ModuleResultsStreaming(input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResultStreaming] = None, templating: Optional[List[gen_ai_hub.orchestration.models.response.ChatDelta]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming]] = None) -> None
 
Represents the streaming results of various modules used in processing an orchestration request.
 
Attributes:
    llm: The streaming result from the LLM operation.
    templating: A list of chat deltas that define the conversation's context or template.
    input_filtering: The result of input filtering, if applicable.
    output_filtering: The result of output filtering, if applicable.
    input_masking: The result of input masking, if applicable.
    output_unmasking: The result of output unmasking for streaming responses.
    grounding: The result of the grounding module, if applicable.
    input_translation: The result of input translation, if applicable.
    output_translation: The result of output translation, if applicable.
 
 
Method resolution order:
ModuleResultsStreaming
BaseModuleResults
builtins.object

Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, input_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_filtering: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_masking: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, grounding: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, input_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, output_translation: Optional[gen_ai_hub.orchestration.models.response.GenericModuleResult] = None, llm: Optional[gen_ai_hub.orchestration.models.response.LLMResultStreaming] = None, templating: Optional[List[gen_ai_hub.orchestration.models.response.ChatDelta]] = None, output_unmasking: Optional[List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data and other attributes defined here:
__annotations__ = {'llm': typing.Optional[gen_ai_hub.orchestration.models.response.LLMResultStreaming], 'output_unmasking': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.LLMChoiceStreaming]], 'templating': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.ChatDelta]]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('input_filtering', 'output_filtering', 'input_masking', 'grounding', 'input_translation', 'output_translation', 'llm', 'templating', 'output_unmasking')
llm = None
output_unmasking = None
templating = None

Data descriptors inherited from BaseModuleResults:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes inherited from BaseModuleResults:
grounding = None
input_filtering = None
input_masking = None
input_translation = None
output_filtering = None
output_translation = None

 
class OrchestrationResponse(builtins.object)
    OrchestrationResponse(request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResults, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResult) -> None
 
Represents the complete response from an orchestration process.
 
Attributes:
    request_id: The unique identifier for the request being processed.
    module_results: The results from the various modules involved in processing the request.
    orchestration_result: The final result from the orchestration, typically mirroring the LLM result.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResults, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResult) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'module_results': <class 'gen_ai_hub.orchestration.models.response.ModuleResults'>, 'orchestration_result': <class 'gen_ai_hub.orchestration.models.response.LLMResult'>, 'request_id': <class 'str'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('request_id', 'module_results', 'orchestration_result')
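
Example: the generated text usually lives in the first choice of orchestration_result. A minimal sketch, assuming response is an OrchestrationResponse returned by the SDK's orchestration service (the service call itself is not shown):

    choice = response.orchestration_result.choices[0]
    print(choice.message.content)   # the generated text
    print(choice.finish_reason)     # e.g. "stop"
    print(response.orchestration_result.usage.total_tokens)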

 
class OrchestrationResponseStreaming(builtins.object)
    OrchestrationResponseStreaming(request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResultsStreaming, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResultStreaming) -> None
 
Represents the streaming response from an orchestration process.
 
Attributes:
    request_id: The unique identifier for the request being processed.
    module_results: The streaming results from the various modules involved in processing the request.
    orchestration_result: The streaming result from the orchestration.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, request_id: str, module_results: gen_ai_hub.orchestration.models.response.ModuleResultsStreaming, orchestration_result: gen_ai_hub.orchestration.models.response.LLMResultStreaming) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'module_results': <class 'gen_ai_hub.orchestration.models.response.ModuleResultsStreaming'>, 'orchestration_result': <class 'gen_ai_hub.orchestration.models.response.LLMResultStreaming'>, 'request_id': <class 'str'>}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('request_id', 'module_results', 'orchestration_result')
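
Example: in streaming mode each chunk carries ChatDelta updates, and concatenating their content fields reconstructs the full reply. A sketch, assuming stream is an iterable of OrchestrationResponseStreaming chunks produced by a streaming orchestration call (not shown):

    parts = []
    for chunk in stream:
        for choice in chunk.orchestration_result.choices:
            if choice.delta.content:
                parts.append(choice.delta.content)
            if choice.finish_reason:  # set once the choice is complete
                print("finished:", choice.finish_reason)
    full_text = "".join(parts)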

 
class ResponseChatMessage(builtins.object)
    ResponseChatMessage(role: str, content: str, refusal: Optional[str] = None, tool_calls: Optional[List[gen_ai_hub.orchestration.models.response.MessageToolCall]] = None) -> None
 
Represents a chat message in a response.
 
Attributes:
    role: The role of the message author (e.g., 'assistant').
    content: The text content of the message.
    refusal: Optional refusal message produced by the model.
    tool_calls: Optional list of tool calls requested by the model.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, role: str, content: str, refusal: Optional[str] = None, tool_calls: Optional[List[gen_ai_hub.orchestration.models.response.MessageToolCall]] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).
to_dict(self)

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'content': <class 'str'>, 'refusal': typing.Optional[str], 'role': <class 'str'>, 'tool_calls': typing.Optional[typing.List[gen_ai_hub.orchestration.models.response.MessageToolCall]]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('role', 'content', 'refusal', 'tool_calls')
refusal = None
tool_calls = None

 
class ToolCallChunk(builtins.object)
    ToolCallChunk(index: int, id: Optional[str] = None, type: Optional[str] = None, function: Optional[gen_ai_hub.orchestration.models.response.FunctionCall] = None) -> None
 
Represents a fragment of a tool call received in a streaming response.
 
Attributes:
    index: The position of the tool call in the message's tool-call list.
    id: Optional identifier of the tool call (typically present only on the first fragment).
    type: Optional tool-call type (e.g., 'function').
    function: Optional partial function call carrying a name and/or a piece of the arguments string.
 
  Methods defined here:
__eq__(self, other)
Return self==value.
__init__(self, index: int, id: Optional[str] = None, type: Optional[str] = None, function: Optional[gen_ai_hub.orchestration.models.response.FunctionCall] = None) -> None
Initialize self.  See help(type(self)) for accurate signature.
__repr__(self)
Return repr(self).

Data descriptors defined here:
__dict__
dictionary for instance variables (if defined)
__weakref__
list of weak references to the object (if defined)

Data and other attributes defined here:
__annotations__ = {'function': typing.Optional[gen_ai_hub.orchestration.models.response.FunctionCall], 'id': typing.Optional[str], 'index': <class 'int'>, 'type': typing.Optional[str]}
__dataclass_params__ = _DataclassParams(init=True,repr=True,eq=True,order=False,unsafe_hash=False,frozen=False)
__hash__ = None
__match_args__ = ('index', 'id', 'type', 'function')
function = None
id = None
type = None
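
Example: streamed tool calls arrive fragmented, so chunks with the same index must be stitched back together; typically the first fragment carries id, type, and the function name, and later fragments append pieces of the JSON arguments string. A reassembly sketch under those assumptions, where chunks is a list of ToolCallChunk objects collected from a stream:

    calls = {}  # index -> accumulated tool-call data
    for part in chunks:
        entry = calls.setdefault(part.index, {"id": None, "name": None, "arguments": ""})
        if part.id:
            entry["id"] = part.id
        if part.function:
            if part.function.name:
                entry["name"] = part.function.name
            if part.function.arguments:
                entry["arguments"] += part.function.arguments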

 
Data
        Any = typing.Any
Dict = typing.Dict
List = typing.List
Literal = typing.Literal
Optional = typing.Optional