close()
Close httplib2 connections.
create(parent, body=None, x__xgafv=None)
Creates a version for the specified Playbook.
delete(name, x__xgafv=None)
Deletes the specified version of the Playbook.
get(name, x__xgafv=None)
Retrieves the specified version of the Playbook.
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
Lists versions for the specified Playbook.
list_next()
Retrieves the next page of results.
restore(name, body=None, x__xgafv=None)
Retrieves the specified version of the Playbook and stores it as the current playbook draft, returning the playbook with resources updated.
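Example (a minimal usage sketch, not part of the generated reference): the snippet below builds the v3beta1 Dialogflow client with google-api-python-client and Application Default Credentials, then exercises create, list/list_next, and restore. The project, location, agent, and playbook IDs are placeholders, and the `playbookVersions` response key follows the ListPlaybookVersionsResponse schema.

  from googleapiclient.discovery import build

  # Build the Dialogflow CX v3beta1 client; credentials default to ADC.
  service = build("dialogflow", "v3beta1")

  # Placeholder resource name; substitute real IDs.
  parent = "projects/my-project/locations/global/agents/my-agent/playbooks/my-playbook"
  versions = service.projects().locations().agents().playbooks().versions()

  # Snapshot the current playbook as a new version.
  version = versions.create(parent=parent, body={"description": "Initial snapshot"}).execute()

  # List versions, following pagination via list_next().
  request = versions.list(parent=parent, pageSize=10)
  while request is not None:
      response = request.execute()
      for v in response.get("playbookVersions", []):
          print(v["name"])
      request = versions.list_next(previous_request=request, previous_response=response)

  # Restore a version into the current playbook draft.
  restored = versions.restore(name=version["name"], body={}).execute()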
close()
Close httplib2 connections.
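Example (a short sketch): close() releases the underlying httplib2 connections when you are done issuing requests. In recent google-api-python-client releases the service object also supports the context-manager protocol, which calls close() automatically.

  from googleapiclient.discovery import build

  # Close explicitly...
  service = build("dialogflow", "v3beta1")
  try:
      pass  # issue requests here
  finally:
      service.close()  # releases the underlying httplib2 connections

  # ...or rely on a with-block to call close() on exit.
  with build("dialogflow", "v3beta1") as service:
      pass  # issue requests here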
create(parent, body=None, x__xgafv=None)
Creates a version for the specified Playbook. Args: parent: string, Required. The playbook to create a version for. Format: `projects//locations//agents//playbooks/`. (required) body: object, The request body. The object takes the form of: { # Playbook version is a snapshot of the playbook at a certain timestamp. "description": "A String", # Optional. The description of the playbook version. "examples": [ # Output only. Snapshot of the examples belonging to the playbook when the playbook version is created. { # Example represents a sample execution of the playbook in the conversation. An example consists of a list of ordered actions performed by the end user or Dialogflow agent according to the playbook instructions to fulfill the task. "actions": [ # Required. The ordered list of actions performed by the end user and the Dialogflow agent. { # Action performed by the end user or Dialogflow agent in the conversation. "agentUtterance": { # AgentUtterance represents one message sent by the agent. # Optional. Action performed by the agent as a message. "requireGeneration": True or False, # Optional. True if the agent utterance needs to be generated by the LLM. Only used in webhook response to differentiate from empty text. Revisit whether we need this field or mark `text` as optional when we expose webhook interface to customer. "text": "A String", # Required. Message content in text. }, "completeTime": "A String", # Output only. Timestamp of the completion of the agent action. "displayName": "A String", # Output only. The display name of the action. "event": { # Event represents the event sent by the customer. # Optional. The agent received an event from the customer or a system event is emitted. "event": "A String", # Required. Name of the event. "text": "A String", # Optional. Unstructured text payload of the event. }, "flowInvocation": { # Stores metadata of the invocation of a child CX flow. Flow invocation actions enter the child flow. # Optional. Action performed on behalf of the agent by invoking a CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "flowState": "A String", # Required. Flow invocation's output state. "inputActionParameters": { # Optional. A list of input parameters for the flow. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the flow invocation. "a_key": "", # Properties of the object. }, }, "flowStateUpdate": { # Stores metadata of the state update action, such as a state machine execution in flows. # Optional. Output only. The state machine update in flows. "destination": "A String", # The destination of the transition. Format: `projects//locations//agents//flows//pages/` or `projects//locations//agents//playbooks/`. "eventType": "A String", # The type of the event that triggered the state update. "functionCall": { # Stores the metadata of a function call to execute. # The function call to execute. "name": "A String", # The name of the function call. }, "pageState": { # Stores the state of a page and its flow. # The updated page and flow state. "displayName": "A String", # The display name of the page. "page": "A String", # The ID of the page. Format: `projects//locations//agents//flows//pages/`. "status": "A String", # The status of the page. }, "updatedParameters": { # The updated parameters. "a_key": "", # Properties of the object. 
}, }, "flowTransition": { # Stores metadata of the transition to a target CX flow. Flow transition actions exit the caller playbook and enter the child flow. # Optional. Action performed on behalf of the agent by transitioning to a target CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, }, "intentMatch": { # Stores metadata of the intent match action. # Optional. Output only. Intent Match in flows. "matchedIntents": [ # The matched intent. { # Stores the matched intent, which is the result of the intent match action. "displayName": "A String", # The display name of the matched intent. "generativeFallback": { # The generative fallback response of the matched intent. "a_key": "", # Properties of the object. }, "intentId": "A String", # The ID of the matched intent. "score": 3.14, # The score of the matched intent. }, ], }, "llmCall": { # Stores metadata of the call of an LLM. # Optional. Output only. LLM call performed by the agent. "model": "A String", # The model of the LLM call. "retrievedExamples": [ # A list of relevant examples used for the LLM prompt. { # Relevant example used for the LLM prompt. "exampleDisplayName": "A String", # The display name of the example. "exampleId": "A String", # The id of the example. "matchedRetrievalLabel": "A String", # Optional. The matched retrieval label of this LLM call. "retrievalStrategy": "A String", # Retrieval strategy of the example. }, ], "temperature": 3.14, # The temperature of the LLM call. "tokenCount": { # Stores token counts of the LLM call. # The token counts of the LLM call. "conversationContextTokenCount": "A String", # The number of tokens used for the conversation history in the prompt. "exampleTokenCount": "A String", # The number of tokens used for the retrieved examples in the prompt. "totalInputTokenCount": "A String", # The total number of tokens used for the input to the LLM call. "totalOutputTokenCount": "A String", # The total number of tokens used for the output of the LLM call. }, }, "playbookInvocation": { # Stores metadata of the invocation of a child playbook. Playbook invocation actions enter the child playbook. # Optional. Action performed on behalf of the agent by invoking a child playbook. "displayName": "A String", # Output only. The display name of the playbook. "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "playbookInput": { # Input of the playbook. # Optional. Input of the child playbook invocation. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. Output of the child playbook invocation. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "playbookState": "A String", # Required. Playbook invocation's output state. 
}, "playbookTransition": { # Stores metadata of the transition to another target playbook. Playbook transition actions exit the caller playbook and enter the target playbook. # Optional. Action performed on behalf of the agent by transitioning to a target playbook. "displayName": "A String", # Output only. The display name of the playbook. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. }, "startTime": "A String", # Output only. Timestamp of the start of the agent action. "status": { # The status of the action. # Optional. Output only. The status of the action. "exception": { # Exception thrown during the execution of an action. # Optional. The exception thrown during the execution of the action. "errorMessage": "A String", # Optional. The error message. }, }, "stt": { # Stores metadata of the Speech-to-Text action. # Optional. Speech-to-text action performed by the agent. }, "subExecutionSteps": [ # Optional. The detailed tracing information for sub execution steps of the action. { # A span represents a sub execution step of an action. "completeTime": "A String", # Timestamp of the completion of the span. "metrics": [ # The unordered collection of metrics in this span. { # A named metric is a metric with name, value and unit. "name": "A String", # The name of the metric. "unit": "A String", # The unit in which this metric is reported. Follows [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard. "value": "", # The value of the metric. }, ], "name": "A String", # The name of the span. "startTime": "A String", # Timestamp of the start of the span. "tags": [ # The metadata tags of the span such as span type. "A String", ], }, ], "toolUse": { # Stores metadata of the invocation of an action supported by a tool. # Optional. Action performed on behalf of the agent by calling a plugin tool. "action": "A String", # Optional. Name of the action to be called during the tool use. "dataStoreToolTrace": { # The tracing information for the data store tool. # Optional. Data store tool trace. "dataStoreConnectionSignals": { # Data store connection feature output signals. Might be only partially field if processing stop before the final answer. Reasons for this can be, but are not limited to: empty UCS search results, positive RAI check outcome, grounding failure, ... # Optional. Data store connection feature output signals. "answer": "A String", # Optional. The final compiled answer. "answerGenerationModelCallSignals": { # Diagnostic info related to the answer generation model call. # Optional. Diagnostic info related to the answer generation model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "answerParts": [ # Optional. Answer parts with relevant citations. Concatenation of texts should add up the `answer` (not counting whitespaces). { # Answer part with citation. "supportingIndices": [ # Citations for this answer part. Indices of `search_snippets`. 42, ], "text": "A String", # Substring of the answer. }, ], "citedSnippets": [ # Optional. Snippets cited by the answer generation model from the most to least relevant. 
{ # Snippet cited by the answer generation model. "searchSnippet": { # Search snippet details. # Details of the snippet. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, "snippetIndex": 42, # Index of the snippet in `search_snippets` field. }, ], "groundingSignals": { # Grounding signals. # Optional. Grounding signals. "decision": "A String", # Represents the decision of the grounding check. "score": "A String", # Grounding score bucket setting. }, "rewriterModelCallSignals": { # Diagnostic info related to the rewriter model call. # Optional. Diagnostic info related to the rewriter model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "rewrittenQuery": "A String", # Optional. Rewritten string query used for search. "safetySignals": { # Safety check results. # Optional. Safety check result. "bannedPhraseMatch": "A String", # Specifies banned phrase match subject. "decision": "A String", # Safety decision. "matchedBannedPhrase": "A String", # The matched banned phrase if there was a match. }, "searchSnippets": [ # Optional. Search snippets included in the answer generation prompt. { # Search snippet details. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, ], }, }, "displayName": "A String", # Output only. The display name of the tool. "inputActionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the action. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool that should be used. Format: `projects//locations//agents//tools/`. "webhookToolTrace": { # The tracing information for the webhook tool. # Optional. Webhook tool trace. "webhookTag": "A String", # Optional. The tag of the webhook. "webhookUri": "A String", # Optional. The url of the webhook. }, }, "tts": { # Stores metadata of the Text-to-Speech action. # Optional. Text-to-speech action performed by the agent. }, "userUtterance": { # UserUtterance represents one message sent by the customer. # Optional. Agent obtained a message from the customer. "audio": "A String", # Optional. Audio input. "audioTokens": [ # Optional. Tokens of the audio input. 42, ], "text": "A String", # Required. Message content in text. }, }, ], "conversationState": "A String", # Required. Example's output state. "createTime": "A String", # Output only. The timestamp of initial example creation. "description": "A String", # Optional. The high level concise description of the example. The max number of characters is 200. "displayName": "A String", # Required. The display name of the example. "languageCode": "A String", # Optional. The language code of the example. If not specified, the agent's default language is used. 
Note: languages must be enabled in the agent before they can be used. Note: the example's language code is not currently used in Dialogflow agents. "name": "A String", # The unique identifier of the playbook example. Format: `projects//locations//agents//playbooks//examples/`. "playbookInput": { # Input of the playbook. # Optional. The input to the playbook in the example. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. The output of the playbook in the example. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "tokenCount": "A String", # Output only. Estimated number of tokens the current example takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the example was updated. }, ], "name": "A String", # The unique identifier of the playbook version. Format: `projects//locations//agents//playbooks//versions/`. "playbook": { # Playbook is the basic building block to instruct the LLM how to execute a certain task. A playbook consists of a goal to accomplish, an optional list of step-by-step instructions (a step instruction may refer to the names of the custom or default plugin tools to use) to perform the task, a list of contextual input data to be passed in at the beginning of the invocation, and a list of output parameters to store the playbook result. # Output only. Snapshot of the playbook when the playbook version is created. "createTime": "A String", # Output only. The timestamp of initial playbook creation. "displayName": "A String", # Required. The human-readable name of the playbook, unique within an agent. "goal": "A String", # Required. High level description of the goal the playbook intends to accomplish. A goal should be concise since it's visible to other playbooks that may reference this playbook. "handlers": [ # Optional. A list of registered handlers to execute based on the specified triggers. { # Handler can be used to define custom logic to be executed based on the user-specified triggers. "eventHandler": { # A handler that is triggered by the specified event. # A handler triggered by event. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "event": "A String", # Required. The name of the event that triggers this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when the event occurs. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. 
For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name or just a prefix is used depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching DTMF input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching DTMF input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction. If true, a pre-defined session parameter `$session.params.conversation-redaction` is used to determine whether the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. 
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. 
What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). 
}, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. 
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. 
}, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. 
A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, }, "lifecycleHandler": { # A handler that is triggered on the specific lifecycle_stage of the playbook execution. # A handler triggered during specific lifecycle of the playbook execution. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when this handler is triggered. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at lower level overrides the settings exposed at higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level overrides the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching dtmf input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. 
"interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching dtmf input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction, if true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. 
Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. 
# A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. 
"inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. 
Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. 
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes the webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, "lifecycleStage": "A String", # Required. The name of the lifecycle stage that triggers this handler. Supported values: * `playbook-start` * `pre-action-selection` * `pre-action-execution` }, }, ], "inputParameterDefinitions": [ # Optional. Defined structured input parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. 
"tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "instruction": { # Message of the Instruction of the playbook. # Instruction to accomplish target goal. "guidelines": "A String", # General guidelines for the playbook. These are unstructured instructions that are not directly part of the goal, e.g. "Always be polite". It's valid for this text to be long and used instead of steps altogether. "steps": [ # Ordered list of step-by-step execution instructions to accomplish target goal. { # Message of single step execution. "steps": [ # Sub-processing needed to execute the current step. # Object with schema name: GoogleCloudDialogflowCxV3beta1PlaybookStep ], "text": "A String", # Step instruction in text format. }, ], }, "llmModelSettings": { # Settings for LLM models. # Optional. LLM model settings for the playbook. "model": "A String", # The selected LLM model. "promptText": "A String", # The custom prompt to use. }, "name": "A String", # The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "outputParameterDefinitions": [ # Optional. Defined structured output parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "playbookType": "A String", # Optional. Type of the playbook. "referencedFlows": [ # Output only. The resource name of flows referenced by the current playbook in the instructions. "A String", ], "referencedPlaybooks": [ # Output only. The resource name of other playbooks referenced by the current playbook in the instructions. "A String", ], "referencedTools": [ # Optional. The resource name of tools referenced by the current playbook in the instructions. If not provided explicitly, they will be inferred from the tools referenced in the goal and steps. "A String", ], "speechSettings": { # Define behaviors of speech to text detection. # Optional. Playbook-level settings for speech to text detection. "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. 
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, "tokenCount": "A String", # Output only. Estimated number of tokes current playbook takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the playbook version was updated. }, "updateTime": "A String", # Output only. Last time the playbook version was created or modified. } x__xgafv: string, V1 error format. Allowed values 1 - v1 error format 2 - v2 error format Returns: An object of the form: { # Playbook version is a snapshot of the playbook at certain timestamp. "description": "A String", # Optional. The description of the playbook version. "examples": [ # Output only. Snapshot of the examples belonging to the playbook when the playbook version is created. { # Example represents a sample execution of the playbook in the conversation. An example consists of a list of ordered actions performed by end user or Dialogflow agent according the playbook instructions to fulfill the task. "actions": [ # Required. The ordered list of actions performed by the end user and the Dialogflow agent. { # Action performed by end user or Dialogflow agent in the conversation. "agentUtterance": { # AgentUtterance represents one message sent by the agent. # Optional. Action performed by the agent as a message. "requireGeneration": True or False, # Optional. True if the agent utterance needs to be generated by the LLM. Only used in webhook response to differentiate from empty text. Revisit whether we need this field or mark `text` as optional when we expose webhook interface to customer. "text": "A String", # Required. Message content in text. }, "completeTime": "A String", # Output only. Timestamp of the completion of the agent action. "displayName": "A String", # Output only. The display name of the action. "event": { # Event represents the event sent by the customer. # Optional. The agent received an event from the customer or a system event is emitted. "event": "A String", # Required. Name of the event. "text": "A String", # Optional. Unstructured text payload of the event. }, "flowInvocation": { # Stores metadata of the invocation of a child CX flow. Flow invocation actions enter the child flow. # Optional. Action performed on behalf of the agent by invoking a CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "flowState": "A String", # Required. Flow invocation's output state. "inputActionParameters": { # Optional. A list of input parameters for the flow. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the flow invocation. "a_key": "", # Properties of the object. }, }, "flowStateUpdate": { # Stores metadata of the state update action, such as a state machine execution in flows. # Optional. Output only. The state machine update in flows. "destination": "A String", # The destination of the transition. Format: `projects//locations//agents//flows//pages/` or `projects//locations//agents//playbooks/`. "eventType": "A String", # The type of the event that triggered the state update. "functionCall": { # Stores the metadata of a function call to execute. # The function call to execute. "name": "A String", # The name of the function call. }, "pageState": { # Stores the state of a page and its flow. # The updated page and flow state. 
"displayName": "A String", # The display name of the page. "page": "A String", # The ID of the page. Format: `projects//locations//agents//flows//pages/`. "status": "A String", # The status of the page. }, "updatedParameters": { # The updated parameters. "a_key": "", # Properties of the object. }, }, "flowTransition": { # Stores metadata of the transition to a target CX flow. Flow transition actions exit the caller playbook and enter the child flow. # Optional. Action performed on behalf of the agent by transitioning to a target CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, }, "intentMatch": { # Stores metadata of the intent match action. # Optional. Output only. Intent Match in flows. "matchedIntents": [ # The matched intent. { # Stores the matched intent, which is the result of the intent match action. "displayName": "A String", # The display name of the matched intent. "generativeFallback": { # The generative fallback response of the matched intent. "a_key": "", # Properties of the object. }, "intentId": "A String", # The ID of the matched intent. "score": 3.14, # The score of the matched intent. }, ], }, "llmCall": { # Stores metadata of the call of an LLM. # Optional. Output only. LLM call performed by the agent. "model": "A String", # The model of the LLM call. "retrievedExamples": [ # A list of relevant examples used for the LLM prompt. { # Relevant example used for the LLM prompt. "exampleDisplayName": "A String", # The display name of the example. "exampleId": "A String", # The id of the example. "matchedRetrievalLabel": "A String", # Optional. The matched retrieval label of this LLM call. "retrievalStrategy": "A String", # Retrieval strategy of the example. }, ], "temperature": 3.14, # The temperature of the LLM call. "tokenCount": { # Stores token counts of the LLM call. # The token counts of the LLM call. "conversationContextTokenCount": "A String", # The number of tokens used for the conversation history in the prompt. "exampleTokenCount": "A String", # The number of tokens used for the retrieved examples in the prompt. "totalInputTokenCount": "A String", # The total number of tokens used for the input to the LLM call. "totalOutputTokenCount": "A String", # The total number of tokens used for the output of the LLM call. }, }, "playbookInvocation": { # Stores metadata of the invocation of a child playbook. Playbook invocation actions enter the child playbook. # Optional. Action performed on behalf of the agent by invoking a child playbook. "displayName": "A String", # Output only. The display name of the playbook. "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "playbookInput": { # Input of the playbook. # Optional. Input of the child playbook invocation. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. Output of the child playbook invocation. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. 
}, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "playbookState": "A String", # Required. Playbook invocation's output state. }, "playbookTransition": { # Stores metadata of the transition to another target playbook. Playbook transition actions exit the caller playbook and enter the target playbook. # Optional. Action performed on behalf of the agent by transitioning to a target playbook. "displayName": "A String", # Output only. The display name of the playbook. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. }, "startTime": "A String", # Output only. Timestamp of the start of the agent action. "status": { # The status of the action. # Optional. Output only. The status of the action. "exception": { # Exception thrown during the execution of an action. # Optional. The exception thrown during the execution of the action. "errorMessage": "A String", # Optional. The error message. }, }, "stt": { # Stores metadata of the Speech-to-Text action. # Optional. Speech-to-text action performed by the agent. }, "subExecutionSteps": [ # Optional. The detailed tracing information for sub execution steps of the action. { # A span represents a sub execution step of an action. "completeTime": "A String", # Timestamp of the completion of the span. "metrics": [ # The unordered collection of metrics in this span. { # A named metric is a metric with name, value and unit. "name": "A String", # The name of the metric. "unit": "A String", # The unit in which this metric is reported. Follows [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard. "value": "", # The value of the metric. }, ], "name": "A String", # The name of the span. "startTime": "A String", # Timestamp of the start of the span. "tags": [ # The metadata tags of the span such as span type. "A String", ], }, ], "toolUse": { # Stores metadata of the invocation of an action supported by a tool. # Optional. Action performed on behalf of the agent by calling a plugin tool. "action": "A String", # Optional. Name of the action to be called during the tool use. "dataStoreToolTrace": { # The tracing information for the data store tool. # Optional. Data store tool trace. "dataStoreConnectionSignals": { # Data store connection feature output signals. Might be only partially field if processing stop before the final answer. Reasons for this can be, but are not limited to: empty UCS search results, positive RAI check outcome, grounding failure, ... # Optional. Data store connection feature output signals. "answer": "A String", # Optional. The final compiled answer. "answerGenerationModelCallSignals": { # Diagnostic info related to the answer generation model call. # Optional. Diagnostic info related to the answer generation model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "answerParts": [ # Optional. Answer parts with relevant citations. Concatenation of texts should add up the `answer` (not counting whitespaces). { # Answer part with citation. 
"supportingIndices": [ # Citations for this answer part. Indices of `search_snippets`. 42, ], "text": "A String", # Substring of the answer. }, ], "citedSnippets": [ # Optional. Snippets cited by the answer generation model from the most to least relevant. { # Snippet cited by the answer generation model. "searchSnippet": { # Search snippet details. # Details of the snippet. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, "snippetIndex": 42, # Index of the snippet in `search_snippets` field. }, ], "groundingSignals": { # Grounding signals. # Optional. Grounding signals. "decision": "A String", # Represents the decision of the grounding check. "score": "A String", # Grounding score bucket setting. }, "rewriterModelCallSignals": { # Diagnostic info related to the rewriter model call. # Optional. Diagnostic info related to the rewriter model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "rewrittenQuery": "A String", # Optional. Rewritten string query used for search. "safetySignals": { # Safety check results. # Optional. Safety check result. "bannedPhraseMatch": "A String", # Specifies banned phrase match subject. "decision": "A String", # Safety decision. "matchedBannedPhrase": "A String", # The matched banned phrase if there was a match. }, "searchSnippets": [ # Optional. Search snippets included in the answer generation prompt. { # Search snippet details. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, ], }, }, "displayName": "A String", # Output only. The display name of the tool. "inputActionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the action. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool that should be used. Format: `projects//locations//agents//tools/`. "webhookToolTrace": { # The tracing information for the webhook tool. # Optional. Webhook tool trace. "webhookTag": "A String", # Optional. The tag of the webhook. "webhookUri": "A String", # Optional. The url of the webhook. }, }, "tts": { # Stores metadata of the Text-to-Speech action. # Optional. Text-to-speech action performed by the agent. }, "userUtterance": { # UserUtterance represents one message sent by the customer. # Optional. Agent obtained a message from the customer. "audio": "A String", # Optional. Audio input. "audioTokens": [ # Optional. Tokens of the audio input. 42, ], "text": "A String", # Required. Message content in text. }, }, ], "conversationState": "A String", # Required. Example's output state. "createTime": "A String", # Output only. The timestamp of initial example creation. "description": "A String", # Optional. The high level concise description of the example. 
The max number of characters is 200. "displayName": "A String", # Required. The display name of the example. "languageCode": "A String", # Optional. The language code of the example. If not specified, the agent's default language is used. Note: languages must be enabled in the agent before they can be used. Note: example's language code is not currently used in Dialogflow agents. "name": "A String", # The unique identifier of the playbook example. Format: `projects//locations//agents//playbooks//examples/`. "playbookInput": { # Input of the playbook. # Optional. The input to the playbook in the example. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. The output of the playbook in the example. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "tokenCount": "A String", # Output only. Estimated number of tokens the current example takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the example was updated. }, ], "name": "A String", # The unique identifier of the playbook version. Format: `projects//locations//agents//playbooks//versions/`. "playbook": { # Playbook is the basic building block to instruct the LLM how to execute a certain task. A playbook consists of a goal to accomplish, an optional list of step by step instructions (the step instruction may refer to names of the custom or default plugin tools to use) to perform the task, a list of contextual input data to be passed in at the beginning of the invocation, and a list of output parameters to store the playbook result. # Output only. Snapshot of the playbook when the playbook version is created. "createTime": "A String", # Output only. The timestamp of initial playbook creation. "displayName": "A String", # Required. The human-readable name of the playbook, unique within an agent. "goal": "A String", # Required. High level description of the goal the playbook intends to accomplish. A goal should be concise since it's visible to other playbooks that may reference this playbook. "handlers": [ # Optional. A list of registered handlers to execute based on the specified triggers. { # Handler can be used to define custom logic to be executed based on the user-specified triggers. "eventHandler": { # A handler that is triggered by the specified event. # A handler triggered by event. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "event": "A String", # Required. The name of the event that triggers this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required.
The fulfillment to call when the event occurs. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching dtmf input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching dtmf input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction, if true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models).
"a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. 
# Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
}, "telephonyTransferCall": { # Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow default to fill in the `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.desination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. 
* Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. 
Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API.
Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, }, "lifecycleHandler": { # A handler that is triggered on the specific lifecycle_stage of the playbook execution. # A handler triggered during a specific lifecycle stage of the playbook execution. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when this handler is triggered. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events.
For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching dtmf input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching dtmf input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction, if true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. 
This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. 
Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition).
}, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the response associated with that channel will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page.
It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only.
Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes the webhook. Warning: 1) This flag only affects the streaming API. Responses are still queued and returned once in the non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, "lifecycleStage": "A String", # Required. The name of the lifecycle stage that triggers this handler. Supported values: * `playbook-start` * `pre-action-selection` * `pre-action-execution` }, }, ], "inputParameterDefinitions": [ # Optional. Defined structured input parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter.
"typeSchema": { # Encapsulates different type schema variations: either a reference to a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "instruction": { # Message of the Instruction of the playbook. # Instruction to accomplish target goal. "guidelines": "A String", # General guidelines for the playbook. These are unstructured instructions that are not directly part of the goal, e.g. "Always be polite". It's valid for this text to be long and used instead of steps altogether. "steps": [ # Ordered list of step by step execution instructions to accomplish target goal. { # Message of single step execution. "steps": [ # Sub-processing needed to execute the current step. # Object with schema name: GoogleCloudDialogflowCxV3beta1PlaybookStep ], "text": "A String", # Step instruction in text format. }, ], }, "llmModelSettings": { # Settings for LLM models. # Optional. LLM model settings for the playbook. "model": "A String", # The selected LLM model. "promptText": "A String", # The custom prompt to use. }, "name": "A String", # The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "outputParameterDefinitions": [ # Optional. Defined structured output parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "playbookType": "A String", # Optional. Type of the playbook. "referencedFlows": [ # Output only. The resource name of flows referenced by the current playbook in the instructions. "A String", ], "referencedPlaybooks": [ # Output only. The resource name of other playbooks referenced by the current playbook in the instructions. "A String", ], "referencedTools": [ # Optional. The resource name of tools referenced by the current playbook in the instructions. If not provided explicitly, they will be implied using the tools referenced in the goal and steps.
"A String", ], "speechSettings": { # Define behaviors of speech to text detection. # Optional. Playbook-level settings for speech-to-text detection. "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, "tokenCount": "A String", # Output only. Estimated number of tokens the current playbook takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the playbook version was updated. }, "updateTime": "A String", # Output only. Last time the playbook version was created or modified. }
delete(name, x__xgafv=None)
Deletes the specified version of the Playbook. Args: name: string, Required. The name of the playbook version to delete. Format: `projects//locations//agents//playbooks//versions/`. (required) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format 2 - v2 error format Returns: An object of the form: { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } }
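A matching sketch for deleting a version, reusing the `service` object built in the create example above; the version ID is again a placeholder. A successful call returns the empty message described above, i.e. `{}`.

  # Placeholder resource name of the playbook version to delete.
  name = (
      "projects/my-project/locations/global/agents/my-agent"
      "/playbooks/my-playbook/versions/my-version"
  )
  service.projects().locations().agents().playbooks().versions().delete(name=name).execute()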
get(name, x__xgafv=None)
Retrieves the specified version of the Playbook. Args: name: string, Required. The name of the playbook version. Format: `projects//locations//agents//playbooks//versions/`. (required) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format 2 - v2 error format Returns: An object of the form: { # Playbook version is a snapshot of the playbook at a certain timestamp. "description": "A String", # Optional. The description of the playbook version. "examples": [ # Output only. Snapshot of the examples belonging to the playbook when the playbook version is created. { # Example represents a sample execution of the playbook in the conversation. An example consists of a list of ordered actions performed by end user or Dialogflow agent according to the playbook instructions to fulfill the task. "actions": [ # Required. The ordered list of actions performed by the end user and the Dialogflow agent. { # Action performed by end user or Dialogflow agent in the conversation. "agentUtterance": { # AgentUtterance represents one message sent by the agent. # Optional. Action performed by the agent as a message. "requireGeneration": True or False, # Optional. True if the agent utterance needs to be generated by the LLM. Only used in webhook response to differentiate from empty text. Revisit whether we need this field or mark `text` as optional when we expose webhook interface to customer. "text": "A String", # Required. Message content in text. }, "completeTime": "A String", # Output only. Timestamp of the completion of the agent action. "displayName": "A String", # Output only. The display name of the action. "event": { # Event represents the event sent by the customer. # Optional. The agent received an event from the customer or a system event is emitted. "event": "A String", # Required. Name of the event. "text": "A String", # Optional. Unstructured text payload of the event. }, "flowInvocation": { # Stores metadata of the invocation of a child CX flow. Flow invocation actions enter the child flow. # Optional. Action performed on behalf of the agent by invoking a CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "flowState": "A String", # Required. Flow invocation's output state. "inputActionParameters": { # Optional. A list of input parameters for the flow. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the flow invocation. "a_key": "", # Properties of the object. }, }, "flowStateUpdate": { # Stores metadata of the state update action, such as a state machine execution in flows. # Optional. Output only. The state machine update in flows. "destination": "A String", # The destination of the transition. Format: `projects//locations//agents//flows//pages/` or `projects//locations//agents//playbooks/`. "eventType": "A String", # The type of the event that triggered the state update. "functionCall": { # Stores the metadata of a function call to execute. # The function call to execute. "name": "A String", # The name of the function call. }, "pageState": { # Stores the state of a page and its flow. # The updated page and flow state. "displayName": "A String", # The display name of the page. "page": "A String", # The ID of the page. Format: `projects//locations//agents//flows//pages/`. "status": "A String", # The status of the page. }, "updatedParameters": { # The updated parameters.
"a_key": "", # Properties of the object. }, }, "flowTransition": { # Stores metadata of the transition to a target CX flow. Flow transition actions exit the caller playbook and enter the child flow. # Optional. Action performed on behalf of the agent by transitioning to a target CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, }, "intentMatch": { # Stores metadata of the intent match action. # Optional. Output only. Intent Match in flows. "matchedIntents": [ # The matched intent. { # Stores the matched intent, which is the result of the intent match action. "displayName": "A String", # The display name of the matched intent. "generativeFallback": { # The generative fallback response of the matched intent. "a_key": "", # Properties of the object. }, "intentId": "A String", # The ID of the matched intent. "score": 3.14, # The score of the matched intent. }, ], }, "llmCall": { # Stores metadata of the call of an LLM. # Optional. Output only. LLM call performed by the agent. "model": "A String", # The model of the LLM call. "retrievedExamples": [ # A list of relevant examples used for the LLM prompt. { # Relevant example used for the LLM prompt. "exampleDisplayName": "A String", # The display name of the example. "exampleId": "A String", # The id of the example. "matchedRetrievalLabel": "A String", # Optional. The matched retrieval label of this LLM call. "retrievalStrategy": "A String", # Retrieval strategy of the example. }, ], "temperature": 3.14, # The temperature of the LLM call. "tokenCount": { # Stores token counts of the LLM call. # The token counts of the LLM call. "conversationContextTokenCount": "A String", # The number of tokens used for the conversation history in the prompt. "exampleTokenCount": "A String", # The number of tokens used for the retrieved examples in the prompt. "totalInputTokenCount": "A String", # The total number of tokens used for the input to the LLM call. "totalOutputTokenCount": "A String", # The total number of tokens used for the output of the LLM call. }, }, "playbookInvocation": { # Stores metadata of the invocation of a child playbook. Playbook invocation actions enter the child playbook. # Optional. Action performed on behalf of the agent by invoking a child playbook. "displayName": "A String", # Output only. The display name of the playbook. "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "playbookInput": { # Input of the playbook. # Optional. Input of the child playbook invocation. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. Output of the child playbook invocation. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "playbookState": "A String", # Required. Playbook invocation's output state. 
}, "playbookTransition": { # Stores metadata of the transition to another target playbook. Playbook transition actions exit the caller playbook and enter the target playbook. # Optional. Action performed on behalf of the agent by transitioning to a target playbook. "displayName": "A String", # Output only. The display name of the playbook. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. }, "startTime": "A String", # Output only. Timestamp of the start of the agent action. "status": { # The status of the action. # Optional. Output only. The status of the action. "exception": { # Exception thrown during the execution of an action. # Optional. The exception thrown during the execution of the action. "errorMessage": "A String", # Optional. The error message. }, }, "stt": { # Stores metadata of the Speech-to-Text action. # Optional. Speech-to-text action performed by the agent. }, "subExecutionSteps": [ # Optional. The detailed tracing information for sub execution steps of the action. { # A span represents a sub execution step of an action. "completeTime": "A String", # Timestamp of the completion of the span. "metrics": [ # The unordered collection of metrics in this span. { # A named metric is a metric with name, value and unit. "name": "A String", # The name of the metric. "unit": "A String", # The unit in which this metric is reported. Follows [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard. "value": "", # The value of the metric. }, ], "name": "A String", # The name of the span. "startTime": "A String", # Timestamp of the start of the span. "tags": [ # The metadata tags of the span such as span type. "A String", ], }, ], "toolUse": { # Stores metadata of the invocation of an action supported by a tool. # Optional. Action performed on behalf of the agent by calling a plugin tool. "action": "A String", # Optional. Name of the action to be called during the tool use. "dataStoreToolTrace": { # The tracing information for the data store tool. # Optional. Data store tool trace. "dataStoreConnectionSignals": { # Data store connection feature output signals. Might be only partially filled if processing stops before the final answer. Reasons for this can be, but are not limited to: empty UCS search results, positive RAI check outcome, grounding failure, ... # Optional. Data store connection feature output signals. "answer": "A String", # Optional. The final compiled answer. "answerGenerationModelCallSignals": { # Diagnostic info related to the answer generation model call. # Optional. Diagnostic info related to the answer generation model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "answerParts": [ # Optional. Answer parts with relevant citations. Concatenation of the texts should add up to the `answer` (not counting whitespace). { # Answer part with citation. "supportingIndices": [ # Citations for this answer part. Indices of `search_snippets`. 42, ], "text": "A String", # Substring of the answer. }, ], "citedSnippets": [ # Optional. Snippets cited by the answer generation model from the most to least relevant.
{ # Snippet cited by the answer generation model. "searchSnippet": { # Search snippet details. # Details of the snippet. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, "snippetIndex": 42, # Index of the snippet in `search_snippets` field. }, ], "groundingSignals": { # Grounding signals. # Optional. Grounding signals. "decision": "A String", # Represents the decision of the grounding check. "score": "A String", # Grounding score bucket setting. }, "rewriterModelCallSignals": { # Diagnostic info related to the rewriter model call. # Optional. Diagnostic info related to the rewriter model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "rewrittenQuery": "A String", # Optional. Rewritten string query used for search. "safetySignals": { # Safety check results. # Optional. Safety check result. "bannedPhraseMatch": "A String", # Specifies banned phrase match subject. "decision": "A String", # Safety decision. "matchedBannedPhrase": "A String", # The matched banned phrase if there was a match. }, "searchSnippets": [ # Optional. Search snippets included in the answer generation prompt. { # Search snippet details. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, ], }, }, "displayName": "A String", # Output only. The display name of the tool. "inputActionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the action. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool that should be used. Format: `projects//locations//agents//tools/`. "webhookToolTrace": { # The tracing information for the webhook tool. # Optional. Webhook tool trace. "webhookTag": "A String", # Optional. The tag of the webhook. "webhookUri": "A String", # Optional. The url of the webhook. }, }, "tts": { # Stores metadata of the Text-to-Speech action. # Optional. Text-to-speech action performed by the agent. }, "userUtterance": { # UserUtterance represents one message sent by the customer. # Optional. Agent obtained a message from the customer. "audio": "A String", # Optional. Audio input. "audioTokens": [ # Optional. Tokens of the audio input. 42, ], "text": "A String", # Required. Message content in text. }, }, ], "conversationState": "A String", # Required. Example's output state. "createTime": "A String", # Output only. The timestamp of initial example creation. "description": "A String", # Optional. The high level concise description of the example. The max number of characters is 200. "displayName": "A String", # Required. The display name of the example. "languageCode": "A String", # Optional. The language code of the example. If not specified, the agent's default language is used. 
Note: languages must be enabled in the agent before they can be used. Note: the example's language code is not currently used in Dialogflow agents. "name": "A String", # The unique identifier of the playbook example. Format: `projects//locations//agents//playbooks//examples/`. "playbookInput": { # Input of the playbook. # Optional. The input to the playbook in the example. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. The output of the playbook in the example. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "tokenCount": "A String", # Output only. Estimated number of tokens the current example takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the example was updated. }, ], "name": "A String", # The unique identifier of the playbook version. Format: `projects//locations//agents//playbooks//versions/`. "playbook": { # Playbook is the basic building block to instruct the LLM how to execute a certain task. A playbook consists of a goal to accomplish, an optional list of step by step instructions (the step instruction may refer to the name of the custom or default plugin tools to use) to perform the task, a list of contextual input data to be passed in at the beginning of the invocation, and a list of output parameters to store the playbook result. # Output only. Snapshot of the playbook when the playbook version is created. "createTime": "A String", # Output only. The timestamp of initial playbook creation. "displayName": "A String", # Required. The human-readable name of the playbook, unique within an agent. "goal": "A String", # Required. High level description of the goal the playbook intends to accomplish. A goal should be concise since it's visible to other playbooks that may reference this playbook. "handlers": [ # Optional. A list of registered handlers to execute based on the specified triggers. { # Handler can be used to define custom logic to be executed based on the user-specified triggers. "eventHandler": { # A handler that is triggered by the specified event. # A handler triggered by event. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "event": "A String", # Required. The name of the event that triggers this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when the event occurs. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level.
For example, the playback_interruption_settings at fulfillment level only override the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching dtmf input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching dtmf input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction, if true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment.
{ # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the response associated with that channel will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes.
What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164).
}, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the response associated with that channel will be returned.
"conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. 
}, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes the webhook. Warning: 1) This flag only affects the streaming API. Responses are still queued and returned once in the non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter.
A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, }, "lifecycleHandler": { # A handler that is triggered on the specific lifecycle_stage of the playbook execution. # A handler triggered during a specific lifecycle stage of the playbook execution. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when this handler is triggered. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only override the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching dtmf input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence.
"interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching dtmf input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction, if true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the response associated with that channel will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about.
Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches the `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents an info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with an Infobot Messenger-compatible info card. Otherwise, the info card response is skipped. # Represents an info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off the conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message.
# A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`.
"inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow default to fill in the `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.desination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. 
Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off the conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint.
"phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, "lifecycleStage": "A String", # Required. The name of the lifecycle stage that triggers this handler. Supported values: * `playbook-start` * `pre-action-selection` * `pre-action-execution` }, }, ], "inputParameterDefinitions": [ # Optional. Defined structured input parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to an a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. 
"tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "instruction": { # Message of the Instruction of the playbook. # Instruction to accomplish target goal. "guidelines": "A String", # General guidelines for the playbook. These are unstructured instructions that are not directly part of the goal, e.g. "Always be polite". It's valid for this text to be long and used instead of steps altogether. "steps": [ # Ordered list of step by step execution instructions to accomplish target goal. { # Message of single step execution. "steps": [ # Sub-processing needed to execute the current step. # Object with schema name: GoogleCloudDialogflowCxV3beta1PlaybookStep ], "text": "A String", # Step instruction in text format. }, ], }, "llmModelSettings": { # Settings for LLM models. # Optional. Llm model settings for the playbook. "model": "A String", # The selected LLM model. "promptText": "A String", # The custom prompt to use. }, "name": "A String", # The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "outputParameterDefinitions": [ # Optional. Defined structured output parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to an a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "playbookType": "A String", # Optional. Type of the playbook. "referencedFlows": [ # Output only. The resource name of flows referenced by the current playbook in the instructions. "A String", ], "referencedPlaybooks": [ # Output only. The resource name of other playbooks referenced by the current playbook in the instructions. "A String", ], "referencedTools": [ # Optional. The resource name of tools referenced by the current playbook in the instructions. If not provided explicitly, they are will be implied using the tool being referenced in goal and steps. "A String", ], "speechSettings": { # Define behaviors of speech to text detection. # Optional. Playbook level Settings for speech to text detection. "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. 
"useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, "tokenCount": "A String", # Output only. Estimated number of tokes current playbook takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the playbook version was updated. }, "updateTime": "A String", # Output only. Last time the playbook version was created or modified. }
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
Lists versions for the specified Playbook. Args: parent: string, Required. The playbook to list versions for. Format: `projects//locations//agents//playbooks/`. (required) pageSize: integer, Optional. The maximum number of items to return in a single page. By default 100 and at most 1000. pageToken: string, Optional. The next_page_token value returned from a previous list request. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format 2 - v2 error format Returns: An object of the form: { # The response message for Playbooks.ListPlaybookVersions. "nextPageToken": "A String", # Token to retrieve the next page of results, or empty if there are no more results in the list. "playbookVersions": [ # The list of playbook versions. There will be a maximum number of items returned based on the page_size field in the request. { # Playbook version is a snapshot of the playbook at a certain timestamp. "description": "A String", # Optional. The description of the playbook version. "examples": [ # Output only. Snapshot of the examples belonging to the playbook when the playbook version is created. { # Example represents a sample execution of the playbook in the conversation. An example consists of a list of ordered actions performed by the end user or Dialogflow agent according to the playbook instructions to fulfill the task. "actions": [ # Required. The ordered list of actions performed by the end user and the Dialogflow agent. { # Action performed by the end user or Dialogflow agent in the conversation. "agentUtterance": { # AgentUtterance represents one message sent by the agent. # Optional. Action performed by the agent as a message. "requireGeneration": True or False, # Optional. True if the agent utterance needs to be generated by the LLM. Only used in webhook response to differentiate from empty text. Revisit whether we need this field or mark `text` as optional when we expose webhook interface to customer. "text": "A String", # Required. Message content in text. }, "completeTime": "A String", # Output only. Timestamp of the completion of the agent action. "displayName": "A String", # Output only. The display name of the action. "event": { # Event represents the event sent by the customer. # Optional. The agent received an event from the customer or a system event is emitted. "event": "A String", # Required. Name of the event. "text": "A String", # Optional. Unstructured text payload of the event. }, "flowInvocation": { # Stores metadata of the invocation of a child CX flow. Flow invocation actions enter the child flow. # Optional. Action performed on behalf of the agent by invoking a CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "flowState": "A String", # Required. Flow invocation's output state. "inputActionParameters": { # Optional. A list of input parameters for the flow. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the flow invocation. "a_key": "", # Properties of the object. }, }, "flowStateUpdate": { # Stores metadata of the state update action, such as a state machine execution in flows. # Optional. Output only. The state machine update in flows. "destination": "A String", # The destination of the transition. Format: `projects//locations//agents//flows//pages/` or `projects//locations//agents//playbooks/`.
"eventType": "A String", # The type of the event that triggered the state update. "functionCall": { # Stores the metadata of a function call to execute. # The function call to execute. "name": "A String", # The name of the function call. }, "pageState": { # Stores the state of a page and its flow. # The updated page and flow state. "displayName": "A String", # The display name of the page. "page": "A String", # The ID of the page. Format: `projects//locations//agents//flows//pages/`. "status": "A String", # The status of the page. }, "updatedParameters": { # The updated parameters. "a_key": "", # Properties of the object. }, }, "flowTransition": { # Stores metadata of the transition to a target CX flow. Flow transition actions exit the caller playbook and enter the child flow. # Optional. Action performed on behalf of the agent by transitioning to a target CX flow. "displayName": "A String", # Output only. The display name of the flow. "flow": "A String", # Required. The unique identifier of the flow. Format: `projects//locations//agents/`. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, }, "intentMatch": { # Stores metadata of the intent match action. # Optional. Output only. Intent Match in flows. "matchedIntents": [ # The matched intent. { # Stores the matched intent, which is the result of the intent match action. "displayName": "A String", # The display name of the matched intent. "generativeFallback": { # The generative fallback response of the matched intent. "a_key": "", # Properties of the object. }, "intentId": "A String", # The ID of the matched intent. "score": 3.14, # The score of the matched intent. }, ], }, "llmCall": { # Stores metadata of the call of an LLM. # Optional. Output only. LLM call performed by the agent. "model": "A String", # The model of the LLM call. "retrievedExamples": [ # A list of relevant examples used for the LLM prompt. { # Relevant example used for the LLM prompt. "exampleDisplayName": "A String", # The display name of the example. "exampleId": "A String", # The id of the example. "matchedRetrievalLabel": "A String", # Optional. The matched retrieval label of this LLM call. "retrievalStrategy": "A String", # Retrieval strategy of the example. }, ], "temperature": 3.14, # The temperature of the LLM call. "tokenCount": { # Stores token counts of the LLM call. # The token counts of the LLM call. "conversationContextTokenCount": "A String", # The number of tokens used for the conversation history in the prompt. "exampleTokenCount": "A String", # The number of tokens used for the retrieved examples in the prompt. "totalInputTokenCount": "A String", # The total number of tokens used for the input to the LLM call. "totalOutputTokenCount": "A String", # The total number of tokens used for the output of the LLM call. }, }, "playbookInvocation": { # Stores metadata of the invocation of a child playbook. Playbook invocation actions enter the child playbook. # Optional. Action performed on behalf of the agent by invoking a child playbook. "displayName": "A String", # Output only. The display name of the playbook. "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "playbookInput": { # Input of the playbook. # Optional. Input of the child playbook invocation. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. 
}, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. Output of the child playbook invocation. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "playbookState": "A String", # Required. Playbook invocation's output state. }, "playbookTransition": { # Stores metadata of the transition to another target playbook. Playbook transition actions exit the caller playbook and enter the target playbook. # Optional. Action performed on behalf of the agent by transitioning to a target playbook. "displayName": "A String", # Output only. The display name of the playbook. "inputActionParameters": { # A list of input parameters for the action. "a_key": "", # Properties of the object. }, "playbook": "A String", # Required. The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. }, "startTime": "A String", # Output only. Timestamp of the start of the agent action. "status": { # The status of the action. # Optional. Output only. The status of the action. "exception": { # Exception thrown during the execution of an action. # Optional. The exception thrown during the execution of the action. "errorMessage": "A String", # Optional. The error message. }, }, "stt": { # Stores metadata of the Speech-to-Text action. # Optional. Speech-to-text action performed by the agent. }, "subExecutionSteps": [ # Optional. The detailed tracing information for sub execution steps of the action. { # A span represents a sub execution step of an action. "completeTime": "A String", # Timestamp of the completion of the span. "metrics": [ # The unordered collection of metrics in this span. { # A named metric is a metric with name, value and unit. "name": "A String", # The name of the metric. "unit": "A String", # The unit in which this metric is reported. Follows [The Unified Code for Units of Measure](https://unitsofmeasure.org/ucum.html) standard. "value": "", # The value of the metric. }, ], "name": "A String", # The name of the span. "startTime": "A String", # Timestamp of the start of the span. "tags": [ # The metadata tags of the span such as span type. "A String", ], }, ], "toolUse": { # Stores metadata of the invocation of an action supported by a tool. # Optional. Action performed on behalf of the agent by calling a plugin tool. "action": "A String", # Optional. Name of the action to be called during the tool use. "dataStoreToolTrace": { # The tracing information for the data store tool. # Optional. Data store tool trace. "dataStoreConnectionSignals": { # Data store connection feature output signals. Might be only partially field if processing stop before the final answer. Reasons for this can be, but are not limited to: empty UCS search results, positive RAI check outcome, grounding failure, ... # Optional. Data store connection feature output signals. "answer": "A String", # Optional. The final compiled answer. "answerGenerationModelCallSignals": { # Diagnostic info related to the answer generation model call. # Optional. Diagnostic info related to the answer generation model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. 
Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "answerParts": [ # Optional. Answer parts with relevant citations. Concatenation of texts should add up the `answer` (not counting whitespaces). { # Answer part with citation. "supportingIndices": [ # Citations for this answer part. Indices of `search_snippets`. 42, ], "text": "A String", # Substring of the answer. }, ], "citedSnippets": [ # Optional. Snippets cited by the answer generation model from the most to least relevant. { # Snippet cited by the answer generation model. "searchSnippet": { # Search snippet details. # Details of the snippet. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, "snippetIndex": 42, # Index of the snippet in `search_snippets` field. }, ], "groundingSignals": { # Grounding signals. # Optional. Grounding signals. "decision": "A String", # Represents the decision of the grounding check. "score": "A String", # Grounding score bucket setting. }, "rewriterModelCallSignals": { # Diagnostic info related to the rewriter model call. # Optional. Diagnostic info related to the rewriter model call. "model": "A String", # Name of the generative model. For example, "gemini-ultra", "gemini-pro", "gemini-1.5-flash" etc. Defaults to "Other" if the model is unknown. "modelOutput": "A String", # Output of the generative model. "renderedPrompt": "A String", # Prompt as sent to the model. }, "rewrittenQuery": "A String", # Optional. Rewritten string query used for search. "safetySignals": { # Safety check results. # Optional. Safety check result. "bannedPhraseMatch": "A String", # Specifies banned phrase match subject. "decision": "A String", # Safety decision. "matchedBannedPhrase": "A String", # The matched banned phrase if there was a match. }, "searchSnippets": [ # Optional. Search snippets included in the answer generation prompt. { # Search snippet details. "documentTitle": "A String", # Title of the enclosing document. "documentUri": "A String", # Uri for the document. Present if specified for the document. "metadata": { # Metadata associated with the document. "a_key": "", # Properties of the object. }, "text": "A String", # Text included in the prompt. }, ], }, }, "displayName": "A String", # Output only. The display name of the tool. "inputActionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "outputActionParameters": { # Optional. A list of output parameters generated by the action. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool that should be used. Format: `projects//locations//agents//tools/`. "webhookToolTrace": { # The tracing information for the webhook tool. # Optional. Webhook tool trace. "webhookTag": "A String", # Optional. The tag of the webhook. "webhookUri": "A String", # Optional. The url of the webhook. }, }, "tts": { # Stores metadata of the Text-to-Speech action. # Optional. Text-to-speech action performed by the agent. }, "userUtterance": { # UserUtterance represents one message sent by the customer. # Optional. Agent obtained a message from the customer. "audio": "A String", # Optional. Audio input. "audioTokens": [ # Optional. 
Tokens of the audio input. 42, ], "text": "A String", # Required. Message content in text. }, }, ], "conversationState": "A String", # Required. Example's output state. "createTime": "A String", # Output only. The timestamp of initial example creation. "description": "A String", # Optional. The high-level, concise description of the example. The max number of characters is 200. "displayName": "A String", # Required. The display name of the example. "languageCode": "A String", # Optional. The language code of the example. If not specified, the agent's default language is used. Note: languages must be enabled in the agent before they can be used. Note: the example's language code is not currently used in Dialogflow agents. "name": "A String", # The unique identifier of the playbook example. Format: `projects//locations//agents//playbooks//examples/`. "playbookInput": { # Input of the playbook. # Optional. The input to the playbook in the example. "actionParameters": { # Optional. A list of input parameters for the action. "a_key": "", # Properties of the object. }, "precedingConversationSummary": "A String", # Optional. Summary string of the preceding conversation for the child playbook invocation. }, "playbookOutput": { # Output of the playbook. # Optional. The output of the playbook in the example. "actionParameters": { # Optional. A Struct object of output parameters for the action. "a_key": "", # Properties of the object. }, "executionSummary": "A String", # Optional. Summary string of the execution result of the child playbook. "state": "A String", # End state of the playbook. }, "tokenCount": "A String", # Output only. Estimated number of tokens the current example takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the example was updated. }, ], "name": "A String", # The unique identifier of the playbook version. Format: `projects//locations//agents//playbooks//versions/`. "playbook": { # A playbook is the basic building block used to instruct the LLM how to execute a certain task. A playbook consists of a goal to accomplish, an optional list of step-by-step instructions (a step instruction may refer to the name of a custom or default plugin tool to use) to perform the task, a list of contextual input data to be passed in at the beginning of the invocation, and a list of output parameters to store the playbook result. # Output only. Snapshot of the playbook when the playbook version is created. "createTime": "A String", # Output only. The timestamp of initial playbook creation. "displayName": "A String", # Required. The human-readable name of the playbook, unique within an agent. "goal": "A String", # Required. High-level description of the goal the playbook intends to accomplish. A goal should be concise since it's visible to other playbooks that may reference this playbook. "handlers": [ # Optional. A list of registered handlers to execute based on the specified triggers. { # A handler can be used to define custom logic to be executed based on the user-specified triggers. "eventHandler": { # A handler that is triggered by the specified event. # A handler triggered by event. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "event": "A String", # Required. The name of the event that triggers this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook.
Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when the event occurs. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at the fulfillment level only override the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix`. Whether it is a full object name or just a prefix, its usage depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpointing timeout setting for matching DTMF input to a regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching DTMF input to a regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction. If true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection.
Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout-based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, and all the rest are ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only.
A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches the `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents an info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with an Infobot Messenger-compatible info card. Otherwise, the info card response is skipped. # Represents an info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off the conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio.
However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis.
The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches the `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents an info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with an Infobot Messenger-compatible info card. Otherwise, the info card response is skipped. # Represents an info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off the conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio.
This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters.
"a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes the webhook. Warning: 1) This flag only affects the streaming API. Responses are still queued and returned once in the non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, }, "lifecycleHandler": { # A handler that is triggered on the specific lifecycle_stage of the playbook execution. # A handler triggered during a specific lifecycle stage of the playbook execution. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when this handler is triggered. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects.
A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix, its usage depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching DTMF input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching DTMF input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction. If true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis.
* If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. 
An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call.
Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object.
}, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object.
}, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes the webhook. Warning: 1) This flag only affects the streaming API. Responses are still queued and returned once in the non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, "lifecycleStage": "A String", # Required. The name of the lifecycle stage that triggers this handler. Supported values: * `playbook-start` * `pre-action-selection` * `pre-action-execution` }, }, ], "inputParameterDefinitions": [ # Optional. Defined structured input parameters for this playbook. { # Defines the properties of a parameter.
Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "instruction": { # Message of the Instruction of the playbook. # Instruction to accomplish target goal. "guidelines": "A String", # General guidelines for the playbook. These are unstructured instructions that are not directly part of the goal, e.g. "Always be polite". It's valid for this text to be long and used instead of steps altogether. "steps": [ # Ordered list of step by step execution instructions to accomplish target goal. { # Message of single step execution. "steps": [ # Sub-processing needed to execute the current step. # Object with schema name: GoogleCloudDialogflowCxV3beta1PlaybookStep ], "text": "A String", # Step instruction in text format. }, ], }, "llmModelSettings": { # Settings for LLM models. # Optional. LLM model settings for the playbook. "model": "A String", # The selected LLM model. "promptText": "A String", # The custom prompt to use. }, "name": "A String", # The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "outputParameterDefinitions": [ # Optional. Defined structured output parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "playbookType": "A String", # Optional. Type of the playbook. "referencedFlows": [ # Output only. The resource name of flows referenced by the current playbook in the instructions. "A String", ], "referencedPlaybooks": [ # Output only.
The resource name of other playbooks referenced by the current playbook in the instructions. "A String", ], "referencedTools": [ # Optional. The resource name of tools referenced by the current playbook in the instructions. If not provided explicitly, they will be implied using the tools referenced in the goal and steps. "A String", ], "speechSettings": { # Define behaviors of speech to text detection. # Optional. Playbook-level settings for speech to text detection. "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, "tokenCount": "A String", # Output only. Estimated number of tokens the current playbook takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the playbook version was updated. }, "updateTime": "A String", # Output only. Last time the playbook version was created or modified. }, ], }
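For orientation, here is a minimal sketch of calling the list method above with the google-api-python-client library and reading the versions out of the response. The project, location, agent, and playbook IDs are placeholders, and application default credentials are assumed:

  from googleapiclient.discovery import build

  # Build the Dialogflow CX v3beta1 client. With no explicit credentials,
  # the library falls back to application default credentials.
  service = build("dialogflow", "v3beta1")

  # Hypothetical parent playbook; substitute real resource IDs.
  parent = "projects/my-project/locations/global/agents/my-agent/playbooks/my-playbook"

  versions = service.projects().locations().agents().playbooks().versions()
  response = versions.list(parent=parent, pageSize=10).execute()
  for version in response.get("playbookVersions", []):
      print(version["name"], version.get("description", ""))

When you are done, call close() on the service object (or use it as a context manager) so the underlying httplib2 connections are released.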
list_next()
Retrieves the next page of results. Args: previous_request: The request for the previous page. (required) previous_response: The response from the request for the previous page. (required) Returns: A request object that you can call 'execute()' on to request the next page. Returns None if there are no more items in the collection.
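Building on the previous sketch (reusing `versions` and `parent`), paging through every version follows the previous_request/previous_response contract described above; list_next returns None once the collection is exhausted:

  request = versions.list(parent=parent, pageSize=25)
  while request is not None:
      response = request.execute()
      for version in response.get("playbookVersions", []):
          print(version["name"])
      # None is returned when there are no further pages.
      request = versions.list_next(previous_request=request, previous_response=response)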
restore(name, body=None, x__xgafv=None)
Retrieves the specified version of the Playbook and stores it as the current playbook draft, returning the playbook with resources updated. Args: name: string, Required. The name of the playbook version. Format: `projects//locations//agents//playbooks//versions/`. (required) body: object, The request body. The object takes the form of: { # The request message for Playbooks.RestorePlaybookVersion. } x__xgafv: string, V1 error format. Allowed values 1 - v1 error format 2 - v2 error format Returns: An object of the form: { # The response message for Playbooks.RestorePlaybookVersion. "playbook": { # Playbook is the basic building block to instruct the LLM how to execute a certain task. A playbook consists of a goal to accomplish, an optional list of step by step instructions (the step instructions may refer to the names of the custom or default plugin tools to use) to perform the task, a list of contextual input data to be passed in at the beginning of the invocation, and a list of output parameters to store the playbook result. # The updated playbook. "createTime": "A String", # Output only. The timestamp of initial playbook creation. "displayName": "A String", # Required. The human-readable name of the playbook, unique within an agent. "goal": "A String", # Required. High level description of the goal the playbook intends to accomplish. A goal should be concise since it's visible to other playbooks that may reference this playbook. "handlers": [ # Optional. A list of registered handlers to execute based on the specified triggers. { # Handler can be used to define custom logic to be executed based on the user-specified triggers. "eventHandler": { # A handler that is triggered by the specified event. # A handler triggered by an event. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler. "event": "A String", # Required. The name of the event that triggers this handler. "fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when the event occurs. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination.
Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix, its usage depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching DTMF input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching DTMF input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction. If true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection. # Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis.
The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. 
This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters.
"a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will utilize the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. e.g. If there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata.
Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload.
"a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes the webhook. Warning: 1) This flag only affects the streaming API. Responses are still queued and returned once in the non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, }, "lifecycleHandler": { # A handler that is triggered on the specific lifecycle_stage of the playbook execution. # A handler triggered during a specific lifecycle stage of the playbook execution. "condition": "A String", # Optional. The condition that must be satisfied to trigger this handler.
"fulfillment": { # A fulfillment can do one or more of the following actions at the same time: * Generate rich message responses. * Set parameter values. * Call the webhook. Fulfillments can be called at various stages in the Page or Form lifecycle. For example, when a DetectIntentRequest drives a session to enter a new page, the page's entry fulfillment can add a static response to the QueryResult in the returning DetectIntentResponse, call the webhook (for example, to load user data from a database), or both. # Required. The fulfillment to call when this handler is triggered. "advancedSettings": { # Hierarchical advanced settings for agent/flow/page/fulfillment/parameter. Settings exposed at a lower level override the settings exposed at a higher level. Overriding occurs at the sub-setting level. For example, the playback_interruption_settings at fulfillment level only overrides the playback_interruption_settings at the agent level, leaving other settings at the agent level unchanged. DTMF settings do not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. # Hierarchical advanced settings for this fulfillment. The settings exposed at the lower level override the settings exposed at the higher level. "audioExportGcsDestination": { # Google Cloud Storage location for a Dialogflow operation that writes or exports objects (e.g. exported agent or transcripts) outside of Dialogflow. # If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: - Agent level - Flow level "uri": "A String", # Required. The Google Cloud Storage URI for the exported objects. A URI is of the form: `gs://bucket/object-name-or-prefix` Whether it is a full object name or just a prefix, its usage depends on the Dialogflow operation. }, "dtmfSettings": { # Define behaviors for DTMF (dual tone multi frequency). # Settings for DTMF. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level. "enabled": True or False, # If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). "endpointingTimeoutDuration": "A String", # Endpoint timeout setting for matching DTMF input to regex. "finishDigit": "A String", # The digit that terminates a DTMF digit sequence. "interdigitTimeoutDuration": "A String", # Interdigit timeout setting for matching DTMF input to regex. "maxDigits": 42, # Max length of DTMF digits. }, "loggingSettings": { # Define behaviors on logging. # Settings for logging. Settings for Dialogflow History, Contact Center messages, StackDriver logs, and speech logging. Exposed at the following levels: - Agent level. "enableConsentBasedRedaction": True or False, # Enables consent-based end-user input redaction. If true, a pre-defined session parameter `$session.params.conversation-redaction` will be used to determine if the utterance should be redacted. "enableInteractionLogging": True or False, # Enables DF Interaction logging. "enableStackdriverLogging": True or False, # Enables Google Cloud Logging. }, "speechSettings": { # Define behaviors of speech to text detection.
# Settings for speech to text detection. Exposed at the following levels: - Agent level - Flow level - Page level - Parameter level "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout-based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, }, "conditionalCases": [ # Conditional cases for this fulfillment. { # A list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected; all the rest are ignored. "cases": [ # A list of cascading if-else conditions. { # Each case has a Boolean condition. When it is evaluated to be True, the corresponding messages will be selected and evaluated recursively. "caseContent": [ # A list of case content. { # The list of messages or conditional cases to activate for this case. "additionalCases": # Object with schema name: GoogleCloudDialogflowCxV3beta1FulfillmentConditionalCases # Additional cases to be evaluated. "message": { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. # Returned message. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the response associated with that channel will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and is not supposed to be defined by the user. # Output only. 
A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches the `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents an info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with an Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents an info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off the conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. This message is generated by Dialogflow only and is not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and is not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or SSML response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or SSML response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. 
However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. "a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, }, ], "condition": "A String", # The condition to activate and select this case. Empty means the condition is always true. The condition is evaluated against form parameters or session parameters. See the [conditions reference](https://cloud.google.com/dialogflow/cx/docs/reference/condition). }, ], }, ], "enableGenerativeFallback": True or False, # If the flag is true, the agent will use the LLM to generate a text response. If LLM generation fails, the defined responses in the fulfillment will be respected. This flag is only useful for fulfillments associated with no-match event handlers. "generators": [ # A list of Generators to be called during this fulfillment. { # Generator settings used by the LLM to generate a text response. "generator": "A String", # Required. The generator to call. Format: `projects//locations//agents//generators/`. "inputParameters": { # Map from placeholder parameter in the Generator to corresponding session parameters. By default, Dialogflow uses the session parameter with the same name to fill in the generator template. For example, if there is a placeholder parameter `city` in the Generator, Dialogflow defaults to filling in `$city` with `$session.params.city`. However, you may choose to fill `$city` with `$session.params.destination-city`. - Map key: parameter ID - Map value: session parameter name "a_key": "A String", }, "outputParameter": "A String", # Required. Output parameter which should contain the generator response. }, ], "messages": [ # The list of rich message responses to present to the user. { # Represents a response message that can be returned by a conversational agent. Response messages are also used for output audio synthesis. 
The approach is as follows: * If at least one OutputAudioText response is present, then all OutputAudioText responses are linearly concatenated, and the result is used for output audio synthesis. * If the OutputAudioText responses are a mixture of text and SSML, then the concatenated result is treated as SSML; otherwise, the result is treated as either text or SSML as appropriate. The agent designer should ideally use either text or SSML consistently throughout the bot design. * Otherwise, all Text responses are linearly concatenated, and the result is used for output audio synthesis. This approach allows for more sophisticated user experience scenarios, where the text displayed to the user may differ from what is heard. "channel": "A String", # The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only the response associated with that channel will be returned. "conversationSuccess": { # Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates that the conversation succeeded. * In a webhook response when you determine that you handled the customer issue. # Indicates that the conversation succeeded. "metadata": { # Custom metadata. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "endInteraction": { # Indicates that interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only and is not supposed to be defined by the user. # Output only. A signal that indicates the interaction with the Dialogflow agent has ended. This message is generated by Dialogflow only when the conversation reaches the `END_SESSION` page. It is not supposed to be defined by the user. It's guaranteed that there is at most one such message in each response. }, "knowledgeInfoCard": { # Represents an info card response. If the response contains generative knowledge prediction, Dialogflow will return a payload with an Infobot Messenger compatible info card. Otherwise, the info card response is skipped. # Represents an info card for knowledge answers, to be better rendered in Dialogflow Messenger. }, "liveAgentHandoff": { # Indicates that the conversation should be handed off to a live agent. Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. You may set this, for example: * In the entry_fulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. * In a webhook response when you determine that the customer issue can only be handled by a human. # Hands off the conversation to a human agent. "metadata": { # Custom metadata for your handoff procedure. Dialogflow doesn't impose any structure on this. "a_key": "", # Properties of the object. }, }, "mixedAudio": { # Represents an audio message that is composed of both segments synthesized from the Dialogflow agent prompts and ones hosted externally at the specified URIs. The external URIs are specified via play_audio. 
This message is generated by Dialogflow only and is not supposed to be defined by the user. # Output only. An audio response message composed of both the synthesized Dialogflow agent responses and responses defined via play_audio. This message is generated by Dialogflow only and is not supposed to be defined by the user. "segments": [ # Segments this audio response is composed of. { # Represents one segment of audio. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this segment can be interrupted by the end user's speech and the client should then start the next Dialogflow request. "audio": "A String", # Raw audio synthesized from the Dialogflow agent's response using the output config specified in the request. "uri": "A String", # Client-specific URI that points to an audio clip accessible to the client. Dialogflow does not impose any validation on it. }, ], }, "outputAudioText": { # A text or SSML response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. # A text or SSML response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "ssml": "A String", # The SSML text to be synthesized. For more information, see [SSML](/speech/text-to-speech/docs/ssml). "text": "A String", # The raw text to be synthesized. }, "payload": { # Returns a response containing a custom, platform-specific payload. "a_key": "", # Properties of the object. }, "playAudio": { # Specifies an audio clip to be played by the client as part of the response. # Signal that the client should play an audio clip hosted at a client-specific URI. Dialogflow uses this to construct mixed_audio. However, Dialogflow itself does not try to read or process the URI in any way. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "audioUri": "A String", # Required. URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. }, "telephonyTransferCall": { # Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. # A signal that the client should transfer the phone call connected to this agent to a third-party endpoint. "phoneNumber": "A String", # Transfer the call to a phone number in [E.164 format](https://en.wikipedia.org/wiki/E.164). }, "text": { # The text response message. # Returns a text response. "allowPlaybackInterruption": True or False, # Output only. Whether the playback of this message can be interrupted by the end user's speech and the client can then start the next Dialogflow request. "text": [ # Required. A collection of text response variants. If multiple variants are defined, only one text response variant is returned at runtime. "A String", ], }, "toolCall": { # Represents a call of a specific tool's action with the specified inputs. # Returns the definition of a tool call that should be executed by the client. "action": "A String", # Required. The name of the tool's action associated with this call. "inputParameters": { # Optional. The action's input parameters. 
"a_key": "", # Properties of the object. }, "tool": "A String", # Required. The tool associated with this call. Format: `projects//locations//agents//tools/`. }, }, ], "returnPartialResponses": True or False, # Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. "setParameterActions": [ # Set parameter values before executing the webhook. { # Setting a parameter value. "parameter": "A String", # Display name of the parameter. "value": "", # The new value of the parameter. A null value clears the parameter. }, ], "tag": "A String", # The value of this field will be populated in the WebhookRequest `fulfillmentInfo.tag` field by Dialogflow when the associated webhook is called. The tag is typically used by the webhook service to identify which fulfillment is being called, but it could be used for other purposes. This field is required if `webhook` is specified. "webhook": "A String", # The webhook to call. Format: `projects//locations//agents//webhooks/`. }, "lifecycleStage": "A String", # Required. The name of the lifecycle stage that triggers this handler. Supported values: * `playbook-start` * `pre-action-selection` * `pre-action-execution` }, }, ], "inputParameterDefinitions": [ # Optional. Defined structured input parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to an a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "instruction": { # Message of the Instruction of the playbook. # Instruction to accomplish target goal. "guidelines": "A String", # General guidelines for the playbook. These are unstructured instructions that are not directly part of the goal, e.g. "Always be polite". It's valid for this text to be long and used instead of steps altogether. "steps": [ # Ordered list of step by step execution instructions to accomplish target goal. { # Message of single step execution. "steps": [ # Sub-processing needed to execute the current step. # Object with schema name: GoogleCloudDialogflowCxV3beta1PlaybookStep ], "text": "A String", # Step instruction in text format. }, ], }, "llmModelSettings": { # Settings for LLM models. # Optional. Llm model settings for the playbook. 
"model": "A String", # The selected LLM model. "promptText": "A String", # The custom prompt to use. }, "name": "A String", # The unique identifier of the playbook. Format: `projects//locations//agents//playbooks/`. "outputParameterDefinitions": [ # Optional. Defined structured output parameters for this playbook. { # Defines the properties of a parameter. Used to define parameters used in the agent and the input / output parameters for each fulfillment. "description": "A String", # Human-readable description of the parameter. Limited to 300 characters. "name": "A String", # Required. Name of parameter. "type": "A String", # Type of parameter. "typeSchema": { # Encapsulates different type schema variations: either a reference to an a schema that's already defined by a tool, or an inline definition. # Optional. Type schema of parameter. "inlineSchema": { # A type schema object that's specified inline. # Set if this is an inline schema definition. "items": # Object with schema name: GoogleCloudDialogflowCxV3beta1TypeSchema # Schema of the elements if this is an ARRAY type. "type": "A String", # Data type of the schema. }, "schemaReference": { # A reference to the schema of an existing tool. # Set if this is a schema reference. "schema": "A String", # The name of the schema. "tool": "A String", # The tool that contains this schema definition. Format: `projects//locations//agents//tools/`. }, }, }, ], "playbookType": "A String", # Optional. Type of the playbook. "referencedFlows": [ # Output only. The resource name of flows referenced by the current playbook in the instructions. "A String", ], "referencedPlaybooks": [ # Output only. The resource name of other playbooks referenced by the current playbook in the instructions. "A String", ], "referencedTools": [ # Optional. The resource name of tools referenced by the current playbook in the instructions. If not provided explicitly, they are will be implied using the tool being referenced in goal and steps. "A String", ], "speechSettings": { # Define behaviors of speech to text detection. # Optional. Playbook level Settings for speech to text detection. "endpointerSensitivity": 42, # Sensitivity of the speech model that detects the end of speech. Scale from 0 to 100. "models": { # Mapping from language to Speech-to-Text model. The mapped Speech-to-Text model will be selected for requests from its corresponding language. For more information, see [Speech models](https://cloud.google.com/dialogflow/cx/docs/concept/speech-models). "a_key": "A String", }, "noSpeechTimeout": "A String", # Timeout before detecting no speech. "useTimeoutBasedEndpointing": True or False, # Use timeout based endpointing, interpreting endpointer sensitivity as seconds of timeout value. }, "tokenCount": "A String", # Output only. Estimated number of tokes current playbook takes when sent to the LLM. "updateTime": "A String", # Output only. Last time the playbook version was updated. }, }