Standard Protocol Mode gRPC API Reference

This guide provides an in-depth explanation of the Standard Protocol Mode, covering how to interact with the gRPC API, structure requests, and process returned events.

gRPC API Reference

The gRPC API is a powerful tool for processing multi-modal content such as text and images. It supports both batch processing (sending all data at once) and streaming (sending data incrementally), making it suitable for applications that require real-time analysis as well as post-processing workflows.

The API is designed with the following capabilities:

  • Scalable analysis: Handle large amounts of data efficiently.
  • Real-time streaming: Analyze data as it arrives.
  • Multi-modal content support: Process diverse content types in a unified request.
  • Custom safety policies: Tailor content analysis to your specific requirements.

gRPC Protocol

The gRPC protocol defines the structure for requests and responses. Below is the complete .proto definition:

syntax = "proto3";

package rai;

option csharp_namespace = "Microsoft.ContentSafety.Analyze.V2";

import "google/protobuf/wrappers.proto";
import "google/protobuf/any.proto";

service ResponsibleAI {
  rpc AnalyzeBySafetyPolicy(stream AnalyzeBySafetyPolicyRequest) returns (stream AnalyzeBySafetyPolicyResponse);
}



message AnalyzeBySafetyPolicyRequest {
  oneof request {
    SafetyPolicy safety_policy_inline = 1;
    string safety_policy_name = 2;
    Buffer buffer = 3;
    bool stopProcessing = 4;
    bool commitBuffer = 5;
    AOAIRawBuffer aoaiRawBuffer = 6;
  }
  string parent_policy_name = 7;
}

message AOAIRawBuffer {
  ApiName api_name = 1;
  Source source = 2;
  string payload = 3;
}

enum ApiName{
  API_NAME_UNSPECIFIED = 0;
  API_NAME_COMPLETION = 1;
  API_NAME_CHATCOMPLETION = 2;
  API_NAME_REALTIME = 3;
  API_NAME_ASSISTANT = 4;
  API_NAME_IMAGE_CREATE = 5;
}


message SafetyPolicy{
  repeated TaskSetting task_settings = 1;
  string name = 2;
}

message TaskSetting{
  string setting_id = 1;
  bool setting_enabled = 2;
  repeated AppliedFor applied_for = 3;
  TaskKind kind = 4;
  oneof task_setting {
    HarmCategoryTaskSetting harm_category_task_setting = 5;
    BlocklistTaskSetting blocklist_task_setting = 6;
    SafetyIncidentTaskSetting safety_incident_task_setting = 7;
    CustomHarmCategoryTaskSetting custom_harm_category_task_setting = 8;
  }
  BlockingCriteria blocking_criteria = 9;
}

message AppliedFor{
  Role role = 1;
  Source source = 2;
}

message HarmCategoryTaskSetting{
  HarmCategory harm_category = 1;
  string display_name = 2;
}

message BlocklistTaskSetting{
  string name = 1;
}

message SafetyIncidentTaskSetting{
  string name = 1;
}

message CustomHarmCategoryTaskSetting{
  string name = 1;
}

message BlockingCriteria{
  oneof value {
    int32 allowed_severity = 1;
    bool is_detected = 2;
    RiskLevel allowed_risk_level = 3;
    float allowed_score = 4;
  };
  bool enabled = 5;
  BlockingCriteriaKind kind = 6;
}

message Buffer {
  repeated Message messages = 1;
}

message Message {
  string message_id = 1;
  Source source = 2;
  Role role = 3;
  repeated Content contents = 4;
  string previous_message_id = 5;
}

message Content {
  google.protobuf.Int32Value content_index = 1;
  ModalityKind kind = 2;
  string text = 3;
  string image_base64 = 4;
  string image_blob = 5;
  AudioObject audio = 6;
  string ref_id = 7;
  string arguments = 8;
}

message AudioObject {
  string audio_data = 1;
  AudioFormat audio_format = 2;
  string audio_transcript = 3;
}

message AnalyzeBySafetyPolicyResponse{
  oneof response{
    AnalysisResult analysis_result = 1;
    Watermark watermark = 2;
    Completion completion = 3;
  }
}

message AnalysisResult {
  Offset offset = 1;
  State state = 2;
  Result result = 3;
  repeated TaskResult task_results = 4;
}

message Watermark{
  Source source = 1;
  string message_id = 2;
  int32 content_index = 3;
  int32 offset = 4;
}

message Completion {
  EndReason end_reason = 1;
  string error_description = 2;
}

message TaskResult {
  string setting_id = 1;
  ResultCode result_code = 2;
  string result_code_detail = 3;
  bool is_blocking_criteria_met = 4;
  TaskKind kind = 5;
  oneof task_result{
    HarmCategoryTaskResult harm_category_task_result = 6;
    BlocklistTaskResult blocklist_task_result = 7;
    SafetyIncidentTaskResult safety_incident_task_result = 8;
    CustomCategoryTaskResult custom_category_task_result = 9;
  }
}

message HarmCategoryTaskResult{
  HarmCategory harm_category = 1;
  ModalityKind kind = 2;
  bool is_detected = 3;
  int32 severity = 4;
  RiskLevel risk_level = 5;
  map<string, google.protobuf.Any> advanced = 6;
}

message BlocklistTaskResult{
  string name = 1;
  bool is_detected = 2;
  map<string, google.protobuf.Any> advanced = 3;
}

message SafetyIncidentTaskResult{
  string name = 1;
  bool is_detected = 2;
  map<string, google.protobuf.Any> advanced = 3;
}

message CustomCategoryTaskResult{
  string name = 1;
  bool is_detected = 2;
  map<string, google.protobuf.Any> advanced = 3;
}

message Offset{
  string message_id = 1;
  int32 content_index = 2;
  int32 start_offset = 3;
  int32 end_offset = 4;
}

enum TaskKind{
  TASK_KIND_UNSPECIFIED = 0;
  TASK_KIND_HARM_CATEGORY = 1;
  TASK_KIND_BLOCKLIST = 2;
  TASK_KIND_SAFETY_INCIDENT = 3;
  TASK_KIND_CUSTOM_HARM_CATEGORY = 4;
}

enum ResultCode {
  RESULT_CODE_UNSPECIFIED = 0;
  RESULT_CODE_OK = 1;
  RESULT_CODE_NO_VALID_INPUT = 2;
  RESULT_CODE_INTERNAL_TIMEOUT = 3;
  RESULT_CODE_INTERNAL_ERROR = 4;
}

enum HarmCategory {
  HARM_CATEGORY_UNSPECIFIED = 0;
  HARM_CATEGORY_CELEBRITY = 1;
  HARM_CATEGORY_CODE_VULNERABILITY = 2;
  HARM_CATEGORY_DRUG = 3;
  HARM_CATEGORY_HATE = 4;
  HARM_CATEGORY_PROMPT_INJECTION = 5;
  HARM_CATEGORY_PROTECTED_MATERIAL_CODE = 6;
  HARM_CATEGORY_PROTECTED_MATERIAL_TEXT = 7;
  HARM_CATEGORY_SEXUAL = 8;
  HARM_CATEGORY_SELF_HARM = 9;
  HARM_CATEGORY_VIOLENCE = 10;
  HARM_CATEGORY_XPIA = 11;
  HARM_CATEGORY_FATE_INAPPROPRIATE = 12;
  HARM_CATEGORY_FATE_SUICIDE_HELP = 13;
  HARM_CATEGORY_FATE_OFFENSIVE = 14;
  HARM_CATEGORY_TXT2_CODE_LOW_AUC = 15;
  HARM_CATEGORY_BING_JAILBREAK = 16;
  HARM_CATEGORY_FATE_POLITICS = 17;
  HARM_CATEGORY_ELECTION_CRITICAL_INFORMATION = 18;
}

enum RiskLevel{
  RISK_LEVEL_UNSPECIFIED = 0;
  RISK_LEVEL_SAFE = 1;
  RISK_LEVEL_LOW = 2;
  RISK_LEVEL_MEDIUM = 3;
  RISK_LEVEL_HIGH = 4;
}

enum Role{
  ROLE_UNSPECIFIED = 0;
  ROLE_ALL = 1;
  ROLE_USER = 2;
  ROLE_SYSTEM = 3;
  ROLE_ASSISTANT = 4;
  ROLE_TOOL = 5;
  ROLE_FUNCTION = 6;
}

enum ModalityKind{
  MODALITY_KIND_UNSPECIFIED = 0;
  MODALITY_KIND_TEXT = 1;
  MODALITY_KIND_IMAGE = 2;
  MODALITY_KIND_AUDIO = 3;
  MODALITY_KIND_VIDEO = 4;
  MODALITY_KIND_REF = 5;
}
enum Source{
  SOURCE_UNSPECIFIED = 0;
  SOURCE_ALL = 1;
  SOURCE_PROMPT = 2;
  SOURCE_COMPLETION = 3;
}

enum EndReason {
  END_REASON_UNSPECIFIED = 0;
  END_REASON_END_OF_STREAM = 1;
  END_REASON_POLICY_INVALID = 2;
  END_REASON_BUFFER_MESSAGE_INVALID = 3;
  END_REASON_CALLER_PREMATURE_CLOSE = 4;
  END_REASON_SERVER_ERROR = 5;
//  END_REASON_TIMEOUT = 2;
//  END_REASON_SERVER_SHUTDOWN = 3;
//  END_REASON_SERVER_ERROR = 4;
//  END_REASON_PROTOCOL_VIOLATION = 5;
//  END_REASON_POLICY_NOT_FOUND = 6;
//  END_REASON_POLICY_INVALID = 7;
//  END_REASON_SEGMENTATION_INVALID = 8;
//  END_REASON_IMAGE_ERROR = 10;
}

enum State{
  STATE_UNSPECIFIED = 0;
  STATE_ANALYSIS_ALL_SUCCEEDED = 1;
  STATE_ANALYSIS_NOT_ALL_SUCCEEDED = 2;
}

enum Result{
  RESULT_UNSPECIFIED = 0;
  RESULT_NO_CRITERIA_MET = 1;
  RESULT_BLOCKING_CRITERIA_MET = 2;
}

enum AudioFormat{
  AUDIO_FORMAT_UNSPECIFIED = 0;
  AUDIO_FORMAT_PCM16 = 1;
  AUDIO_FORMAT_G711_ULAW = 2;
  AUDIO_FORMAT_G711_ALAW = 3;
}

enum BlockingCriteriaKind{
  BLOCKING_CRITERIA_KIND_UNSPECIFIED = 0;
  BLOCKING_CRITERIA_KIND_SEVERITY = 1;
  BLOCKING_CRITERIA_KIND_RISK_LEVEL = 2;
  BLOCKING_CRITERIA_KIND_IS_DETECTED = 3;
  BLOCKING_CRITERIA_KIND_SCORE = 4;
}
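
To give a sense of how the protocol is used from code, below is a minimal Python client sketch. It assumes the definition above is saved as rai.proto and compiled with grpcio-tools (so the generated modules are named rai_pb2 and rai_pb2_grpc), and that the endpoint, credentials, and policy name placeholders are replaced with values from your deployment. It also assumes the safety policy is selected by sending a safety_policy_name request as the first message on the stream, which follows from the request oneof but should be confirmed against your service configuration.

# Minimal bidirectional-streaming client sketch (Python, grpcio).
# Assumed setup: the .proto above is saved as rai.proto and compiled with
#   python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. rai.proto
# Endpoint, credentials, and policy name below are placeholders.
import grpc

import rai_pb2
import rai_pb2_grpc


def request_stream():
    # Assumption: the policy is chosen by the first request on the stream,
    # either by name (safety_policy_name) or inline (safety_policy_inline).
    yield rai_pb2.AnalyzeBySafetyPolicyRequest(safety_policy_name="<your-policy>")
    # Subsequent requests carry content buffers (see the payload example below).
    yield rai_pb2.AnalyzeBySafetyPolicyRequest(
        buffer=rai_pb2.Buffer(
            messages=[
                rai_pb2.Message(
                    message_id="0",
                    source=rai_pb2.SOURCE_PROMPT,
                    role=rai_pb2.ROLE_USER,
                    contents=[
                        rai_pb2.Content(
                            kind=rai_pb2.MODALITY_KIND_TEXT,
                            text="Text to analyze",
                        )
                    ],
                )
            ]
        )
    )


channel = grpc.secure_channel("<your-endpoint>:443", grpc.ssl_channel_credentials())
stub = rai_pb2_grpc.ResponsibleAIStub(channel)

# AnalyzeBySafetyPolicy is a bidirectional stream: requests go up through the
# generator, and analysis events come back as an iterator of responses.
for response in stub.AnalyzeBySafetyPolicy(request_stream()):
    print(response)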

Example gRPC Payload

Below is an example of a typical payload for analyzing text and image data:

{
  "buffer": {
    "messages": [
      {
        "messageId": "0",
        "source": "SOURCE_PROMPT",
        "role": "ROLE_USER",
        "contents": [
          {
            "contentIndex": 0,
            "modality": "MODALITY_TEXT",
            "text": "In the ever-evolving landscape of technology, artificial intelligence (AI) has emerged as a transformative force reshaping industries and societies alike. From healthcare to education and finance, the applications of AI are vast and continually expanding. It has not only enhanced productivity but also provided innovative solutions to complex challenges. The journey from rule-based systems to advanced deep learning models reflects AI’s incredible progress over the decades. Powered by massive datasets, improved algorithms, and superior computational resources, AI now permeates every facet of modern life. However, this progress also raises ethical concerns about privacy, fairness, and potential misuse of the technology. In everyday life, AI's presence is undeniable. We interact with voice assistants to manage tasks, rely on recommendation systems to discover products and entertainment, and use navigation apps to optimize travel. These technologies streamline our routines and create new economic opportunities. Despite these benefits, AI still faces significant hurdles. Its inability to fully grasp human emotions and intentions poses challenges for applications requiring nuanced decision-making. To address this, we need robust policies ensuring AI systems are transparent, fair, and accountable. Looking ahead, AI holds immense potential in diverse fields. Autonomous robots could perform hazardous jobs, medical AI could enhance diagnostics with precise imaging analysis, and climate scientists might leverage AI to combat environmental challenges. Yet, these advancements come with responsibilities. AI is a tool, and its impact depends entirely on how it is deployed. Striking a balance between innovation and ethics will be crucial for developers and policymakers alike. The future of AI is undoubtedly bright, but it remains a human endeavor. By fostering international collaboration, sharing advancements, and addressing risks, we can ensure this powerful technology benefits humanity as a whole. Only with a collective effort can we unlock AI’s true potential and shape a better, more equitable future for everyone."
          },
          {
            "contentIndex": 0,
            "modality": "MODALITY_TEXT",
            "text": "In the ever-evolving landscape of technology, artificial intelligence (AI) has emerged as a transformative force reshaping industries and societies alike. From healthcare to education and finance, the applications of AI are vast and continually expanding. It has not only enhanced productivity but also provided innovative solutions to complex challenges. The journey from rule-based systems to advanced deep learning models reflects AI’s incredible progress over the decades. Powered by massive datasets, improved algorithms, and superior computational resources, AI now permeates every facet of modern life. However, this progress also raises ethical concerns about privacy, fairness, and potential misuse of the technology. In everyday life, AI's presence is undeniable. We interact with voice assistants to manage tasks, rely on recommendation systems to discover products and entertainment, and use navigation apps to optimize travel. These technologies streamline our routines and create new economic opportunities. Despite these benefits, AI still faces significant hurdles. Its inability to fully grasp human emotions and intentions poses challenges for applications requiring nuanced decision-making. To address this, we need robust policies ensuring AI systems are transparent, fair, and accountable. Looking ahead, AI holds immense potential in diverse fields. Autonomous robots could perform hazardous jobs, medical AI could enhance diagnostics with precise imaging analysis, and climate scientists might leverage AI to combat environmental challenges. Yet, these advancements come with responsibilities. AI is a tool, and its impact depends entirely on how it is deployed. Striking a balance between innovation and ethics will be crucial for developers and policymakers alike. The future of AI is undoubtedly bright, but it remains a human endeavor. By fostering international collaboration, sharing advancements, and addressing risks, we can ensure this powerful technology benefits humanity as a whole. Only with a collective effort can we unlock AI’s true potential and shape a better, more equitable future for everyone. lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollitlorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollitlorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit"
          }
        ]
      },
      {
        "messageId": "1",
        "source": "SOURCE_PROMPT",
        "role": "ROLE_USER",
        "contents": [
          {
            "contentIndex": 0,
            "modality": "MODALITY_TEXT",
            "text": "lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollitlorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollitlorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur excepteur sint occaecat cupidatat non proident sunt in culpa qui officia deserunt mollit. var password=123;sdasdsdsadsadasdasdaswqwrefvadsfafadsfasdfvcsdluhlu****dhsldhjs*********"
          }
        ]
      }
    ]
  }
}

Payload Structure

The gRPC payload wraps content in a buffer of messages and can be sent in one batch or streamed incrementally to support real-time processing. The source, messageId, and contentIndex fields ensure that every result can be traced back to the exact piece of content it refers to.

Fields:

  • source (string): The origin of the message. Possible values: SOURCE_PROMPT, SOURCE_COMPLETION.
  • messageId (string): Unique identifier for the message.
  • contentIndex (integer): Index of the content within the message.
  • kind (string): Modality of the content. Possible values: MODALITY_KIND_TEXT, MODALITY_KIND_IMAGE.
  • text (string): The text content (for the text modality).
  • imageBase64 (string): Base64-encoded image data (for the image modality).
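
As a sketch of how this payload maps onto the generated message classes, the snippet below builds a Buffer with one text content and one image content in Python. It assumes stubs generated from rai.proto (module rai_pb2); the file path and identifiers are illustrative only.

# Sketch: constructing a Buffer like the example payload above.
# Assumes rai_pb2 was generated from rai.proto; example.png is a hypothetical file.
import base64

from google.protobuf import wrappers_pb2

import rai_pb2

with open("example.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("ascii")

buffer = rai_pb2.Buffer(
    messages=[
        rai_pb2.Message(
            message_id="0",
            source=rai_pb2.SOURCE_PROMPT,
            role=rai_pb2.ROLE_USER,
            contents=[
                rai_pb2.Content(
                    # content_index is a google.protobuf.Int32Value wrapper.
                    content_index=wrappers_pb2.Int32Value(value=0),
                    kind=rai_pb2.MODALITY_KIND_TEXT,
                    text="Text to analyze...",
                ),
                rai_pb2.Content(
                    content_index=wrappers_pb2.Int32Value(value=1),
                    kind=rai_pb2.MODALITY_KIND_IMAGE,
                    image_base64=image_b64,
                ),
            ],
        )
    ]
)

request = rai_pb2.AnalyzeBySafetyPolicyRequest(buffer=buffer)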

Events Returned by the Service

The API responds with three types of events, each providing specific insights into the content analysis process.

1. Analysis Result Event

This event provides detailed feedback about the analyzed content, including which harms were detected and at what severity.

Example:

{
  "analysisResult": {
    "offset": {
      "source": "PROMPT",
      "messageId": "0",
      "contentIndex": 0,
      "startOffset": 0,
      "endOffset": 50
    },
    "harmCategoryTaskResults": [
      {
        "result": "OK",
        "isBlockingCriteriaMet": true,
        "kind": "HARM_CATEGORY",
        "harmCategoryTaskResult": {
          "harmCategory": "HATE",
          "isDetected": true,
          "severity": 3,
          "riskLevel": "HIGH"
        }
      }
    ]
  }
}

Fields:

  • offset (object): Specifies the range of content analyzed.
  • offset.source (string): Same as the input source.
  • offset.messageId (string): Same as the input messageId.
  • offset.contentIndex (integer): Same as the input contentIndex.
  • offset.startOffset (integer): UTF-8 start offset for the analyzed content.
  • offset.endOffset (integer): UTF-8 end offset for the analyzed content.
  • harmCategoryTaskResults (array): Results for harm category detection tasks.
  • result (string): Indicates the outcome. Possible values: OK, NoModel.
  • isBlockingCriteriaMet (boolean): Whether the content is blocked due to harm detection.
  • kind (string): Type of task. Example: HARM_CATEGORY.
  • harmCategoryTaskResult (object): Details of the harm detection.
  • harmCategoryTaskResult.harmCategory (string): Type of harm detected (e.g., HATE).
  • harmCategoryTaskResult.isDetected (boolean): Whether harm was detected.
  • harmCategoryTaskResult.severity (integer): Severity level (1-5).
  • harmCategoryTaskResult.riskLevel (string): Risk level (e.g., HIGH, LOW).
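
For example, a handler for these events in Python might look like the sketch below. It assumes stubs generated from rai.proto and works directly on the protobuf fields rather than the JSON rendering shown above.

# Sketch: inspecting an AnalysisResult event from the response stream.
# Assumes rai_pb2 was generated from rai.proto.
import rai_pb2


def handle_analysis_result(result: rai_pb2.AnalysisResult) -> None:
    span = result.offset
    print(f"message {span.message_id}, content {span.content_index}, "
          f"offsets [{span.start_offset}:{span.end_offset}]")
    if result.result == rai_pb2.RESULT_BLOCKING_CRITERIA_MET:
        print("  blocking criteria met for this span")
    for task in result.task_results:
        if task.kind == rai_pb2.TASK_KIND_HARM_CATEGORY:
            harm = task.harm_category_task_result
            print(f"  {rai_pb2.HarmCategory.Name(harm.harm_category)}: "
                  f"detected={harm.is_detected}, severity={harm.severity}, "
                  f"risk={rai_pb2.RiskLevel.Name(harm.risk_level)}")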

2. Watermark Event

Indicates that the content up to a specific offset has been analyzed and verified as safe.

Example:

{
  "watermark": {
    "source": "COMPLETION",
    "messageId": "0",
    "contentIndex": 0,
    "offset": 50
  }
}

Fields:

  • watermark (object): Represents content analysis progress and safety status.
  • source (string): Same as the input source.
  • messageId (string): Same as the input messageId.
  • contentIndex (integer): Same as the input contentIndex.
  • offset (integer): Maximum offset analyzed and verified as safe.
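
A caller can use these events to track how far each piece of content has been cleared, for example with a small bookkeeping dictionary as in the Python sketch below (the keying scheme is just one possible choice; rai_pb2 is assumed to be generated from rai.proto).

# Sketch: tracking the furthest safe offset per piece of content.
# Assumes rai_pb2 was generated from rai.proto.
from collections import defaultdict

import rai_pb2

safe_offsets: dict[tuple[int, str, int], int] = defaultdict(int)


def handle_watermark(wm: rai_pb2.Watermark) -> None:
    key = (wm.source, wm.message_id, wm.content_index)
    # Keep the furthest offset seen for this (source, message, content) triple.
    safe_offsets[key] = max(safe_offsets[key], wm.offset)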

3. Completion Event

Signals the completion of the content analysis process. Example:

{
  "completion": {
    "end_reason": "END_REASON_END_OF_STREAM",
    "error_description": ""
  }
}

Fields:

  • completion (object): Represents the completion of the analysis stream.
  • end_reason (string): Reason for stream completion. Example: END_REASON_END_OF_STREAM.
  • error_description (string): Description of any errors encountered (empty if no errors).
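
Putting the three event types together, a response loop typically dispatches on the response oneof and stops when the Completion event arrives, as in the sketch below (responses stands for the iterator returned by stub.AnalyzeBySafetyPolicy; rai_pb2 is assumed to be generated from rai.proto).

# Sketch: draining the response stream and reacting to the Completion event.
# `responses` stands for the iterator returned by stub.AnalyzeBySafetyPolicy.
import rai_pb2


def consume(responses) -> None:
    for resp in responses:
        which = resp.WhichOneof("response")
        if which == "completion":
            reason = rai_pb2.EndReason.Name(resp.completion.end_reason)
            if resp.completion.end_reason != rai_pb2.END_REASON_END_OF_STREAM:
                raise RuntimeError(
                    f"analysis ended early: {reason}: "
                    f"{resp.completion.error_description}")
            return  # normal end of stream
        # analysis_result and watermark events arrive here; handle as needed.
        print(which, resp)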

Summary

The Standard Protocol Mode's streaming gRPC API provides a robust framework for analyzing multi-modal content, enabling real-time insights and flexible integration. By following this guide, developers can efficiently integrate the gRPC API into their systems and achieve precise, scalable content analysis.