SEMCONV: {
    GEN_AI_PROVIDER_NAME: "gen_ai.provider.name";
    GEN_AI_OPERATION_NAME: "gen_ai.operation.name";
    GEN_AI_AGENT_ID: "gen_ai.agent.id";
    GEN_AI_AGENT_NAME: "gen_ai.agent.name";
    GEN_AI_AGENT_DESCRIPTION: "gen_ai.agent.description";
    GEN_AI_CONVERSATION_ID: "gen_ai.conversation.id";
    GEN_AI_TOOL_NAME: "gen_ai.tool.name";
    GEN_AI_TOOL_DESCRIPTION: "gen_ai.tool.description";
    GEN_AI_TOOL_TYPE: "gen_ai.tool.type";
    GEN_AI_TOOL_CALL_ID: "gen_ai.tool.call.id";
    GEN_AI_TOOL_CALL_ARGUMENTS: "gen_ai.tool.call.arguments";
    GEN_AI_TOOL_CALL_RESULT: "gen_ai.tool.call.result";
    GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions";
    GEN_AI_REQUEST_MODEL: "gen_ai.request.model";
    GEN_AI_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens";
    GEN_AI_REQUEST_TEMPERATURE: "gen_ai.request.temperature";
    GEN_AI_REQUEST_TOP_P: "gen_ai.request.top_p";
    GEN_AI_REQUEST_TOP_K: "gen_ai.request.top_k";
    GEN_AI_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty";
    GEN_AI_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty";
    GEN_AI_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences";
    GEN_AI_REQUEST_CHOICE_COUNT: "gen_ai.request.choice.count";
    GEN_AI_REQUEST_SEED: "gen_ai.request.seed";
    GEN_AI_RESPONSE_ID: "gen_ai.response.id";
    GEN_AI_RESPONSE_MODEL: "gen_ai.response.model";
    GEN_AI_RESPONSE_FINISH_REASONS: "gen_ai.response.finish_reasons";
    GEN_AI_OUTPUT_TYPE: "gen_ai.output.type";
    GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens";
    GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens";
    GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions";
    GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages";
    GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages";
    SERVER_ADDRESS: "server.address";
    SERVER_PORT: "server.port";
    ERROR_TYPE: "error.type";
    GEN_AI_DATA_SOURCE_ID: "gen_ai.data_source.id";
    GEN_AI_EMBEDDINGS_DIMENSION_COUNT: "gen_ai.embeddings.dimension.count";
    GEN_AI_REQUEST_ENCODING_FORMATS: "gen_ai.request.encoding_formats";
    GEN_AI_SYSTEM: "gen_ai.system";
    GEN_AI_USAGE_TOTAL_TOKENS: "gen_ai.usage.total_tokens";
    GEN_AI_CONTENT_PROMPT: "gen_ai.content.prompt";
    GEN_AI_CONTENT_COMPLETION: "gen_ai.content.completion";
} = ...

Standard OpenTelemetry GenAI semantic conventions. Reference: https://opentelemetry.io/docs/specs/semconv/gen-ai/. Version: v1.38.0 (development status).

Type declaration

  • Readonly GEN_AI_PROVIDER_NAME: "gen_ai.provider.name"
  • Readonly GEN_AI_OPERATION_NAME: "gen_ai.operation.name"
  • Readonly GEN_AI_AGENT_ID: "gen_ai.agent.id"
  • Readonly GEN_AI_AGENT_NAME: "gen_ai.agent.name"
  • Readonly GEN_AI_AGENT_DESCRIPTION: "gen_ai.agent.description"
  • Readonly GEN_AI_CONVERSATION_ID: "gen_ai.conversation.id"
  • Readonly GEN_AI_TOOL_NAME: "gen_ai.tool.name"
  • Readonly GEN_AI_TOOL_DESCRIPTION: "gen_ai.tool.description"
  • Readonly GEN_AI_TOOL_TYPE: "gen_ai.tool.type"
  • Readonly GEN_AI_TOOL_CALL_ID: "gen_ai.tool.call.id"
  • Readonly GEN_AI_TOOL_CALL_ARGUMENTS: "gen_ai.tool.call.arguments"
  • Readonly GEN_AI_TOOL_CALL_RESULT: "gen_ai.tool.call.result"
  • Readonly GEN_AI_TOOL_DEFINITIONS: "gen_ai.tool.definitions"
  • Readonly GEN_AI_REQUEST_MODEL: "gen_ai.request.model"
  • Readonly GEN_AI_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens"
  • Readonly GEN_AI_REQUEST_TEMPERATURE: "gen_ai.request.temperature"
  • Readonly GEN_AI_REQUEST_TOP_P: "gen_ai.request.top_p"
  • Readonly GEN_AI_REQUEST_TOP_K: "gen_ai.request.top_k"
  • Readonly GEN_AI_REQUEST_FREQUENCY_PENALTY: "gen_ai.request.frequency_penalty"
  • Readonly GEN_AI_REQUEST_PRESENCE_PENALTY: "gen_ai.request.presence_penalty"
  • Readonly GEN_AI_REQUEST_STOP_SEQUENCES: "gen_ai.request.stop_sequences"
  • Readonly GEN_AI_REQUEST_CHOICE_COUNT: "gen_ai.request.choice.count"
  • Readonly GEN_AI_REQUEST_SEED: "gen_ai.request.seed"
  • Readonly GEN_AI_RESPONSE_ID: "gen_ai.response.id"
  • Readonly GEN_AI_RESPONSE_MODEL: "gen_ai.response.model"
  • Readonly GEN_AI_RESPONSE_FINISH_REASONS: "gen_ai.response.finish_reasons"
  • Readonly GEN_AI_OUTPUT_TYPE: "gen_ai.output.type"
  • Readonly GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens"
  • Readonly GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens"
  • Readonly GEN_AI_SYSTEM_INSTRUCTIONS: "gen_ai.system_instructions"
  • Readonly GEN_AI_INPUT_MESSAGES: "gen_ai.input.messages"
  • Readonly GEN_AI_OUTPUT_MESSAGES: "gen_ai.output.messages"
  • Readonly SERVER_ADDRESS: "server.address"
  • Readonly SERVER_PORT: "server.port"
  • Readonly ERROR_TYPE: "error.type"
  • Readonly GEN_AI_DATA_SOURCE_ID: "gen_ai.data_source.id"
  • Readonly GEN_AI_EMBEDDINGS_DIMENSION_COUNT: "gen_ai.embeddings.dimension.count"
  • Readonly GEN_AI_REQUEST_ENCODING_FORMATS: "gen_ai.request.encoding_formats"
  • Readonly GEN_AI_SYSTEM: "gen_ai.system"

    Deprecated — use GEN_AI_PROVIDER_NAME instead.

  • Readonly GEN_AI_USAGE_TOTAL_TOKENS: "gen_ai.usage.total_tokens"

    Deprecated — total tokens should be computed client-side from input + output tokens.

  • Readonly GEN_AI_CONTENT_PROMPT: "gen_ai.content.prompt"

    Deprecated — use GEN_AI_INPUT_MESSAGES or structured events instead.

  • Readonly GEN_AI_CONTENT_COMPLETION: "gen_ai.content.completion"

    Deprecated — use GEN_AI_OUTPUT_MESSAGES or structured events instead.