diff --git a/examples/ai-sdk-example/index.js b/examples/ai-sdk-example/index.js new file mode 100644 index 0000000..28715da --- /dev/null +++ b/examples/ai-sdk-example/index.js @@ -0,0 +1,68 @@ +import 'dotenv/config'; +import { generateText } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import { agentops } from 'agentops'; +import { trace } from '@opentelemetry/api'; + +// Enable debug logging +process.env.DEBUG = 'agentops:*'; + +console.log('🚀 Starting AI SDK v5 Example with AgentOps'); + +async function main() { + // Initialize AgentOps + const agentOps = agentops; + + // Skip automatic instrumentation for now and just init the client + try { + console.log('📡 Initializing AgentOps...'); + await agentOps.init({ + apiKey: process.env.AGENTOPS_API_KEY, + serviceName: 'ai-sdk-example', + }); + console.log('✅ AgentOps initialized successfully'); + console.log('📊 AgentOps initialized status:', agentOps.initialized); + } catch (error) { + console.error('❌ Failed to initialize AgentOps:', error.message); + console.log('📝 Note: This example will run without AgentOps instrumentation'); + } + + try { + console.log('🤖 Generating text with OpenAI...'); + + // Use AI SDK directly with manual telemetry + const result = await generateText({ + model: openai('gpt-3.5-turbo'), + prompt: 'What is the meaning of life?', + experimental_telemetry: { + isEnabled: true, + recordInputs: true, + recordOutputs: true, + functionId: 'example.generateText', + metadata: { + 'example.manual_telemetry': true, + 'example.question_type': 'philosophical' + } + } + }); + + console.log('✅ Generation completed!'); + console.log('📝 Generated text:', result.text); + console.log('📊 Usage:', result.usage); + + } catch (error) { + console.error('❌ Error generating text:', error.message); + } + + console.log('🎉 Example completed 
successfully!'); + console.log('📈 Check your AgentOps dashboard for telemetry data'); + + // Wait a bit to ensure spans are exported + console.log('⏳ Waiting for span export...'); + await new Promise(resolve => setTimeout(resolve, 2000)); + + console.log('👋 AgentOps shutdown complete'); + process.exit(0); +} + +main().catch(console.error); \ No newline at end of file diff --git a/examples/ai-sdk-example/package-lock.json b/examples/ai-sdk-example/package-lock.json new file mode 100644 index 0000000..d054757 --- /dev/null +++ b/examples/ai-sdk-example/package-lock.json @@ -0,0 +1,189 @@ +{ + "name": "basic-ai-sdk-example", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "basic-ai-sdk-example", + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "@ai-sdk/openai": "^2.0.0-beta.9", + "@opentelemetry/api": "^1.9.0", + "agentops": "file:../../", + "ai": "^5.0.0-beta.21", + "dotenv": "^17.2.0" + }, + "devDependencies": { + "@types/node": "^20.0.0" + } + }, + "../..": { + "name": "agentops", + "version": "0.1.0", + "license": "MIT", + "dependencies": { + "@openai/agents": "^0.0.8", + "@opentelemetry/api": "^1.8.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.52.1", + "@opentelemetry/instrumentation": "^0.52.1", + "@opentelemetry/resources": "^1.25.1", + "@opentelemetry/sdk-node": "^0.52.1", + "@opentelemetry/sdk-trace-base": "^1.25.1", + "@opentelemetry/sdk-trace-node": "^1.25.1", + "@opentelemetry/semantic-conventions": "^1.25.1", + "debug": "^4.4.1" + }, + "devDependencies": { + "@types/debug": "^4.1.12", + "@types/jest": "^29.5.14", + "@types/node": "^20.17.57", + "@typescript-eslint/eslint-plugin": "^7.0.0", + "@typescript-eslint/parser": "^7.0.0", + "eslint": "^8.57.0", + "jest": "^29.7.0", + "ts-jest": "^29.1.5", + "typescript": "^5.6.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@ai-sdk/openai": { + "version": "2.0.0-canary.20", + "resolved": 
"https://registry.npmjs.org/@ai-sdk/openai/-/openai-2.0.0-canary.20.tgz", + "integrity": "sha512-AipaQeOz/nIPTtZLJaqG9sxf8zWqZ1UGLG1QOLhNpWwSBDXPVw5k0cWhLtReuZrL/ncKvL6BrGN9aEZLqcmWAg==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "2.0.0-canary.14", + "@ai-sdk/provider-utils": "3.0.0-canary.19" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.24.0" + } + }, + "node_modules/@ai-sdk/provider": { + "version": "2.0.0-canary.14", + "resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-2.0.0-canary.14.tgz", + "integrity": "sha512-aN83hjdjDCyhkOdulwMsxmGb91owS+bCSe6FWg1TEwusNM35vv020nY//Gid/0NdIpVkZJGzAajgCWrnno2zzA==", + "license": "Apache-2.0", + "dependencies": { + "json-schema": "^0.4.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@ai-sdk/provider-utils": { + "version": "3.0.0-canary.19", + "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-3.0.0-canary.19.tgz", + "integrity": "sha512-4IJw6/wkWYLYfFYPvCs5go0L/sBRZsIRW1l/R6LniF4WjAH2+R4dMbESgBmzx+Z2+W+W6gFeK8dnQByn7vaA/w==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "2.0.0-canary.14", + "@standard-schema/spec": "^1.0.0", + "zod-to-json-schema": "^3.24.1" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.23.8" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "license": "Apache-2.0", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@standard-schema/spec": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz", + "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==", + "license": "MIT" + }, + "node_modules/@types/node": { + 
"version": "20.19.8", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.8.tgz", + "integrity": "sha512-HzbgCY53T6bfu4tT7Aq3TvViJyHjLjPNaAS3HOuMc9pw97KHsUtXNX4L+wu59g1WnjsZSko35MbEqnO58rihhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/agentops": { + "resolved": "../..", + "link": true + }, + "node_modules/ai": { + "version": "5.0.0-canary.24", + "resolved": "https://registry.npmjs.org/ai/-/ai-5.0.0-canary.24.tgz", + "integrity": "sha512-vqaMmM6XFwjz9mNjox9ehjkWFwXbSchhor5MiqgKZ1qRyoTvoYzAt6oCZwg5kN5jXNQ3rZVuyE8N3BbPbwma2Q==", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/provider": "2.0.0-canary.14", + "@ai-sdk/provider-utils": "3.0.0-canary.19", + "@opentelemetry/api": "1.9.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "zod": "^3.23.8" + } + }, + "node_modules/dotenv": { + "version": "17.2.0", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.0.tgz", + "integrity": "sha512-Q4sgBT60gzd0BB0lSyYD3xM4YxrXA9y4uBDof1JNYGzOXrQdQ6yX+7XIAqoFOGQFOTK1D3Hts5OllpxMDZFONQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==", + "license": "(AFL-2.1 OR BSD-3-Clause)" + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": 
"sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "peer": true, + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.24.6", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz", + "integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.24.1" + } + } + } +} diff --git a/examples/ai-sdk-example/package.json b/examples/ai-sdk-example/package.json new file mode 100644 index 0000000..6c496de --- /dev/null +++ b/examples/ai-sdk-example/package.json @@ -0,0 +1,30 @@ +{ + "name": "basic-ai-sdk-example", + "version": "1.0.0", + "description": "Basic example of AI SDK v5 with AgentOps instrumentation", + "main": "index.js", + "type": "module", + "scripts": { + "start": "node index.js", + "dev": "node --watch index.js" + }, + "dependencies": { + "@ai-sdk/openai": "^2.0.0-beta.9", + "@opentelemetry/api": "^1.9.0", + "agentops": "file:../../", + "ai": "^5.0.0-beta.21", + "dotenv": "^17.2.0" + }, + "devDependencies": { + "@types/node": "^20.0.0" + }, + "keywords": [ + "ai", + "openai", + "agentops", + "telemetry", + "observability" + ], + "author": "AgentOps", + "license": "MIT" +} diff --git a/package-lock.json b/package-lock.json index 6cbe58f..87c4336 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,7 +9,7 @@ "version": "0.1.0", "license": "MIT", "dependencies": { - "@openai/agents": "^0.0.1", + "@openai/agents": "^0.0.8", "@opentelemetry/api": "^1.8.0", "@opentelemetry/exporter-trace-otlp-http": "^0.52.1", "@opentelemetry/instrumentation": "^0.52.1", @@ -1218,9 +1218,9 @@ } }, "node_modules/@modelcontextprotocol/sdk": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.12.3.tgz", - "integrity": 
"sha512-DyVYSOafBvk3/j1Oka4z5BWT8o4AFmoNyZY9pALOm7Lh3GZglR71Co4r4dEUoqDWdDazIZQHBe7J2Nwkg6gHgQ==", + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.16.0.tgz", + "integrity": "sha512-8ofX7gkZcLj9H9rSd50mCgm3SSF8C7XoclxJuLoV0Cz3rEQ1tv9MZRYYvJtm9n1BiEQQMzSmE/w2AEkNacLYfg==", "license": "MIT", "optional": true, "dependencies": { @@ -1229,6 +1229,7 @@ "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", + "eventsource-parser": "^3.0.0", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "pkce-challenge": "^5.0.0", @@ -1279,22 +1280,22 @@ } }, "node_modules/@openai/agents": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/@openai/agents/-/agents-0.0.1.tgz", - "integrity": "sha512-sIOeusrIjDL6A1vgLdjUINYUSfhxw1SICmpJZj8BYdIetJucnXjXfYZIYpBByv6MTOF8M7nGlSKi0zZEMUSKRA==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/@openai/agents/-/agents-0.0.8.tgz", + "integrity": "sha512-HAPP4QM47kWeWw70uxCzr5zjqHuDIvQ8Obx+98J66lcEeIZzMChHN60k5ew8DITScmzDVAVuwdzfAImSyq002w==", "license": "MIT", "dependencies": { - "@openai/agents-core": "0.0.1", - "@openai/agents-openai": "0.0.1", - "@openai/agents-realtime": "0.0.1", + "@openai/agents-core": "0.0.8", + "@openai/agents-openai": "0.0.8", + "@openai/agents-realtime": "0.0.8", "debug": "^4.4.0", "openai": "^5.0.1" } }, "node_modules/@openai/agents-core": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/@openai/agents-core/-/agents-core-0.0.1.tgz", - "integrity": "sha512-15rIIwfvxalqVcY1GfUyZ8BWFcylOoIVG/pqFzoxArPP7E9M73/9gfdqztoEcwg2868RLRWhWOXa4XY2eByklw==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/@openai/agents-core/-/agents-core-0.0.8.tgz", + "integrity": "sha512-CMSq4iuvGaYkEAw0Z6oT+EDNgoCQF3YsYky29fbLDA6W3uuR53D2l6XzikAh0xwJUeuGZ7jQ1PsAxxg/hAW68A==", "license": "MIT", "dependencies": { "@openai/zod": "npm:zod@^3.25.40", @@ -1314,24 +1315,25 @@ } }, "node_modules/@openai/agents-openai": { - 
"version": "0.0.1", - "resolved": "https://registry.npmjs.org/@openai/agents-openai/-/agents-openai-0.0.1.tgz", - "integrity": "sha512-/MDFPSi3ocQoogbInQSTJQHki2Plfl/gKgPMDifPB0zIcG3nP0MmmtVe3Ts64t4jTGZpmaaQ+JsY+KAv5wJKow==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/@openai/agents-openai/-/agents-openai-0.0.8.tgz", + "integrity": "sha512-VUsUOXNkqsjQv1EwxyjYWoiACCsaQ8OlHtQAmw2jo6rNeHzEsGF7WLhqwDAzRDwZOVPwo4aF54iIcANeysywEg==", "license": "MIT", "dependencies": { - "@openai/agents-core": "0.0.1", + "@openai/agents-core": "0.0.8", "@openai/zod": "npm:zod@^3.25.40", "debug": "^4.4.0", "openai": "^5.0.1" } }, "node_modules/@openai/agents-realtime": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/@openai/agents-realtime/-/agents-realtime-0.0.1.tgz", - "integrity": "sha512-+0ApAhtGVKndIVcW/EZby9xgvUvmK7/vg1a+wSFm7ZCcWCPGViwDzKKw2xLnn5KBHlxnpOZE+VIW9FZoEFUewQ==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/@openai/agents-realtime/-/agents-realtime-0.0.8.tgz", + "integrity": "sha512-f+CxHICIFvCwbMCznop+bz+TTgnFfFpscN+9OTfiU5ITnaohRf+qbyU8PRgQZnSbsxRZyTOgqFoJ+2wWxM5tHA==", "license": "MIT", "dependencies": { - "@openai/agents-core": "0.0.1", + "@openai/agents-core": "0.0.8", + "@openai/zod": "npm:zod@^3.25.40", "@types/ws": "^8.18.1", "debug": "^4.4.0", "ws": "^8.18.1" @@ -1339,9 +1341,9 @@ }, "node_modules/@openai/zod": { "name": "zod", - "version": "3.25.67", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz", - "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==", + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", "funding": { "url": "https://github.com/sponsors/colinhacks" @@ -3787,13 +3789,13 @@ } }, "node_modules/eventsource-parser": { - "version": "3.0.2", - 
"resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.2.tgz", - "integrity": "sha512-6RxOBZ/cYgd8usLwsEl+EC09Au/9BcmCKYF2/xbml6DNczf7nv0MQb+7BA2F+li6//I+28VNlQR37XfQtcAJuA==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.3.tgz", + "integrity": "sha512-nVpZkTMM9rF6AQ9gPJpFsNAMt48wIzB5TQgiTLdHiuO8XEDhUgZEhqKlZWXbIzo9VmJ/HvysHqEaVeD5v9TPvA==", "license": "MIT", "optional": true, "engines": { - "node": ">=18.0.0" + "node": ">=20.0.0" } }, "node_modules/execa": { @@ -3890,9 +3892,9 @@ } }, "node_modules/express-rate-limit": { - "version": "7.5.0", - "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.0.tgz", - "integrity": "sha512-eB5zbQh5h+VenMPM3fh+nw1YExi5nMr6HUCR62ELSP11huvxm/Uir1H1QEyTkk5QX6A58pX6NmaTMceKZ0Eodg==", + "version": "7.5.1", + "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.1.tgz", + "integrity": "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==", "license": "MIT", "optional": true, "engines": { @@ -3902,7 +3904,7 @@ "url": "https://github.com/sponsors/express-rate-limit" }, "peerDependencies": { - "express": "^4.11 || 5 || ^5.0.0-beta.1" + "express": ">= 4.11" } }, "node_modules/fast-deep-equal": { @@ -5786,9 +5788,9 @@ } }, "node_modules/openai": { - "version": "5.5.1", - "resolved": "https://registry.npmjs.org/openai/-/openai-5.5.1.tgz", - "integrity": "sha512-5i19097mGotHA1eFsM6Tjd/tJ8uo9sa5Ysv4Q6bKJ2vtN6rc0MzMrUefXnLXYAJcmMQrC1Efhj0AvfIkXrQamw==", + "version": "5.10.1", + "resolved": "https://registry.npmjs.org/openai/-/openai-5.10.1.tgz", + "integrity": "sha512-fq6xVfv1/gpLbsj8fArEt3b6B9jBxdhAK+VJ+bDvbUvNd+KTLlA3bnDeYZaBsGH9LUhJ1M1yXfp9sEyBLMx6eA==", "license": "Apache-2.0", "bin": { "openai": "bin/cli" @@ -7166,9 +7168,9 @@ } }, "node_modules/ws": { - "version": "8.18.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz", - 
"integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==", + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", "license": "MIT", "engines": { "node": ">=10.0.0" @@ -7243,9 +7245,9 @@ } }, "node_modules/zod": { - "version": "3.25.67", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.67.tgz", - "integrity": "sha512-idA2YXwpCdqUSKRCACDE6ItZD9TZzy3OZMtpfLoh6oPR47lipysRrJfjzMqFxQ3uJuUPyUeWe1r9vLH33xO/Qw==", + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", "license": "MIT", "optional": true, "funding": { @@ -7253,9 +7255,9 @@ } }, "node_modules/zod-to-json-schema": { - "version": "3.24.5", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.5.tgz", - "integrity": "sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==", + "version": "3.24.6", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz", + "integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==", "license": "ISC", "optional": true, "peerDependencies": { diff --git a/src/instrumentation/ai-sdk/exporter.ts b/src/instrumentation/ai-sdk/exporter.ts new file mode 100644 index 0000000..a18ed5e --- /dev/null +++ b/src/instrumentation/ai-sdk/exporter.ts @@ -0,0 +1,296 @@ +import { SpanExporter } from '@opentelemetry/sdk-trace-base'; +import { ReadableSpan } from '@opentelemetry/sdk-trace-base'; +import { ExportResult, ExportResultCode } from '@opentelemetry/core'; +import { + GEN_AI_SYSTEM, + GEN_AI_REQUEST_MODEL, + GEN_AI_REQUEST_TEMPERATURE, + GEN_AI_REQUEST_MAX_TOKENS, + GEN_AI_REQUEST_TOP_P, 
+ GEN_AI_REQUEST_TOP_K, + GEN_AI_REQUEST_FREQUENCY_PENALTY, + GEN_AI_REQUEST_PRESENCE_PENALTY, + GEN_AI_REQUEST_STOP_SEQUENCES, + GEN_AI_REQUEST_STREAMING, + GEN_AI_RESPONSE_MODEL, + GEN_AI_RESPONSE_ID, + GEN_AI_RESPONSE_FINISH_REASON, + GEN_AI_USAGE_PROMPT_TOKENS, + GEN_AI_USAGE_COMPLETION_TOKENS, + GEN_AI_USAGE_TOTAL_TOKENS, + GEN_AI_STREAMING_TIME_TO_FIRST_TOKEN, + GEN_AI_STREAMING_TIME_TO_GENERATE, + GEN_AI_STREAMING_CHUNK_COUNT, +} from '../../semconv/gen_ai'; +import { + GEN_AI_PROMPT_ROLE, + GEN_AI_PROMPT_CONTENT, +} from '../../semconv/messages'; + +const debug = require('debug')('agentops:instrumentation:ai-sdk:exporter'); + +/** + * Custom exporter that transforms AI SDK attributes to gen_ai conventions. + * + * **Why This Approach:** + * + * The AI SDK has excellent built-in OpenTelemetry support that automatically creates spans + * with comprehensive telemetry data when `experimental_telemetry` is enabled. However, + * it uses `ai.*` attribute naming conventions instead of the standard `gen_ai.*` semantic + * conventions defined by OpenTelemetry. + * + * Traditional function patching is impossible with the AI SDK because: + * - Functions are exported as getter-only properties + * - Properties are non-configurable and cannot be redefined + * - Attempting to patch throws: `TypeError: Cannot redefine property: generateText` + * + * **Solution:** + * + * This exporter intercepts spans at the export stage and transforms all `ai.*` attributes + * to proper `gen_ai.*` semantic conventions, then removes the original `ai.*` attributes. + * This ensures that only compliant `gen_ai.*` attributes are sent to AgentOps. + * + * **Benefits:** + * - Works with ALL AI SDK functions without needing individual patches + * - Robust against AI SDK version changes + * - Maintains full compatibility with AI SDK's built-in telemetry + * - Ensures proper semantic convention compliance + * + * **Process:** + * 1. Wraps the base AgentOps exporter + * 2. 
Intercepts spans before export + * 3. Identifies AI SDK spans by attribute patterns + * 4. Maps `ai.*` attributes to `gen_ai.*` conventions + * 5. Removes original `ai.*` attributes + * 6. Forwards transformed spans to the wrapped exporter + */ +export class AISDKExporter implements SpanExporter { + constructor(private readonly wrappedExporter: SpanExporter) {} + + /** + * Exports spans after transforming AI SDK attributes. + */ + export(spans: ReadableSpan[], resultCallback: (result: ExportResult) => void): void { + const transformedSpans = spans.map(span => this.transformSpan(span)); + this.wrappedExporter.export(transformedSpans, resultCallback); + } + + /** + * Shuts down the wrapped exporter. + */ + shutdown(): Promise { + return this.wrappedExporter.shutdown(); + } + + /** + * Forces flush on the wrapped exporter. + */ + forceFlush(): Promise { + if (this.wrappedExporter.forceFlush) { + return this.wrappedExporter.forceFlush(); + } + return Promise.resolve(); + } + + /** + * Transforms a span to use only gen_ai attributes. + */ + private transformSpan(span: ReadableSpan): ReadableSpan { + // Check if this is an AI SDK span + if (!this.isAISDKSpan(span)) { + return span; + } + + debug(`Transforming AI SDK span: ${span.name}`); + + // Create a new attributes object with transformed attributes + const originalAttributes = span.attributes; + const transformedAttributes = this.transformAttributes(originalAttributes); + + // Create a new span object that preserves the original prototype and methods + const transformedSpan = Object.create(Object.getPrototypeOf(span)); + Object.assign(transformedSpan, span); + transformedSpan.attributes = transformedAttributes; + + debug(`Transformed ${Object.keys(originalAttributes).length} attributes to ${Object.keys(transformedAttributes).length} attributes`); + + return transformedSpan; + } + + /** + * Checks if a span is from the AI SDK. 
+ */ + private isAISDKSpan(span: ReadableSpan): boolean { + const attributes = span.attributes; + + // Check for AI SDK specific attributes + return Object.keys(attributes).some(key => + key.startsWith('ai.') || + key.startsWith('gen_ai.') || + span.name.includes('ai.') || + span.instrumentationLibrary?.name?.includes('ai') + ); + } + + /** + * Transforms attributes from AI SDK format to gen_ai format. + */ + private transformAttributes(attributes: Record): Record { + const transformed: Record = {}; + + // First, copy all non-AI attributes + for (const [key, value] of Object.entries(attributes)) { + if (!key.startsWith('ai.')) { + transformed[key] = value; + } + } + + // Transform AI SDK attributes to gen_ai attributes + this.mapBasicAttributes(attributes, transformed); + this.mapPromptMessages(attributes, transformed); + this.mapToolCalls(attributes, transformed); + this.mapRequestSettings(attributes, transformed); + this.mapResponseData(attributes, transformed); + this.mapUsageMetrics(attributes, transformed); + this.mapStreamingMetrics(attributes, transformed); + + return transformed; + } + + /** + * Maps basic AI SDK attributes to gen_ai conventions. + */ + private mapBasicAttributes(attributes: Record, transformed: Record): void { + const basicMapping: Record = { + 'ai.model.provider': GEN_AI_SYSTEM, + 'ai.model.id': GEN_AI_REQUEST_MODEL, + 'ai.response.model': GEN_AI_RESPONSE_MODEL, + 'ai.response.id': GEN_AI_RESPONSE_ID, + }; + + for (const [aiKey, genAiKey] of Object.entries(basicMapping)) { + if (attributes[aiKey] !== undefined) { + transformed[genAiKey] = attributes[aiKey]; + } + } + + // Handle finish reason (convert to array format) + if (attributes['ai.response.finishReason'] !== undefined) { + transformed[GEN_AI_RESPONSE_FINISH_REASON] = [attributes['ai.response.finishReason']]; + } + } + + /** + * Maps prompt messages to indexed semantic conventions. 
+ */ + private mapPromptMessages(attributes: Record, transformed: Record): void { + if (attributes['ai.prompt.messages'] && Array.isArray(attributes['ai.prompt.messages'])) { + const messages = attributes['ai.prompt.messages']; + + messages.forEach((message: any, index: number) => { + if (message.role) { + transformed[GEN_AI_PROMPT_ROLE.replace('{i}', index.toString())] = message.role; + } + if (message.content) { + transformed[GEN_AI_PROMPT_CONTENT.replace('{i}', index.toString())] = + typeof message.content === 'string' ? message.content : JSON.stringify(message.content); + } + }); + } + } + + /** + * Maps tool calls to indexed semantic conventions. + */ + private mapToolCalls(attributes: Record, transformed: Record): void { + if (attributes['ai.request.tools'] && Array.isArray(attributes['ai.request.tools'])) { + const tools = attributes['ai.request.tools']; + + tools.forEach((tool: any, index: number) => { + if (tool.name) { + transformed[`gen_ai.request.tools.${index}.name`] = tool.name; + } + if (tool.parameters) { + transformed[`gen_ai.request.tools.${index}.arguments`] = JSON.stringify(tool.parameters); + } + }); + } + } + + /** + * Maps request settings to gen_ai conventions. 
+ */ + private mapRequestSettings(attributes: Record, transformed: Record): void { + const settingsMapping: Record = { + 'ai.request.temperature': GEN_AI_REQUEST_TEMPERATURE, + 'ai.request.maxTokens': GEN_AI_REQUEST_MAX_TOKENS, + 'ai.request.topP': GEN_AI_REQUEST_TOP_P, + 'ai.request.topK': GEN_AI_REQUEST_TOP_K, + 'ai.request.frequencyPenalty': GEN_AI_REQUEST_FREQUENCY_PENALTY, + 'ai.request.presencePenalty': GEN_AI_REQUEST_PRESENCE_PENALTY, + 'ai.request.stopSequences': GEN_AI_REQUEST_STOP_SEQUENCES, + }; + + for (const [aiKey, genAiKey] of Object.entries(settingsMapping)) { + if (attributes[aiKey] !== undefined) { + transformed[genAiKey] = attributes[aiKey]; + } + } + + // Handle streaming boolean + if (attributes['ai.request.streaming'] !== undefined) { + transformed[GEN_AI_REQUEST_STREAMING] = attributes['ai.request.streaming']; + } + } + + /** + * Maps response data to gen_ai conventions. + */ + private mapResponseData(attributes: Record, transformed: Record): void { + // Response text/content is typically already in gen_ai format + // or handled by the AI SDK's built-in telemetry + + // Handle response metadata + if (attributes['ai.response.text']) { + transformed['gen_ai.response.text'] = attributes['ai.response.text']; + } + + if (attributes['ai.response.timestamp']) { + transformed['gen_ai.response.timestamp'] = attributes['ai.response.timestamp']; + } + } + + /** + * Maps usage metrics to gen_ai conventions. + */ + private mapUsageMetrics(attributes: Record, transformed: Record): void { + const usageMapping: Record = { + 'ai.usage.promptTokens': GEN_AI_USAGE_PROMPT_TOKENS, + 'ai.usage.completionTokens': GEN_AI_USAGE_COMPLETION_TOKENS, + 'ai.usage.totalTokens': GEN_AI_USAGE_TOTAL_TOKENS, + }; + + for (const [aiKey, genAiKey] of Object.entries(usageMapping)) { + if (attributes[aiKey] !== undefined) { + transformed[genAiKey] = attributes[aiKey]; + } + } + } + + /** + * Maps streaming metrics to gen_ai conventions. 
+ */ + private mapStreamingMetrics(attributes: Record, transformed: Record): void { + const streamingMapping: Record = { + 'ai.streaming.timeToFirstToken': GEN_AI_STREAMING_TIME_TO_FIRST_TOKEN, + 'ai.streaming.timeToGenerate': GEN_AI_STREAMING_TIME_TO_GENERATE, + 'ai.streaming.chunkCount': GEN_AI_STREAMING_CHUNK_COUNT, + }; + + for (const [aiKey, genAiKey] of Object.entries(streamingMapping)) { + if (attributes[aiKey] !== undefined) { + transformed[genAiKey] = attributes[aiKey]; + } + } + } +} \ No newline at end of file diff --git a/src/instrumentation/ai-sdk/index.ts b/src/instrumentation/ai-sdk/index.ts new file mode 100644 index 0000000..29fc619 --- /dev/null +++ b/src/instrumentation/ai-sdk/index.ts @@ -0,0 +1,79 @@ +import { InstrumentationBase } from '../base'; +import { InstrumentorMetadata } from '../../types'; + +export const debug = require('debug')('agentops:instrumentation:ai-sdk'); + +/** + * Instrumentation for the AI SDK by Vercel. + * + * The AI SDK has excellent built-in OpenTelemetry support that automatically creates spans + * with comprehensive telemetry data. However, it uses `ai.*` attribute naming conventions + * instead of the standard `gen_ai.*` semantic conventions. + * + * **Why This Approach:** + * + * 1. **Function Patching is Impossible**: The AI SDK exports functions as getter-only properties + * that cannot be redefined or patched. Attempting to patch them throws: + * `TypeError: Cannot redefine property: generateText` + * + * 2. **Exporter-Based Transformation**: Instead of patching, we use a custom exporter that + * intercepts spans before they're sent to AgentOps and transforms all `ai.*` attributes + * to proper `gen_ai.*` semantic conventions. + * + * 3. **Comprehensive Coverage**: This approach works for ALL AI SDK functions and any future + * additions without needing to patch individual functions. 
+ * + * **Supported Functions:** + * - generateText() + * - generateObject() + * - streamText() + * - streamObject() + * - embed() + * - embedMany() + * - Tool calls within these functions + * + * **Transformed Attributes:** + * - `ai.model.provider` → `gen_ai.system` + * - `ai.model.id` → `gen_ai.request.model` + * - `ai.usage.promptTokens` → `gen_ai.usage.input_tokens` + * - `ai.usage.completionTokens` → `gen_ai.usage.output_tokens` + * - `ai.response.finishReason` → `gen_ai.response.finish_reasons` + * - And many more... + * + * The actual transformation logic is handled by `AISDKExporter` in the tracing core. + */ +export class AISDKInstrumentation extends InstrumentationBase { + static readonly metadata: InstrumentorMetadata = { + name: 'ai-sdk-instrumentation', + version: '1.0.0', + description: 'Instrumentation for AI SDK by Vercel - Uses exporter-based attribute transformation', + targetLibrary: 'ai', + targetVersions: ['*'] + }; + static readonly useRuntimeTargeting = true; + + /** + * Setup is intentionally minimal since the AI SDK's built-in telemetry handles span creation. + * The actual attribute transformation happens in the AISDKExporter. + */ + protected setup(moduleExports: any, moduleVersion?: string): any { + debug('AI SDK instrumentation registered - using exporter-based attribute transformation'); + + // Enable AI SDK's built-in telemetry if not already enabled + if (moduleExports.experimental_telemetry) { + debug('AI SDK telemetry already enabled'); + } else { + debug('AI SDK telemetry not found - spans will still be processed by exporter'); + } + + return moduleExports; + } + + /** + * Teardown is minimal since we don't patch any functions. 
+ */ + protected teardown(moduleExports: any, moduleVersion?: string): any { + debug('AI SDK instrumentation teardown - no cleanup needed'); + return moduleExports; + } +} \ No newline at end of file diff --git a/src/instrumentation/index.ts b/src/instrumentation/index.ts index 0f4cd3e..e1c8794 100644 --- a/src/instrumentation/index.ts +++ b/src/instrumentation/index.ts @@ -1,9 +1,11 @@ import { InstrumentationBase } from './base'; import { TestInstrumentation } from './test-instrumentation'; import { OpenAIAgentsInstrumentation } from './openai-agents'; +import { AISDKInstrumentation } from './ai-sdk'; // registry of all available instrumentors export const AVAILABLE_INSTRUMENTORS: (typeof InstrumentationBase)[] = [ TestInstrumentation, OpenAIAgentsInstrumentation, + AISDKInstrumentation, ]; diff --git a/src/instrumentation/openai-agents/audio.ts b/src/instrumentation/openai-agents/audio.ts index e54f5ce..0c46d75 100644 --- a/src/instrumentation/openai-agents/audio.ts +++ b/src/instrumentation/openai-agents/audio.ts @@ -8,7 +8,7 @@ import { import { GEN_AI_REQUEST_MODEL, GEN_AI_RESPONSE_MODEL -} from '../../semconv/model'; +} from '../../semconv/gen_ai'; import { extractAttributesFromMapping, AttributeMap diff --git a/src/instrumentation/openai-agents/generation.ts b/src/instrumentation/openai-agents/generation.ts index 17d751a..c1cfbe1 100644 --- a/src/instrumentation/openai-agents/generation.ts +++ b/src/instrumentation/openai-agents/generation.ts @@ -8,11 +8,11 @@ import { GEN_AI_REQUEST_PRESENCE_PENALTY, GEN_AI_REQUEST_STOP_SEQUENCES, GEN_AI_RESPONSE_MODEL, - GEN_AI_RESPONSE_FINISH_REASONS, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, + GEN_AI_RESPONSE_FINISH_REASON, + GEN_AI_USAGE_PROMPT_TOKENS, + GEN_AI_USAGE_COMPLETION_TOKENS, GEN_AI_USAGE_TOTAL_TOKENS -} from '../../semconv/model'; +} from '../../semconv/gen_ai'; import { GEN_AI_PROMPT_ROLE, GEN_AI_PROMPT_CONTENT, @@ -74,8 +74,8 @@ const MODEL_CONFIG_ATTRIBUTES: AttributeMap = { }; const 
USAGE_ATTRIBUTES: AttributeMap = { - [GEN_AI_USAGE_INPUT_TOKENS]: 'prompt_tokens', - [GEN_AI_USAGE_OUTPUT_TOKENS]: 'completion_tokens', + [GEN_AI_USAGE_PROMPT_TOKENS]: 'prompt_tokens', + [GEN_AI_USAGE_COMPLETION_TOKENS]: 'completion_tokens', [GEN_AI_USAGE_TOTAL_TOKENS]: 'total_tokens' }; diff --git a/src/instrumentation/openai-agents/response.ts b/src/instrumentation/openai-agents/response.ts index ae73027..0fa3af6 100644 --- a/src/instrumentation/openai-agents/response.ts +++ b/src/instrumentation/openai-agents/response.ts @@ -6,10 +6,10 @@ import { import { GEN_AI_REQUEST_MODEL, GEN_AI_RESPONSE_MODEL, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, + GEN_AI_USAGE_PROMPT_TOKENS, + GEN_AI_USAGE_COMPLETION_TOKENS, GEN_AI_USAGE_TOTAL_TOKENS -} from '../../semconv/model'; +} from '../../semconv/gen_ai'; import { GEN_AI_PROMPT_ROLE, GEN_AI_PROMPT_CONTENT, @@ -66,8 +66,8 @@ const RESPONSE_MODEL_ATTRIBUTES: AttributeMap = { }; const RESPONSE_USAGE_ATTRIBUTES: AttributeMap = { - [GEN_AI_USAGE_INPUT_TOKENS]: 'input_tokens', - [GEN_AI_USAGE_OUTPUT_TOKENS]: 'output_tokens', + [GEN_AI_USAGE_PROMPT_TOKENS]: 'input_tokens', + [GEN_AI_USAGE_COMPLETION_TOKENS]: 'output_tokens', [GEN_AI_USAGE_TOTAL_TOKENS]: 'total_tokens' }; @@ -126,15 +126,15 @@ export function convertResponseSpan(data: ResponseSpanData): AttributeMap { } // _response was added with https://github.com/openai/openai-agents-js/pull/85 - if (data._response) { + if ((data as any)._response) { Object.assign(attributes, - extractAttributesFromMapping(data._response, RESPONSE_MODEL_ATTRIBUTES)); + extractAttributesFromMapping((data as any)._response, RESPONSE_MODEL_ATTRIBUTES)); Object.assign(attributes, - extractAttributesFromMapping(data._response.usage, RESPONSE_USAGE_ATTRIBUTES)); + extractAttributesFromMapping((data as any)._response.usage, RESPONSE_USAGE_ATTRIBUTES)); const completions = []; - if (Array.isArray(data._response.output)) { - for (const item of data._response.output) { + if 
(Array.isArray((data as any)._response.output)) { + for (const item of (data as any)._response.output) { switch (item.type) { case 'message': { // ResponseOutputMessage for (const contentItem of item.content || []) { @@ -195,6 +195,7 @@ export function convertResponseSpan(data: ResponseSpanData): AttributeMap { } } } + } if (completions.length > 0) { Object.assign(attributes, @@ -203,5 +204,4 @@ export function convertResponseSpan(data: ResponseSpanData): AttributeMap { } return attributes; -} - +} \ No newline at end of file diff --git a/src/instrumentation/test-instrumentation.ts b/src/instrumentation/test-instrumentation.ts index ba08ffb..23e7ef3 100644 --- a/src/instrumentation/test-instrumentation.ts +++ b/src/instrumentation/test-instrumentation.ts @@ -1,35 +1,30 @@ +import { SpanKind, SpanStatusCode } from '@opentelemetry/api'; import { InstrumentationBase } from './base'; -import { trace, SpanKind, SpanStatusCode } from '@opentelemetry/api'; import { InstrumentorMetadata } from '../types'; import { GEN_AI_REQUEST_MODEL, GEN_AI_REQUEST_MAX_TOKENS, GEN_AI_REQUEST_TEMPERATURE, GEN_AI_RESPONSE_MODEL, - GEN_AI_USAGE_INPUT_TOKENS, - GEN_AI_USAGE_OUTPUT_TOKENS, + GEN_AI_USAGE_PROMPT_TOKENS, + GEN_AI_USAGE_COMPLETION_TOKENS, GEN_AI_USAGE_TOTAL_TOKENS -} from '../semconv/model'; -import { - GEN_AI_PROMPT_ROLE, - GEN_AI_PROMPT_CONTENT, - GEN_AI_COMPLETION_ROLE, - GEN_AI_COMPLETION_CONTENT -} from '../semconv/messages'; +} from '../semconv/gen_ai'; /** - * Test instrumenter that generates sample spans without instrumenting any real libraries. - * Useful for verifying that span generation and export works correctly. + * Test instrumentation for agentops-test-lib module. + * This is used for testing the instrumentation system. 
*/ export class TestInstrumentation extends InstrumentationBase { static readonly metadata: InstrumentorMetadata = { name: 'test-instrumentation', version: '1.0.0', - description: 'Test instrumentation for generating sample spans', + description: 'Test instrumentation for agentops-test-lib', targetLibrary: 'agentops-test-lib', targetVersions: ['*'] }; + static readonly useRuntimeTargeting = true; protected setup(moduleExports: any, moduleVersion?: string): any { console.log('Test instrumentation enabled - patching agentops-test-lib'); @@ -71,8 +66,8 @@ export class TestInstrumentation extends InstrumentationBase { 'gen_ai.completion.0.content': result.text, // Usage tokens - [GEN_AI_USAGE_INPUT_TOKENS]: result.usage.promptTokens, - [GEN_AI_USAGE_OUTPUT_TOKENS]: result.usage.completionTokens, + [GEN_AI_USAGE_PROMPT_TOKENS]: result.usage.promptTokens, + [GEN_AI_USAGE_COMPLETION_TOKENS]: result.usage.completionTokens, [GEN_AI_USAGE_TOTAL_TOKENS]: result.usage.totalTokens, }; console.log('[test-instrumentation] Adding response attributes:', responseAttributes); diff --git a/src/semconv/ai.ts b/src/semconv/ai.ts new file mode 100644 index 0000000..0aa639a --- /dev/null +++ b/src/semconv/ai.ts @@ -0,0 +1,60 @@ +/** + * AI SDK specific semantic conventions. + * These conventions are specific to the AI SDK by Vercel and should only be used + * for AI SDK instrumentations. For general AI conventions, use gen_ai.ts. 
+ */ + +// AI SDK Operation identification +export const AI_OPERATION_NAME = 'ai.operation.name'; +export const AI_OPERATION_TYPE = 'ai.operation.type'; +export const AI_OPERATION_ID = 'ai.operation.id'; + +// AI SDK Generation attributes +export const AI_GENERATION_TYPE = 'ai.generation.type'; +export const AI_GENERATION_MODE = 'ai.generation.mode'; +export const AI_GENERATION_STREAMING = 'ai.generation.streaming'; + +// AI SDK Model attributes (specific to AI SDK) +export const AI_MODEL_PROVIDER = 'ai.model.provider'; +export const AI_MODEL_ID = 'ai.model.id'; +export const AI_MODEL_VERSION = 'ai.model.version'; + +// AI SDK Response attributes (specific to AI SDK) +export const AI_RESPONSE_TIMESTAMP = 'ai.response.timestamp'; +export const AI_RESPONSE_TEXT = 'ai.response.text'; +export const AI_RESPONSE_OBJECT = 'ai.response.object'; + +// AI SDK Embedding attributes +export const AI_EMBEDDING_MODEL = 'ai.embedding.model'; +export const AI_EMBEDDING_DIMENSIONS = 'ai.embedding.dimensions'; +export const AI_EMBEDDING_INPUT = 'ai.embedding.input'; +export const AI_EMBEDDING_OUTPUT = 'ai.embedding.output'; +export const AI_EMBEDDING_USAGE_TOKENS = 'ai.embedding.usage.tokens'; + +// AI SDK Schema attributes (for structured output) +export const AI_SCHEMA_NAME = 'ai.schema.name'; +export const AI_SCHEMA_DESCRIPTION = 'ai.schema.description'; +export const AI_SCHEMA_TYPE = 'ai.schema.type'; +export const AI_SCHEMA_DEFINITION = 'ai.schema.definition'; + +// AI SDK Stream attributes +export const AI_STREAM_TYPE = 'ai.stream.type'; +export const AI_STREAM_CHUNK_COUNT = 'ai.stream.chunk_count'; +export const AI_STREAM_FIRST_CHUNK_TIME = 'ai.stream.first_chunk_time'; +export const AI_STREAM_LAST_CHUNK_TIME = 'ai.stream.last_chunk_time'; + +// AI SDK Telemetry attributes +export const AI_TELEMETRY_FUNCTION_ID = 'ai.telemetry.function_id'; +export const AI_TELEMETRY_METADATA = 'ai.telemetry.metadata'; +export const AI_TELEMETRY_RECORD_INPUTS = 'ai.telemetry.record_inputs'; 
+export const AI_TELEMETRY_RECORD_OUTPUTS = 'ai.telemetry.record_outputs'; + +// AI SDK Provider attributes +export const AI_PROVIDER_NAME = 'ai.provider.name'; +export const AI_PROVIDER_VERSION = 'ai.provider.version'; +export const AI_PROVIDER_METADATA = 'ai.provider.metadata'; + +// AI SDK Settings attributes +export const AI_SETTINGS_MAX_RETRIES = 'ai.settings.max_retries'; +export const AI_SETTINGS_TIMEOUT = 'ai.settings.timeout'; +export const AI_SETTINGS_ABORT_SIGNAL = 'ai.settings.abort_signal'; \ No newline at end of file diff --git a/src/semconv/core.ts b/src/semconv/core.ts new file mode 100644 index 0000000..650fe3f --- /dev/null +++ b/src/semconv/core.ts @@ -0,0 +1,24 @@ +/** + * Core attributes applicable to all spans. + * Based on AgentOps Python SDK core conventions. + */ + +// Error attributes +export const ERROR_TYPE = 'error.type'; +export const ERROR_MESSAGE = 'error.message'; + +// AgentOps specific +export const AGENTOPS_TAGS = 'agentops.tags'; + +// Trace context attributes +export const TRACE_ID = 'trace.id'; +export const SPAN_ID = 'span.id'; +export const PARENT_ID = 'parent.id'; +export const GROUP_ID = 'group.id'; + +// Operation attributes +export const OPERATION_NAME = 'operation.name'; +export const OPERATION_VERSION = 'operation.version'; + +// Session/Trace attributes +export const AGENTOPS_SESSION_END_STATE = 'agentops.session.end_state'; \ No newline at end of file diff --git a/src/semconv/gen_ai.ts b/src/semconv/gen_ai.ts new file mode 100644 index 0000000..bc5ae6d --- /dev/null +++ b/src/semconv/gen_ai.ts @@ -0,0 +1,84 @@ +/** + * General AI semantic conventions based on OpenTelemetry GenAI semantic conventions. + * These conventions are framework-agnostic and should be used across all AI instrumentations. 
+ * Based on AgentOps Python SDK span_attributes.py + */ + +// System +export const GEN_AI_SYSTEM = 'gen_ai.system'; + +// Request attributes +export const GEN_AI_REQUEST_MODEL = 'gen_ai.request.model'; +export const GEN_AI_REQUEST_MAX_TOKENS = 'gen_ai.request.max_tokens'; +export const GEN_AI_REQUEST_TEMPERATURE = 'gen_ai.request.temperature'; +export const GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p'; +export const GEN_AI_REQUEST_TOP_K = 'gen_ai.request.top_k'; +export const GEN_AI_REQUEST_SEED = 'gen_ai.request.seed'; +export const GEN_AI_REQUEST_SYSTEM_INSTRUCTION = 'gen_ai.request.system_instruction'; +export const GEN_AI_REQUEST_CANDIDATE_COUNT = 'gen_ai.request.candidate_count'; +export const GEN_AI_REQUEST_STOP_SEQUENCES = 'gen_ai.request.stop_sequences'; +export const GEN_AI_REQUEST_TYPE = 'gen_ai.request.type'; +export const GEN_AI_REQUEST_STREAMING = 'gen_ai.request.streaming'; +export const GEN_AI_REQUEST_FREQUENCY_PENALTY = 'gen_ai.request.frequency_penalty'; +export const GEN_AI_REQUEST_PRESENCE_PENALTY = 'gen_ai.request.presence_penalty'; +export const GEN_AI_REQUEST_FUNCTIONS = 'gen_ai.request.functions'; +export const GEN_AI_REQUEST_HEADERS = 'gen_ai.request.headers'; +export const GEN_AI_REQUEST_INSTRUCTIONS = 'gen_ai.request.instructions'; +export const GEN_AI_REQUEST_VOICE = 'gen_ai.request.voice'; +export const GEN_AI_REQUEST_SPEED = 'gen_ai.request.speed'; + +// Content +export const GEN_AI_PROMPT = 'gen_ai.prompt'; +export const GEN_AI_COMPLETION = 'gen_ai.completion'; +export const GEN_AI_COMPLETION_CHUNK = 'gen_ai.completion.chunk'; + +// Response attributes +export const GEN_AI_RESPONSE_MODEL = 'gen_ai.response.model'; +export const GEN_AI_RESPONSE_FINISH_REASON = 'gen_ai.response.finish_reason'; +export const GEN_AI_RESPONSE_STOP_REASON = 'gen_ai.response.stop_reason'; +export const GEN_AI_RESPONSE_ID = 'gen_ai.response.id'; + +// Usage metrics (NOTE: Using prompt_tokens and completion_tokens as per Python SDK) +export const 
GEN_AI_USAGE_COMPLETION_TOKENS = 'gen_ai.usage.completion_tokens'; +export const GEN_AI_USAGE_PROMPT_TOKENS = 'gen_ai.usage.prompt_tokens'; +export const GEN_AI_USAGE_TOTAL_TOKENS = 'gen_ai.usage.total_tokens'; +export const GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS = 'gen_ai.usage.cache_creation_input_tokens'; +export const GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS = 'gen_ai.usage.cache_read_input_tokens'; +export const GEN_AI_USAGE_REASONING_TOKENS = 'gen_ai.usage.reasoning_tokens'; +export const GEN_AI_USAGE_STREAMING_TOKENS = 'gen_ai.usage.streaming_tokens'; +export const GEN_AI_USAGE_TOTAL_COST = 'gen_ai.usage.total_cost'; + +// Token type +export const GEN_AI_TOKEN_TYPE = 'gen_ai.token.type'; + +// User +export const GEN_AI_USER = 'gen_ai.user'; + +// OpenAI specific +export const GEN_AI_OPENAI_SYSTEM_FINGERPRINT = 'gen_ai.openai.system_fingerprint'; +export const GEN_AI_OPENAI_INSTRUCTIONS = 'gen_ai.openai.instructions'; +export const GEN_AI_OPENAI_API_BASE = 'gen_ai.openai.api_base'; +export const GEN_AI_OPENAI_API_VERSION = 'gen_ai.openai.api_version'; +export const GEN_AI_OPENAI_API_TYPE = 'gen_ai.openai.api_type'; + +// Streaming-specific attributes +export const GEN_AI_STREAMING_TIME_TO_FIRST_TOKEN = 'gen_ai.streaming.time_to_first_token'; +export const GEN_AI_STREAMING_TIME_TO_GENERATE = 'gen_ai.streaming.time_to_generate'; +export const GEN_AI_STREAMING_DURATION = 'gen_ai.streaming_duration'; +export const GEN_AI_STREAMING_CHUNK_COUNT = 'gen_ai.streaming.chunk_count'; + +// AgentOps specific attributes +export const AGENTOPS_ENTITY_OUTPUT = 'agentops.entity.output'; +export const AGENTOPS_ENTITY_INPUT = 'agentops.entity.input'; +export const AGENTOPS_SPAN_KIND = 'agentops.span.kind'; +export const AGENTOPS_ENTITY_NAME = 'agentops.entity.name'; +export const AGENTOPS_DECORATOR_SPEC = 'agentops.{entity_kind}.spec'; +export const AGENTOPS_DECORATOR_INPUT = 'agentops.{entity_kind}.input'; +export const AGENTOPS_DECORATOR_OUTPUT = 
'agentops.{entity_kind}.output'; +export const AGENTOPS_STREAMING = 'agentops.streaming'; +export const AGENTOPS_TELEMETRY_ENABLED = 'agentops.telemetry.enabled'; +export const AGENTOPS_INSTRUMENTATION_NAME = 'agentops.instrumentation.name'; +export const AGENTOPS_INSTRUMENTATION_VERSION = 'agentops.instrumentation.version'; +export const AGENTOPS_AUTO_INSTRUMENTED = 'agentops.auto_instrumented'; +export const AGENTOPS_FUNCTION_NAME = 'agentops.function_name'; +export const AGENTOPS_ORIGINAL_SPAN_NAME = 'agentops.original.span.name'; \ No newline at end of file diff --git a/src/semconv/messages.ts b/src/semconv/messages.ts index da5fc37..2642054 100644 --- a/src/semconv/messages.ts +++ b/src/semconv/messages.ts @@ -1,4 +1,9 @@ +/** + * Message-related semantic conventions. + * Based on AgentOps Python SDK message.py conventions. + */ +// Basic message attributes export const GEN_AI_MESSAGE_ROLE = 'gen_ai.message.role'; export const GEN_AI_MESSAGE_CONTENT = 'gen_ai.message.content'; export const GEN_AI_MESSAGE_NAME = 'gen_ai.message.name'; @@ -6,18 +11,38 @@ export const GEN_AI_MESSAGE_FUNCTION_CALL_NAME = 'gen_ai.message.function_call.n export const GEN_AI_MESSAGE_FUNCTION_CALL_ARGUMENTS = 'gen_ai.message.function_call.arguments'; export const GEN_AI_MESSAGE_TOOL_CALLS = 'gen_ai.message.tool_calls'; +// Indexed prompt messages (with {i} for interpolation) export const GEN_AI_PROMPT_ROLE = 'gen_ai.prompt.{i}.role'; export const GEN_AI_PROMPT_CONTENT = 'gen_ai.prompt.{i}.content'; export const GEN_AI_PROMPT_TYPE = 'gen_ai.prompt.{i}.type'; +export const GEN_AI_PROMPT_SPEAKER = 'gen_ai.prompt.{i}.speaker'; +// Indexed function calls (with {i} for interpolation) export const GEN_AI_TOOL_CALL_ID = 'gen_ai.request.tools.{i}.id'; export const GEN_AI_TOOL_CALL_TYPE = 'gen_ai.request.tools.{i}.type'; export const GEN_AI_TOOL_CALL_NAME = 'gen_ai.request.tools.{i}.name'; export const GEN_AI_TOOL_CALL_DESCRIPTION = 'gen_ai.request.tools.{i}.description'; export const 
GEN_AI_TOOL_CALL_ARGUMENTS = 'gen_ai.request.tools.{i}.arguments'; +// Indexed completions (with {i} for interpolation) export const GEN_AI_COMPLETION_ID = 'gen_ai.completion.{i}.id'; export const GEN_AI_COMPLETION_TYPE = 'gen_ai.completion.{i}.type'; export const GEN_AI_COMPLETION_ROLE = 'gen_ai.completion.{i}.role'; export const GEN_AI_COMPLETION_CONTENT = 'gen_ai.completion.{i}.content'; -export const GEN_AI_COMPLETION_FINISH_REASON = 'gen_ai.completion.{i}.finish_reason'; \ No newline at end of file +export const GEN_AI_COMPLETION_FINISH_REASON = 'gen_ai.completion.{i}.finish_reason'; +export const GEN_AI_COMPLETION_SPEAKER = 'gen_ai.completion.{i}.speaker'; + +// Indexed tool calls (with {i}/{j} for nested interpolation) +export const GEN_AI_COMPLETION_TOOL_CALL_ID = 'gen_ai.completion.{i}.tool_calls.{j}.id'; +export const GEN_AI_COMPLETION_TOOL_CALL_TYPE = 'gen_ai.completion.{i}.tool_calls.{j}.type'; +export const GEN_AI_COMPLETION_TOOL_CALL_STATUS = 'gen_ai.completion.{i}.tool_calls.{j}.status'; +export const GEN_AI_COMPLETION_TOOL_CALL_NAME = 'gen_ai.completion.{i}.tool_calls.{j}.name'; +export const GEN_AI_COMPLETION_TOOL_CALL_DESCRIPTION = 'gen_ai.completion.{i}.tool_calls.{j}.description'; +export const GEN_AI_COMPLETION_TOOL_CALL_ARGUMENTS = 'gen_ai.completion.{i}.tool_calls.{j}.arguments'; + +// Indexed annotations (with {i}/{j} for nested interpolation) +export const GEN_AI_COMPLETION_ANNOTATION_START_INDEX = 'gen_ai.completion.{i}.annotations.{j}.start_index'; +export const GEN_AI_COMPLETION_ANNOTATION_END_INDEX = 'gen_ai.completion.{i}.annotations.{j}.end_index'; +export const GEN_AI_COMPLETION_ANNOTATION_TITLE = 'gen_ai.completion.{i}.annotations.{j}.title'; +export const GEN_AI_COMPLETION_ANNOTATION_TYPE = 'gen_ai.completion.{i}.annotations.{j}.type'; +export const GEN_AI_COMPLETION_ANNOTATION_URL = 'gen_ai.completion.{i}.annotations.{j}.url'; \ No newline at end of file diff --git a/src/semconv/model.ts b/src/semconv/model.ts deleted file mode 
100644 index 40cbbb7..0000000 --- a/src/semconv/model.ts +++ /dev/null @@ -1,16 +0,0 @@ - -export const GEN_AI_REQUEST_MODEL = 'gen_ai.request.model'; -export const GEN_AI_REQUEST_MAX_TOKENS = 'gen_ai.request.max_tokens'; -export const GEN_AI_REQUEST_TEMPERATURE = 'gen_ai.request.temperature'; -export const GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p'; -export const GEN_AI_REQUEST_FREQUENCY_PENALTY = 'gen_ai.request.frequency_penalty'; -export const GEN_AI_REQUEST_PRESENCE_PENALTY = 'gen_ai.request.presence_penalty'; -export const GEN_AI_REQUEST_STOP_SEQUENCES = 'gen_ai.request.stop_sequences'; - -export const GEN_AI_RESPONSE_MODEL = 'gen_ai.response.model'; -export const GEN_AI_RESPONSE_FINISH_REASONS = 'gen_ai.response.finish_reasons'; - -export const GEN_AI_USAGE_INPUT_TOKENS = 'gen_ai.usage.prompt_tokens'; -export const GEN_AI_USAGE_OUTPUT_TOKENS = 'gen_ai.usage.completion_tokens'; -export const GEN_AI_USAGE_TOTAL_TOKENS = 'gen_ai.usage.total_tokens'; -// TODO cache and reasoning tokens \ No newline at end of file diff --git a/src/tracing.ts b/src/tracing.ts index ef0a0a3..a5f3f38 100644 --- a/src/tracing.ts +++ b/src/tracing.ts @@ -9,6 +9,7 @@ import { Config, LogLevel } from './types'; import { BearerToken } from './api'; import { InstrumentationBase } from './instrumentation/base'; import { logToConsole } from './log'; +import { AISDKExporter } from './instrumentation/ai-sdk/exporter'; const debug = require('debug')('agentops:tracing'); @@ -66,13 +67,17 @@ class Exporter extends OTLPTraceExporter { * @param result - The export result */ private onExportResult(spans: ReadableSpan[], result: ExportResult): void { + debug(`export result: ${result.code}, spans: ${spans.length}`); + if (result.code === ExportResultCode.SUCCESS) { spans.forEach(span => { this.trackExportedTrace(span); }); debug(`exported ${spans.length} span(s)`); + console.log(`✅ Successfully exported ${spans.length} span(s) to AgentOps`); } else { - console.error(`Export failed for 
${spans.length} spans: ${result.error?.message || 'Unknown error'}`); + console.error(`❌ Export failed for ${spans.length} spans: ${result.error?.message || 'Unknown error'}`); + console.error(`Export result code: ${result.code}`); } } @@ -99,7 +104,7 @@ class Exporter extends OTLPTraceExporter { */ export class TracingCore { private sdk: OpenTelemetryNodeSDK | null = null; - private exporter: Exporter | null = null; + private exporter: SpanExporter | null = null; private processor: BatchSpanProcessor | null = null; /** @@ -116,13 +121,17 @@ export class TracingCore { private instrumentations: InstrumentationBase[], resource: Resource ) { - this.exporter = new Exporter({ + // Create the base AgentOps exporter + const baseExporter = new Exporter({ url: `${config.otlpEndpoint}/v1/traces`, headers: { authorization: authToken.getAuthHeader(), }, }); + // Wrap with AI SDK exporter to transform attributes + this.exporter = new AISDKExporter(baseExporter); + this.processor = new BatchSpanProcessor(this.exporter, { maxExportBatchSize: MAX_EXPORT_BATCH_SIZE, scheduledDelayMillis: SCHEDULED_DELAY_MILLIS, @@ -132,7 +141,7 @@ export class TracingCore { this.sdk = new OpenTelemetryNodeSDK({ resource: resource, instrumentations: instrumentations, - spanProcessor: this.processor, + spanProcessor: this.processor as any, }); // Configure logging after resource attributes are settled