Skip to content

Commit 27f0f97

Browse files
authored
ref(feedback): raise specific exception for vertex errors and don't log success (getsentry#73550)
The current code raises KeyError when a request to VertexAI fails. https://console.cloud.google.com/logs/query;cursorTimestamp=2024-06-28T13:19:08.894346671Z;duration=P1D;query=resource.type%3D%22k8s_container%22%0Alabels.name%3D~%22sentry.feedback.usecases.create%22%0Atimestamp%3D%222024-06-28T13:19:08.894346671Z%22%0AinsertId%3D%22ht87o5dzkxma0imr%22;summaryFields=:true:32:beginning?project=internal-sentry This change raises a specific VertexRequestFailed exception instead. It also moves the spam_detection counter into create_feedback_issue, so a single metric captures all three outcomes of spam detection: true, false, and failed (None).
1 parent 50354c5 commit 27f0f97

File tree

4 files changed

+13
-5
lines changed

4 files changed

+13
-5
lines changed

src/sentry/feedback/usecases/create_feedback.py

+5
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,11 @@ def create_feedback_issue(event, project_id: int, source: FeedbackCreationSource
198198
except Exception:
199199
# until we have LLM error types ironed out, just catch all exceptions
200200
logger.exception("Error checking if message is spam")
201+
metrics.incr(
202+
"feedback.create_feedback_issue.spam_detection",
203+
tags={"is_spam": is_message_spam},
204+
sample_rate=1.0,
205+
)
201206

202207
# Note that some of the fields below like title and subtitle
203208
# are not used by the feedback UI, but are required.

src/sentry/feedback/usecases/spam_detection.py

-1
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,6 @@ def is_spam(message: str):
5252
"trimmed_response": trimmed_response,
5353
},
5454
)
55-
metrics.incr("spam-detection", tags={"is_spam": is_spam}, sample_rate=1.0)
5655
return is_spam
5756

5857

src/sentry/llm/exceptions.py

+4
Original file line numberDiff line numberDiff line change
@@ -12,3 +12,7 @@ class InvalidModelError(ValueError):
1212

1313
class InvalidTemperature(ValueError):
1414
pass
15+
16+
17+
class VertexRequestFailed(RuntimeError):
18+
pass

src/sentry/llm/providers/vertex.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import google.auth.transport.requests
55
import requests
66

7+
from sentry.llm.exceptions import VertexRequestFailed
78
from sentry.llm.providers.base import LlmModelBase
89
from sentry.llm.types import UseCaseConfig
910

@@ -50,13 +51,12 @@ def _complete_prompt(
5051

5152
response = requests.post(vertex_url, headers=headers, json=payload)
5253

53-
if response.status_code == 200:
54-
logger.info("Request successful.")
55-
else:
56-
logger.info(
54+
if response.status_code != 200:
55+
logger.error(
5756
"Request failed with status code and response text.",
5857
extra={"status_code": response.status_code, "response_text": response.text},
5958
)
59+
raise VertexRequestFailed(f"Response {response.status_code}: {response.text}")
6060

6161
return response.json()["predictions"][0]["content"]
6262

0 commit comments

Comments (0)