19 changes: 16 additions & 3 deletions docs-website/scripts/test_python_snippets.py
@@ -331,12 +331,25 @@ def run_snippet(snippet: Snippet, timeout_seconds: int, cwd: str, skip_unsafe: b
            stderr=completed.stderr,
        )
    except subprocess.TimeoutExpired as exc:
+        # Handle stderr which might be bytes or str
+        stderr_text = exc.stderr
+        if stderr_text is None:
+            stderr_text = ""
+        elif isinstance(stderr_text, bytes):
+            stderr_text = stderr_text.decode("utf-8", errors="replace")
+        stderr_text = stderr_text + f"\n[timeout after {timeout_seconds}s]"
+
+        # Handle stdout which might be bytes or str
+        stdout_text = exc.stdout
+        if stdout_text is not None and isinstance(stdout_text, bytes):
+            stdout_text = stdout_text.decode("utf-8", errors="replace")
+
        return ExecutionResult(
            snippet=snippet,
            status=ExecutionStatus.FAILED,
            reason=f"timeout after {timeout_seconds}s",
-            stdout=exc.stdout or None,
-            stderr=(exc.stderr or "") + f"\n[timeout after {timeout_seconds}s]",
+            stdout=stdout_text,
+            stderr=stderr_text,
        )


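For context on the new guard in `run_snippet`: `subprocess.TimeoutExpired` exposes `stdout`/`stderr` as `None`, `bytes`, or `str`, depending on whether the child process was run with `text=True`, so concatenating an f-string onto `exc.stderr` directly can fail. A minimal sketch of the same normalization pattern (illustrative only, not the repository's code; the POSIX `sleep` command and the 1-second timeout are arbitrary choices):

```python
import subprocess
from typing import Optional, Union


def normalize_stream(value: Union[bytes, str, None]) -> str:
    """Coerce a captured stream to str; TimeoutExpired may hand back None, bytes, or str."""
    if value is None:
        return ""
    if isinstance(value, bytes):
        return value.decode("utf-8", errors="replace")
    return value


try:
    # capture_output without text=True means captured streams arrive as bytes
    subprocess.run(["sleep", "5"], capture_output=True, timeout=1)
except subprocess.TimeoutExpired as exc:
    stderr_text = normalize_stream(exc.stderr) + "\n[timeout after 1s]"
    stdout_text: Optional[str] = normalize_stream(exc.stdout) or None
    print(repr(stdout_text), repr(stderr_text))
```

Using `errors="replace"` keeps the result printable even when the timed-out snippet emitted non-UTF-8 output.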
@@ -414,7 +427,7 @@ def main(argv: Optional[list[str]] = None) -> int:
"(defaults to docs and versioned_docs)"
),
)
-parser.add_argument("--timeout-seconds", type=int, default=30, help="Timeout per snippet execution (seconds)")
+parser.add_argument("--timeout-seconds", type=int, default=600, help="Timeout per snippet execution (seconds)")
parser.add_argument(
"--allow-unsafe", action="store_true", help="Allow execution of snippets with potentially unsafe patterns"
)
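With the higher default, long-running documentation snippets no longer need an explicit flag; overriding is still possible. A hypothetical programmatic invocation, assuming `main()` forwards `argv` to `parser.parse_args()` as the signature above suggests (the import path is an assumption):

```python
# Hypothetical: run the snippet checker with a tighter per-snippet timeout,
# equivalent to passing `--timeout-seconds 60` on the command line.
from test_python_snippets import main  # assumes the script's directory is on sys.path

raise SystemExit(main(["--timeout-seconds", "60"]))
```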
2 changes: 1 addition & 1 deletion haystack/components/audio/whisper_local.py
@@ -46,7 +46,7 @@ class LocalWhisperTranscriber:

whisper = LocalWhisperTranscriber(model="small")
whisper.warm_up()
-transcription = whisper.run(sources=["path/to/audio/file"])
+transcription = whisper.run(sources=["test/test_files/audio/answer.wav"])
```
"""

4 changes: 2 additions & 2 deletions haystack/components/audio/whisper_remote.py
@@ -31,8 +31,8 @@ class RemoteWhisperTranscriber:
```python
from haystack.components.audio import RemoteWhisperTranscriber

-whisper = RemoteWhisperTranscriber(api_key=Secret.from_token("<your-api-key>"), model="tiny")
-transcription = whisper.run(sources=["path/to/audio/file"])
+whisper = RemoteWhisperTranscriber(model="whisper-1")
+transcription = whisper.run(sources=["test/test_files/audio/answer.wav"])
```
"""

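The updated example omits the explicit `api_key` argument; the component then falls back to its default secret, which in Haystack typically resolves the `OPENAI_API_KEY` environment variable (worth verifying against the installed version's signature). An equivalent explicit setup, for illustration only:

```python
import os

# Illustration: the placeholder key only satisfies client construction; real calls need a valid key.
os.environ.setdefault("OPENAI_API_KEY", "<your-api-key>")

from haystack.components.audio import RemoteWhisperTranscriber

whisper = RemoteWhisperTranscriber(model="whisper-1")
```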
31 changes: 15 additions & 16 deletions haystack/components/builders/chat_prompt_builder.py
@@ -71,11 +71,10 @@ class ChatPromptBuilder:
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage
from haystack import Pipeline
-from haystack.utils import Secret

# no parameter init, we don't use any runtime template variables
prompt_builder = ChatPromptBuilder()
-llm = OpenAIChatGenerator(api_key=Secret.from_token("<your-api-key>"))
+llm = OpenAIChatGenerator(model="gpt-5-mini")

pipe = Pipeline()
pipe.add_component("prompt_builder", prompt_builder)
@@ -90,13 +89,12 @@ class ChatPromptBuilder:
res = pipe.run(data={"prompt_builder": {"template_variables": {"location": location, "language": language},
"template": messages}})
print(res)
-
->> {'llm': {'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=
-"Berlin is the capital city of Germany and one of the most vibrant
-and diverse cities in Europe. Here are some key things to know...Enjoy your time exploring the vibrant and dynamic
-capital of Germany!")], _name=None, _meta={'model': 'gpt-5-mini',
-'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 27, 'completion_tokens': 681, 'total_tokens':
-708}})]}}
+# >> {'llm': {'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=
+# "Berlin is the capital city of Germany and one of the most vibrant
+# and diverse cities in Europe. Here are some key things to know...Enjoy your time exploring the vibrant and dynamic
+# capital of Germany!")], _name=None, _meta={'model': 'gpt-5-mini',
+# 'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 27, 'completion_tokens': 681, 'total_tokens':
+# 708}})]}}

messages = [system_message, ChatMessage.from_user("What's the weather forecast for {{location}} in the next
{{day_count}} days?")]
@@ -105,12 +103,12 @@ class ChatPromptBuilder:
"template": messages}})

print(res)
->> {'llm': {'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=
-"Here is the weather forecast for Berlin in the next 5
-days:\\n\\nDay 1: Mostly cloudy with a high of 22°C (72°F) and...so it's always a good idea to check for updates
-closer to your visit.")], _name=None, _meta={'model': 'gpt-5-mini',
-'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 37, 'completion_tokens': 201,
-'total_tokens': 238}})]}}
+# >> {'llm': {'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=
+# "Here is the weather forecast for Berlin in the next 5
+# days:\\n\\nDay 1: Mostly cloudy with a high of 22°C (72°F) and...so it's always a good idea to check for updates
+# closer to your visit.")], _name=None, _meta={'model': 'gpt-5-mini',
+# 'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 37, 'completion_tokens': 201,
+# 'total_tokens': 238}})]}}
```

#### String prompt template
@@ -131,7 +129,8 @@ class ChatPromptBuilder:
{% endmessage %}
\"\"\"

-images = [ImageContent.from_file_path("apple.jpg"), ImageContent.from_file_path("orange.jpg")]
+images = [ImageContent.from_file_path("test/test_files/images/apple.jpg"),
+ImageContent.from_file_path("test/test_files/images/haystack-logo.png")]

builder = ChatPromptBuilder(template=template)
builder.run(user_name="John", images=images)