diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 881dda180..97ccd5067 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,6 +66,7 @@ jobs: python3 -m venv ./.venv source .venv/bin/activate make full + pip install nemoguardrails nest-asyncio - name: Static analysis with pyright run: | diff --git a/docs/examples/guard-as-action.ipynb b/docs/examples/guard-as-action.ipynb new file mode 100644 index 000000000..3861cfe2e --- /dev/null +++ b/docs/examples/guard-as-action.ipynb @@ -0,0 +1,474 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "bda9eda8b4566a0d", + "metadata": { + "collapsed": false + }, + "source": [ + "# Guard as Actions\n", + "\n", + "This guide will teach you how to use a `Guard` with any of the 60+ GuardrailsAI Validators as an action inside a guardrails configuration. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a5ddc8b17af62afa", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:27:11.284164Z", + "start_time": "2024-01-25T14:27:11.025161Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "# Init: remove any existing configuration\n", + "! rm -r config\n", + "! mkdir config" + ] + }, + { + "cell_type": "markdown", + "id": "724db36201c3d409", + "metadata": { + "collapsed": false + }, + "source": [ + "## Prerequisites\n", + "\n", + "We'll be using an OpenAI model for our LLM in this guide, so set up an OpenAI API key, if not already set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e52b23b90077cf4", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:27:11.418023Z", + "start_time": "2024-01-25T14:27:11.286549Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "! 
export OPENAI_API_KEY=$OPENAI_API_KEY # Replace with your own key" + ] + }, + { + "cell_type": "markdown", + "id": "4b6fb59034bcb2bb", + "metadata": { + "collapsed": false + }, + "source": [ + "If you're running this inside a notebook, you also need to patch the AsyncIO loop." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "7ba19d5c8bdc57a3", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:27:13.693091Z", + "start_time": "2024-01-25T14:27:13.686555Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "import nest_asyncio\n", + "\n", + "nest_asyncio.apply()" + ] + }, + { + "cell_type": "markdown", + "id": "b8b27d3fa09bbe91", + "metadata": { + "collapsed": false + }, + "source": [ + "## Sample Guard\n", + "\n", + "Let's create a sample Guard that can detect PII. First, install guardrails-ai." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5925945d", + "metadata": {}, + "outputs": [], + "source": [ + "! pip install guardrails-ai -q" + ] + }, + { + "cell_type": "markdown", + "id": "2c8fc267", + "metadata": {}, + "source": [ + "Next configure the guardrails cli so we can install the validator we want to use from the Guardrails Hub." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d9cf415", + "metadata": {}, + "outputs": [], + "source": [ + "! guardrails configure" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a208f1c", + "metadata": {}, + "outputs": [], + "source": [ + "! 
guardrails hub install hub://guardrails/detect_pii --no-install-local-models -q" + ] + }, + { + "cell_type": "markdown", + "id": "61f4fff5", + "metadata": {}, + "source": [ + "Now we can define our Guard.\n", + "This Guard will use the DetectPII validator to safeguard against leaking personally identifiable information such as names, email addresses, etc..\n", + "\n", + "Once the Guard is defined, we can test it with a static value to make sure it's working how we would expect." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "71aeb10e5fda9040", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:27:13.813566Z", + "start_time": "2024-01-25T14:27:13.693010Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ValidationOutcome(\n", + " call_id='14534730096',\n", + " raw_llm_output='My name is John Doe',\n", + " validation_summaries=[\n", + " ValidationSummary(\n", + " validator_name='DetectPII',\n", + " validator_status='fail',\n", + " property_path='$',\n", + " failure_reason='The following text in your response contains PII:\\nMy name is John Doe',\n", + " error_spans=[\n", + " ErrorSpan(start=11, end=19, reason='PII detected in John Doe')\n", + " ]\n", + " )\n", + " ],\n", + " validated_output='My name is ',\n", + " reask=None,\n", + " validation_passed=True,\n", + " error=None\n", + ")\n" + ] + } + ], + "source": [ + "from guardrails import Guard\n", + "from guardrails.hub import DetectPII\n", + "\n", + "g = Guard(name=\"pii_guard\").use(DetectPII([\"PERSON\", \"EMAIL_ADDRESS\"], on_fail=\"fix\"))\n", + "\n", + "print(g.validate(\"My name is John Doe\"))" + ] + }, + { + "cell_type": "markdown", + "id": "1a0725d977f5589b", + "metadata": { + "collapsed": false + }, + "source": [ + "## Guardrails Configuration \n", + "\n", + "Now we'll use the Guard we defeined above to create an action and a flow. 
Since we're calling our guard \"pii_guard\", we'll use \"pii_guard_validate\" in order to see if the LLM output is safe." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "a27c15cf3919fa5", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:27:13.820255Z", + "start_time": "2024-01-25T14:27:13.814191Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing config/rails.co\n" + ] + } + ], + "source": [ + "%%writefile config/rails.co\n", + "\n", + "\n", + "define flow detect_pii\n", + " $output = execute pii_guard_validate(text=$bot_message)\n", + "\n", + " if not $output\n", + " bot refuse to respond\n", + " stop\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "53403afb1e1a4b9c", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:27:13.821992Z", + "start_time": "2024-01-25T14:27:13.817004Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing config/config.yml\n" + ] + } + ], + "source": [ + "%%writefile config/config.yml\n", + "models:\n", + " - type: main\n", + " engine: openai\n", + " model: gpt-3.5-turbo-instruct\n", + "\n", + "rails:\n", + " output:\n", + " flows:\n", + " - detect_pii" + ] + }, + { + "cell_type": "markdown", + "id": "d25b3725", + "metadata": {}, + "source": [ + "To hook the Guardrails AI guard up so that it can be read from Colang, we use the integration's `register_guardrails_guard_actions` function.\n", + "This takes a name and registers two actions:\n", + "\n", + "1. [guard_name]_validate: This action is used to detect validation failures in outputs\n", + "2. 
[guard_name]_fix: This action is used to automatically fix validation failures in outputs, when possible"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "f2adca21d94e54b9",
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Fetching 5 files: 100%|██████████| 5/5 [00:00<00:00, 109226.67it/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "from nemoguardrails import RailsConfig, LLMRails\n",
+    "from nemoguardrails.integrations.guardrails_ai.guard_actions import register_guardrails_guard_actions\n",
+    "\n",
+    "config = RailsConfig.from_path(\"./config\")\n",
+    "rails = LLMRails(config)\n",
+    "\n",
+    "register_guardrails_guard_actions(rails, g, \"pii_guard\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ade12682dd9d8f0e",
+   "metadata": {
+    "collapsed": false
+   },
+   "source": [
+    "## Testing\n",
+    "\n",
+    "Let's try this out. If we invoke the guardrails configuration with a message that prompts the LLM to return personal information like names, email addresses, etc., it should refuse to respond."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "394311174e678d96",
+   "metadata": {
+    "ExecuteTime": {
+     "end_time": "2024-01-25T14:27:18.524958Z",
+     "start_time": "2024-01-25T14:27:18.518176Z"
+    },
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "I'm sorry, I can't respond to that.\n"
+     ]
+    }
+   ],
+   "source": [
+    "response = rails.generate(\"Who is the current president of the United States, and what was their email address?\")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0d545fa7",
+   "metadata": {},
+   "source": [
+    "Great! So the validation-only flow works. Next let's try the fix flow."
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "62bac8d3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting config/rails.co\n" + ] + } + ], + "source": [ + "%%writefile config/rails.co\n", + "\n", + "\n", + "define flow detect_pii\n", + " $output = execute pii_guard_fix(text=$bot_message)\n", + "\n", + " if not $output\n", + " bot refuse to respond\n", + " stop\n", + " else\n", + " $bot_message = $output\n" + ] + }, + { + "cell_type": "markdown", + "id": "2fa6d051", + "metadata": {}, + "source": [ + "If we send the same message, we should get a response this time, but any PII will be filtered out." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "ff14d3c0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The current president of the United States is . His official email address is . However, he also has a personal email address, which is .\n" + ] + } + ], + "source": [ + "config = RailsConfig.from_path(\"./config\")\n", + "rails = LLMRails(config)\n", + "\n", + "register_guardrails_guard_actions(rails, g, \"pii_guard\")\n", + "\n", + "response = rails.generate(\"Who is the current president of the United States, and what was their email address?\")\n", + "print(response)" + ] + }, + { + "cell_type": "markdown", + "id": "f6b457ce6e2957fd", + "metadata": { + "collapsed": false + }, + "source": [ + "If however, we prompt the LLM with a message that does not cause it to return PII, we should get the unaltered response." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "70409a3aafe89e95", + "metadata": { + "ExecuteTime": { + "end_time": "2024-01-25T14:29:15.370273Z", + "start_time": "2024-01-25T14:29:14.322661Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hello there! 
How can I assist you?\n" + ] + } + ], + "source": [ + "response = rails.generate(\"Hello!\")\n", + "print(response)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/examples/rails-as-guard.ipynb b/docs/examples/rails-as-guard.ipynb new file mode 100644 index 000000000..16aaf3cae --- /dev/null +++ b/docs/examples/rails-as-guard.ipynb @@ -0,0 +1,271 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Guardrails as Guards\n", + "This guide will teach you how to add NeMo Guardrails to a GuardrailsAI Guard." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Init: remove any existing configuration\n", + "! rm -r config\n", + "! mkdir config" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prerequisites\n", + "\n", + "We'll be using an OpenAI model for our LLM in this guide, so set up an OpenAI API key, if not already set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "! export OPENAI_API_KEY=$OPENAI_API_KEY # Replace with your own key" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you're running this inside a notebook, you also need to patch the AsyncIO loop." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import nest_asyncio\n", + "\n", + "nest_asyncio.apply()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Sample Guardrails\n", + "We'll start by creating a new guardrails configuration." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing config/config.yml\n" + ] + } + ], + "source": [ + "%%writefile config/config.yml\n", + "models:\n", + " - type: main\n", + " engine: openai\n", + " model: gpt-3.5-turbo-instruct" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll do a quick test to make sure everything is working as expected." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "db0d1ffa109e4961b4d6e19007d676a1", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Fetching 5 files: 0%| | 0/5 [00:00. His email address is . He can also be reached through his personal email at . Additionally, he is active on social media and can be contacted through his official Twitter account . Is there anything else you would like to know about President ?\n" + ] + } + ], + "source": [ + "response = guard(\n", + " messages=[{\n", + " \"role\": \"user\",\n", + " \"content\": \"Who is the current president of the United States, and what was their email address?\"\n", + " }]\n", + ")\n", + "\n", + "print(response.validated_output)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Great! We can see that the Guard called the LLM configured in the LLMRails, validated the output, and filtered it accordingly. If however, we prompt the LLM with a message that does not cause it to return PII, we should get the unaltered response." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hi there! It's nice to meet you. My name is AI Assistant. How can I help you today?\n" + ] + } + ], + "source": [ + "response = guard(\n", + " messages=[{\n", + " \"role\": \"user\",\n", + " \"content\": \"Hello!\"\n", + " }]\n", + ")\n", + "\n", + "print(response.validated_output)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/integrations/nemo_guardrails/config.yml b/docs/integrations/nemo_guardrails/config.yml new file mode 100644 index 000000000..2002a9690 --- /dev/null +++ b/docs/integrations/nemo_guardrails/config.yml @@ -0,0 +1,4 @@ +models: + - type: main + engine: openai + model: gpt-3.5-turbo-instruct \ No newline at end of file diff --git a/docs/integrations/nemo_guardrails/guard_as_action.md b/docs/integrations/nemo_guardrails/guard_as_action.md new file mode 100644 index 000000000..7e0b334a2 --- /dev/null +++ b/docs/integrations/nemo_guardrails/guard_as_action.md @@ -0,0 +1,171 @@ +# Guard as Actions + +This guide will teach you how to use a `Guard` with any of the 60+ GuardrailsAI Validators as an action inside a NeMo Guardrails configuration. + +## Prerequisites + +We'll be using an OpenAI model for our LLM in this guide, so set up an OpenAI API key, if not already set. + +```bash +export OPENAI_API_KEY=$OPENAI_API_KEY # Replace with your own key +``` + +If you're running this inside a notebook, you also need to patch the AsyncIO loop. 
+
+```python
+import nest_asyncio
+
+nest_asyncio.apply()
+```
+
+## Sample Guard
+
+Let's create a sample Guard that can detect PII. First, install guardrails-ai.
+
+```bash
+pip install guardrails-ai -q
+```
+
+Next configure the guardrails cli so we can install the validator we want to use from the Guardrails Hub.
+
+```bash
+guardrails configure
+```
+
+```bash
+guardrails hub install hub://guardrails/detect_pii --no-install-local-models -q
+```
+
+Now we can define our Guard.
+This Guard will use the DetectPII validator to safeguard against leaking personally identifiable information such as names, email addresses, etc.
+
+Once the Guard is defined, we can test it with a static value to make sure it's working how we would expect.
+
+```python
+from guardrails import Guard
+from guardrails.hub import DetectPII
+
+g = Guard(name="pii_guard").use(DetectPII(["PERSON", "EMAIL_ADDRESS"], on_fail="fix"))
+
+print(g.validate("My name is John Doe"))
+```
+
+```
+ValidationOutcome(
+    call_id='14534730096',
+    raw_llm_output='My name is John Doe',
+    validation_summaries=[
+        ValidationSummary(
+            validator_name='DetectPII',
+            validator_status='fail',
+            property_path='$',
+            failure_reason='The following text in your response contains PII:\nMy name is John Doe',
+            error_spans=[
+                ErrorSpan(start=11, end=19, reason='PII detected in John Doe')
+            ]
+        )
+    ],
+    validated_output='My name is ',
+    reask=None,
+    validation_passed=True,
+    error=None
+)
+```
+
+## NeMo Guardrails Configuration
+
+Now we'll use the Guard we defined above to create an action and a flow. Since we're calling our guard "pii_guard", we'll use "pii_guard_validate" in order to see if the LLM output is safe.
+
+```colang
+define flow detect_pii
+    $output = execute pii_guard_validate(text=$bot_message)
+
+    if not $output
+        bot refuse to respond
+        stop
+```
+
+```yaml
+models:
+  - type: main
+    engine: openai
+    model: gpt-3.5-turbo-instruct
+
+rails:
+  output:
+    flows:
+      - detect_pii
+```
+
+To hook the Guardrails AI guard up so that it can be read from Colang, we use the integration's `register_guardrails_guard_actions` function.
+This takes a name and registers two actions:
+
+1. [guard_name]_validate: This action is used to detect validation failures in outputs
+2. [guard_name]_fix: This action is used to automatically fix validation failures in outputs, when possible
+
+```python
+from nemoguardrails import RailsConfig, LLMRails
+from nemoguardrails.integrations.guardrails_ai.guard_actions import register_guardrails_guard_actions
+
+config = RailsConfig.from_path("./config")
+rails = LLMRails(config)
+
+register_guardrails_guard_actions(rails, g, "pii_guard")
+```
+
+```
+Fetching 5 files: 100%|██████████| 5/5 [00:00<00:00, 109226.67it/s]
+```
+
+## Testing
+
+Let's try this out. If we invoke the NeMo Guardrails configuration with a message that prompts the LLM to return personal information like names, email addresses, etc., it should refuse to respond.
+
+```python
+response = rails.generate("Who is the current president of the United States, and what was their email address?")
+print(response)
+```
+
+```
+I'm sorry, I can't respond to that.
+```
+
+Great! So the validation-only flow works. Next let's try the fix flow.
+
+```colang
+define flow detect_pii
+    $output = execute pii_guard_fix(text=$bot_message)
+
+    if not $output
+        bot refuse to respond
+        stop
+    else
+        $bot_message = $output
+```
+
+If we send the same message, we should get a response this time, but any PII will be filtered out.
+ +```python +config = RailsConfig.from_path("./config") +rails = LLMRails(config) + +register_guardrails_guard_actions(rails, g, "pii_guard") + +response = rails.generate("Who is the current president of the United States, and what was their email address?") +print(response) +``` + +``` +The current president of the United States is . His official email address is . However, he also has a personal email address, which is . +``` + +If however, we prompt the LLM with a message that does not cause it to return PII, we should get the unaltered response. + +```python +response = rails.generate("Hello!") +print(response) +``` + +``` +Hello there! How can I assist you? +``` diff --git a/docs/integrations/nemo_guardrails/guardrails_rails.md b/docs/integrations/nemo_guardrails/guardrails_rails.md new file mode 100644 index 000000000..24715facc --- /dev/null +++ b/docs/integrations/nemo_guardrails/guardrails_rails.md @@ -0,0 +1,79 @@ +This guide will teach you how to add guardrails configurations built with NeMo Guardrails to your Guardrails AI application. + +# Overview + +The Guardrails AI library provides a Rails integration that allows you to use a Rails application as an LLM callable. This will result in a Rails application that generates completions that are validated using a GuardrailsAI guard configuration. + +We start by defining a Guardrails AI Guard and a Rails configuration. We'll also install the [ToxicLanguage validator](https://hub.guardrailsai.com/validator/guardrails/toxic_language) from the [Guardrails AI Hub](https://hub.guardrailsai.com/). + +```python +from nemoguardrails import LLMRails, RailsConfig +from guardrails import Guard, install + +install("hub://guardrails/toxic_language") +from guardrails.hub import ToxicLanguage + +# Load a guardrails configuration from the specified path. +config = RailsConfig.from_path("PATH/TO/CONFIG") +rails = LLMRails(config) +``` + +Then, we have the guard validate the completions generated by the Rails application. 
+
+```python
+from guardrails.integrations.nemoguardrails.nemoguardrails_guard import (
+    NemoguardrailsGuard
+)
+railsguard = NemoguardrailsGuard(rails).use(ToxicLanguage)
+
+result = railsguard(
+    messages=[{
+        "role":"user",
+        "content":"Hello! What can you do for me?"
+    }]
+)
+```
+
+The `NemoguardrailsGuard` class is a wrapper around the Guard class. Just like a Guard, it can be [called](https://www.guardrailsai.com/docs/api_reference_markdown/guards#__call__) with similar parameters to the OpenAI completions API. It also returns a `ValidationOutcome` object (or iterable, in streaming cases). That object can be destructured to get the raw output, the validated output, and other metadata.
+
+Here, `raw_llm_output` is the output returned by the NeMo Guardrails Rails.
+
+```
+result.raw_llm_output
+result.validated_output
+result.validation_passed
+```
+
+## Expected NeMo Guardrails Rails output
+
+The NeMo Guardrails Rails may return any serializable type expressible in python using native types or Pydantic. The output must conform to the datatypes expected by the specified Guard. If the output is structured, make sure to initialize the Guardrails AI Guard using pydantic, [following this guide](https://www.guardrailsai.com/docs/how_to_guides/generate_structured_data).
+
+# Integration with the NeMo Guardrails server
+
+To wrap a call to the NeMo Guardrails server, we can leverage the OpenAI-style API endpoint available. We can talk to this endpoint directly through the Guard, setting the correct endpoint and config_id.
+
+
+First, start the NeMo Guardrails server:
+
+```bash
+nemoguardrails server [--config PATH/TO/CONFIGS] [--port PORT]
+```
+
+Then, talk to it using the Guard:
+
+```python
+from guardrails import Guard
+guard = Guard().use(
+    ToxicLanguage()
+)
+
+# invoke the guard using the endpoint and config_id
+guard(
+    endpoint="http://localhost:8000/v1/chat/completions",
+    config_id="CONFIG_ID",
+    messages=[{
+        "role":"user",
+        "content":"Hello! 
What can you do for me?" + }] +) +``` diff --git a/docs/integrations/nemo_guardrails/index.md b/docs/integrations/nemo_guardrails/index.md new file mode 100644 index 000000000..c8da61d8d --- /dev/null +++ b/docs/integrations/nemo_guardrails/index.md @@ -0,0 +1,68 @@ +# Guardrails AI & NeMo Guardrails + +Integrating Guardrails AI with NeMo Guardrails combines the strengths of both frameworks: + +Guardrails AI's extensive hub of validators can enhance NeMo Guardrails' input and output checking capabilities. +NeMo Guardrails' flexible configuration system can provide a powerful context for applying Guardrails AI validators. +Users of both frameworks can benefit from a seamless integration, reducing development time and improving overall safety measures. +This integration allows developers to leverage the best features of both frameworks, creating more robust and secure LLM applications. + +## Registering a Guard as an Action + +```bash +guardrails hub install hub://guardrails/toxic_language +``` + +```python +from guardrails import Guard +from guardrails.hub import ToxicLanguage +from nemoguardrails import RailsConfig, LLMRails +from nemoguardrails.integrations.guardrails_ai.guard_actions import register_guardrails_guard_actions + +guard = Guard().use( + ToxicLanguage() +) + +config = RailsConfig.from_path("path/to/config") +rails = LLMRails(config) + +register_guardrails_guard_actions(rails, guard, "custom_guard_action") +``` + +Now, the `custom_guard_action` can be used as an action within the Rails specification. This action can be used on input or output, and may be used in any number of flows. + +```yaml +define flow + ... + $result = execute custom_guard_action + ... 
+``` + +## Using LLMRails in a Guard + +```bash +guardrails hub install hub://guardrails/toxic_language +``` + +```yaml +# config.yml +models: + - type: main + engine: openai + model: gpt-3.5-turbo-instruct +``` + +```python +from guardrails import Guard +from guardrails.hub import ToxicLanguage +from nemoguardrails import RailsConfig, LLMRails +from guardrails.integrations.nemoguardrails import NemoguardrailsGuard + +config = RailsConfig.from_path("path/to/config") +rails = LLMRails(config) + +guard = NemoguardrailsGuard(rails) +guard.use( + ToxicLanguage() +) +``` diff --git a/docs/integrations/nemo_guardrails/rails_as_guard.md b/docs/integrations/nemo_guardrails/rails_as_guard.md new file mode 100644 index 000000000..7da83e194 --- /dev/null +++ b/docs/integrations/nemo_guardrails/rails_as_guard.md @@ -0,0 +1,110 @@ +# NeMo Guardrails as Guards +This guide will teach you how to add NeMo Guardrails to a GuardrailsAI Guard. + +## Prerequisites + +We'll be using an OpenAI model for our LLM in this guide, so set up an OpenAI API key, if not already set. + +```bash +export OPENAI_API_KEY=$OPENAI_API_KEY # Replace with your own key +``` + +If you're running this inside a notebook, you also need to patch the AsyncIO loop. + +```python +import nest_asyncio + +nest_asyncio.apply() +``` + +## Sample NeMo Guardrails +We'll start by creating a new NeMo Guardrails configuration. + +```yaml +models: + - type: main + engine: openai + model: gpt-3.5-turbo-instruct +``` + +We'll do a quick test to make sure everything is working as expected. + +```python +from nemoguardrails import RailsConfig, LLMRails + +config = RailsConfig.from_path("./config") +rails = LLMRails(config) + +response = rails.generate("Hello!") + +print(response) +``` + +``` + Fetching 5 files: 0%| | 0/5 [00:00. His email address is . He can also be reached through his personal email at . Additionally, he is active on social media and can be contacted through his official Twitter account . 
Is there anything else you would like to know about President ? +``` + +Great! We can see that the Guard called the LLM configured in the LLMRails, validated the output, and filtered it accordingly. If however, we prompt the LLM with a message that does not cause it to return PII, we should get the unaltered response. + +```python +response = guard( + messages=[{ + "role": "user", + "content": "Hello!" + }] +) + +print(response.validated_output) +``` + +``` +Hi there! It's nice to meet you. My name is AI Assistant. How can I help you today? +``` diff --git a/docs/integrations/nemo_guardrails/rails_guardrails.md b/docs/integrations/nemo_guardrails/rails_guardrails.md new file mode 100644 index 000000000..4acfcdace --- /dev/null +++ b/docs/integrations/nemo_guardrails/rails_guardrails.md @@ -0,0 +1,52 @@ +::: +note: This will exist in the NeMo Guardrails docs +::: + + +# Introduction + +Integrating Guardrails AI with NeMo Guardrails combines the strengths of both frameworks: + +Guardrails AI's extensive hub of validators can enhance NeMo Guardrails' input and output checking capabilities. +NeMo Guardrails' flexible configuration system can provide a powerful context for applying Guardrails AI validators. +Users of both frameworks can benefit from a seamless integration, reducing development time and improving overall safety measures. +This integration allows developers to leverage the best features of both frameworks, creating more robust and secure LLM applications. + +# Overview +This document provides a guide to using a Guardrails AI Guard as an action within a NeMo Guardrails Rails application. This can be done either by defining an entire Guard and registering it, or by registering a validator directly. 
+ +## Registering a Guard as an action + +First, we install our validators and define our Guard + +```python +from guardrails import Guard, install +install("hub://guardrails/toxic_language") +from guardrails.hub import ToxicLanguage + +guard = Guard().use( + ToxicLanguage() +) +``` + +Next, we register our `guard` using the nemoguardrails registration API + +```python +from nemoguardrails import RailsConfig, LLMRails + +config = RailsConfig.from_path("path/to/config") +rails = LLMRails(config) + +rails.register_action(guard, "custom_guard_action") +``` + +Now, the `custom_guard_action` can be used as an action within the Rails specification. This action can be used on input or output, and may be used in any number of flows. + +```yaml +define flow + ... + $result = execute custom_guard_action + ... +``` + + diff --git a/docusaurus/sidebars.js b/docusaurus/sidebars.js index a737c415e..c93997ba9 100644 --- a/docusaurus/sidebars.js +++ b/docusaurus/sidebars.js @@ -104,6 +104,16 @@ const sidebars = { integrations: [ // "integrations/azure_openai", "integrations/langchain", + { + type: "category", + label: "NeMo Guardrails", + collapsed: false, + items: [ + "integrations/nemo_guardrails/index", + "integrations/nemo_guardrails/guard_as_action", + "integrations/nemo_guardrails/rails_as_guard", + ], + }, { type: "category", label: "Telemetry", diff --git a/guardrails/async_guard.py b/guardrails/async_guard.py index c7ad495ea..1c98f9aaf 100644 --- a/guardrails/async_guard.py +++ b/guardrails/async_guard.py @@ -1,6 +1,7 @@ from builtins import id as object_id import contextvars import inspect +from guardrails.formatters.base_formatter import BaseFormatter from opentelemetry import context as otel_context from typing import ( Any, @@ -99,6 +100,8 @@ def for_pydantic( tracer: Optional[Tracer] = None, name: Optional[str] = None, description: Optional[str] = None, + output_formatter: Optional[Union[str, BaseFormatter]] = None, + **kwargs, ): guard = super().for_pydantic( 
output_class, @@ -108,6 +111,8 @@ def for_pydantic( tracer=tracer, name=name, description=description, + output_formatter=output_formatter, + **kwargs, ) if guard._output_type == OutputTypes.LIST: return cast(AsyncGuard[List], guard) diff --git a/guardrails/guard.py b/guardrails/guard.py index 3976773f5..fc81ee3c8 100644 --- a/guardrails/guard.py +++ b/guardrails/guard.py @@ -380,7 +380,7 @@ def _for_rail_schema( name: Optional[str] = None, description: Optional[str] = None, ): - guard = cls( + guard = cls._init_guard_for_cls_method( name=name, description=description, output_schema=schema.json_schema, @@ -526,6 +526,25 @@ def for_rail_string( def from_pydantic(cls, output_class: ModelOrListOfModels, *args, **kwargs): return cls.for_pydantic(output_class, **kwargs) + @classmethod + def _init_guard_for_cls_method( + cls, + *, + id: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + validators: Optional[List[ValidatorReference]] = None, + output_schema: Optional[Dict[str, Any]] = None, + **kwargs, + ): + return cls( + id=id, + name=name, + description=description, + output_schema=output_schema, + validators=validators, + ) + @classmethod def for_pydantic( cls, @@ -538,6 +557,7 @@ def for_pydantic( name: Optional[str] = None, description: Optional[str] = None, output_formatter: Optional[Union[str, BaseFormatter]] = None, + **kwargs, ): """Create a Guard instance using a Pydantic model to specify the output schema. 
@@ -574,11 +594,12 @@ def for_pydantic( reask_messages=reask_messages, messages=messages, ) - guard = cls( + guard = cls._init_guard_for_cls_method( name=name, description=description, output_schema=schema.json_schema, validators=schema.validators, + **kwargs, ) if schema.output_type == OutputTypes.LIST: guard = cast(Guard[List], guard) @@ -1306,7 +1327,7 @@ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional["Guard"]: i_guard.output_schema.to_dict() if i_guard.output_schema else None ) - guard = cls( + guard = cls._init_guard_for_cls_method( id=i_guard.id, name=i_guard.name, description=i_guard.description, diff --git a/guardrails/integrations/nemoguardrails/__init__.py b/guardrails/integrations/nemoguardrails/__init__.py new file mode 100644 index 000000000..f2ebf7223 --- /dev/null +++ b/guardrails/integrations/nemoguardrails/__init__.py @@ -0,0 +1,6 @@ +from guardrails.integrations.nemoguardrails.nemoguardrails_guard import ( + NemoguardrailsGuard, + AsyncNemoguardrailsGuard, +) + +__all__ = ["NemoguardrailsGuard", "AsyncNemoguardrailsGuard"] diff --git a/guardrails/integrations/nemoguardrails/nemoguardrails_guard.py b/guardrails/integrations/nemoguardrails/nemoguardrails_guard.py new file mode 100644 index 000000000..f5f6c74cb --- /dev/null +++ b/guardrails/integrations/nemoguardrails/nemoguardrails_guard.py @@ -0,0 +1,228 @@ +import inspect +from functools import partial +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Dict, + Generic, + Iterable, + List, + Optional, + Union, + cast, +) +from typing_extensions import deprecated + +from guardrails.classes.output_type import OT, OutputTypes +from guardrails.classes.validation_outcome import ValidationOutcome +from guardrails.classes.validation.validator_reference import ValidatorReference + +from guardrails import Guard, AsyncGuard + +from guardrails.formatters.base_formatter import BaseFormatter +from guardrails.types.pydantic import ModelOrListOfModels + +from 
guardrails.stores.context import Tracer + +try: + from nemoguardrails import LLMRails +except ImportError: + raise ImportError( + "Could not import nemoguardrails, please install it with " + "`pip install nemoguardrails`." + ) + +try: + import nest_asyncio + + nest_asyncio.apply() + import asyncio +except ImportError: + raise ImportError( + "Could not import nest_asyncio, please install it with " + "`pip install nest_asyncio`." + ) + + +class NemoguardrailsGuard(Guard, Generic[OT]): + def __init__( + self, + nemorails: LLMRails, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + self._nemorails = nemorails + self._generate = self._nemorails.generate + + def _custom_nemo_callable(self, *args, generate_kwargs, **kwargs): + # .generate doesn't like temp + kwargs.pop("temperature", None) + + messages = kwargs.pop("messages", None) + + if messages == [] or messages is None: + raise ValueError("messages must be passed during a call.") + + if not generate_kwargs: + generate_kwargs = {} + + response = self._generate(messages=messages, **generate_kwargs) + + if inspect.iscoroutine(response): + response = asyncio.run(response) + + return response[ # type: ignore + "content" + ] + + def __call__( + self, + llm_api: Optional[Callable] = None, + generate_kwargs: Optional[Dict] = None, + *args, + **kwargs, + ) -> Union[ValidationOutcome[OT], Iterable[ValidationOutcome[OT]]]: + # peel llm_api off of kwargs + llm_api = kwargs.pop("llm_api", None) + + # if llm_api is defined, throw an error + if llm_api is not None: + raise ValueError( + """llm_api should not be passed to a NemoguardrailsGuard object. + The Nemoguardrails LLMRails object passed in will be used as the LLM.""" + ) + + # peel off messages from kwargs + messages = kwargs.get("messages", None) + + # if messages is not defined, throw an error + if messages is None: + raise ValueError( + """messages should be passed to a NemoguardrailsGuard object. 
+ The messages to be passed to the LLM should be passed in as a list of + dictionaries, where each dictionary has a 'role' key and a 'content' key.""" + ) + + llm_api = partial(self._custom_nemo_callable, generate_kwargs=generate_kwargs) + + return super().__call__(llm_api=llm_api, *args, **kwargs) + + @classmethod + def _init_guard_for_cls_method( + cls, + *, + name: Optional[str] = None, + description: Optional[str] = None, + validators: Optional[List[ValidatorReference]] = None, + output_schema: Optional[Dict[str, Any]] = None, + nemorails: LLMRails, + **kwargs, + ): + return cls( + nemorails, + name=name, + description=description, + output_schema=output_schema, + validators=validators, + ) + + @classmethod + def for_pydantic( + cls, + output_class: ModelOrListOfModels, + *, + nemorails: LLMRails, + num_reasks: Optional[int] = None, + reask_messages: Optional[List[Dict]] = None, + messages: Optional[List[Dict]] = None, + tracer: Optional[Tracer] = None, + name: Optional[str] = None, + description: Optional[str] = None, + output_formatter: Optional[Union[str, BaseFormatter]] = None, + **kwargs, + ): + guard = super().for_pydantic( + output_class, + num_reasks=num_reasks, + messages=messages, + reask_messages=reask_messages, + tracer=tracer, + name=name, + description=description, + output_formatter=output_formatter, + nemorails=nemorails, + ) + if guard._output_type == OutputTypes.LIST: + return cast(NemoguardrailsGuard[List], guard) + else: + return cast(NemoguardrailsGuard[Dict], guard) + + @deprecated( + "Use `for_rail_string` instead. This method will be removed in 0.6.x.", + category=None, + ) + @classmethod + def from_rail_string(cls, *args, **kwargs): + raise NotImplementedError("""\ +`from_rail_string` is not implemented for NemoguardrailsGuard. 
We recommend using the main constructor `NemoguardrailsGuard(nemorails=nemorails)`
"^10.1.0" cairosvg = "^2.7.1" mkdocs-glightbox = "^0.3.4" - [[tool.poetry.source]] name = "PyPI"