Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 75 additions & 1 deletion tests/agent/models/test_agent.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import pytest
from pydantic import TypeAdapter
from pydantic import TypeAdapter, ValidationError

from uipath.agent.models.agent import (
AgentBuiltInValidatorGuardrail,
Expand Down Expand Up @@ -2755,3 +2755,77 @@ def test_is_conversational_false_by_default(self):
)

assert config.is_conversational is False


class TestAgentDefinitionValidation:
    """Negative/validation tests for AgentDefinition.

    Exercises required-field enforcement at the top level and inside
    settings, plus graceful wrapping of unrecognized resource, tool,
    and guardrail types.
    """

    # Minimal payload that passes validation; tests copy it and mutate
    # the copy rather than editing this class attribute in place.
    _VALID_AGENT = {
        "name": "test-agent",
        "settings": {
            "model": "gpt-4o",
            "maxTokens": 4096,
            "temperature": 0,
            "engine": "basic-v1",
        },
        "messages": [{"role": "system", "content": "hi"}],
        "inputSchema": {"type": "object", "properties": {}},
        "outputSchema": {"type": "object", "properties": {}},
    }

    @pytest.mark.parametrize(
        "omit_key",
        ["settings", "messages", "inputSchema", "outputSchema"],
    )
    def test_missing_required_field_raises(self, omit_key):
        """AgentDefinition rejects payloads missing any required field."""
        payload = dict(self._VALID_AGENT)
        del payload[omit_key]
        with pytest.raises(ValidationError):
            TypeAdapter(AgentDefinition).validate_python(payload)

    @pytest.mark.parametrize(
        "settings_override",
        [
            {"maxTokens": 4096, "temperature": 0, "engine": "basic-v1"},
            {"model": "gpt-4o", "maxTokens": 4096, "temperature": 0},
        ],
        ids=["missing-model", "missing-engine"],
    )
    def test_settings_missing_required_subfield_raises(self, settings_override):
        """Settings must include both model and engine."""
        payload = dict(self._VALID_AGENT)
        payload["settings"] = settings_override
        with pytest.raises(ValidationError):
            TypeAdapter(AgentDefinition).validate_python(payload)

    def test_unknown_types_normalized_gracefully(self):
        """Unknown resource, tool, and guardrail types are wrapped, not rejected."""
        unknown_resource = {
            "$resourceType": "futuristic",
            "name": "future-resource",
            "description": "unknown resource type",
        }
        unknown_tool = {
            "$resourceType": "tool",
            "type": "FutureToolType",
            "name": "future-tool",
            "description": "unknown tool type",
            "inputSchema": {"type": "object", "properties": {}},
        }
        unknown_guardrail = {
            "$guardrailType": "futureGuardrail",
            "someField": "someValue",
        }

        payload = dict(self._VALID_AGENT)
        payload["resources"] = [unknown_resource, unknown_tool]
        payload["guardrails"] = [unknown_guardrail]

        config = TypeAdapter(AgentDefinition).validate_python(payload)

        assert len(config.resources) == 2
        assert isinstance(config.resources[0], AgentUnknownResourceConfig)
        assert isinstance(config.resources[1], AgentUnknownToolResourceConfig)
        assert len(config.guardrails) == 1
        assert isinstance(config.guardrails[0], AgentUnknownGuardrail)
25 changes: 25 additions & 0 deletions tests/agent/react/test_conversational_prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -390,3 +390,28 @@ def test_full_settings_json_format(self):
assert json_data["company"] == "Big Corp"
assert json_data["country"] == "UK"
assert json_data["timezone"] == "Europe/London"


class TestSpecialCharacterHandling:
    """Tests that special characters survive prompt construction intact."""

    def test_unicode_and_emoji_preserved_in_user_context(self):
        """Unicode and emoji characters in user settings are preserved."""
        rendered = _get_user_settings_template(
            PromptUserSettings(name="日本太郎 🚀", email="taro@example.jp")
        )

        assert "USER CONTEXT" in rendered
        assert "日本太郎 🚀" in rendered

    def test_template_syntax_preserved_in_prompt(self):
        """Curly braces, double curlies, and newlines pass through the template."""
        rendered = get_chat_system_prompt(
            model="claude-3-sonnet",
            system_message="Output {key: value}\nLine 2",
            agent_name="Agent {{v2}}",
            user_settings=None,
        )

        # Braces and newlines in user-supplied text must not be consumed
        # by the prompt templating machinery.
        for fragment in (
            "Output {key: value}",
            "You are Agent {{v2}}.",
            "Line 2",
        ):
            assert fragment in rendered
66 changes: 66 additions & 0 deletions tests/agent/utils/test_load_agent_definition.py
Original file line number Diff line number Diff line change
Expand Up @@ -324,3 +324,69 @@ def test_load_agent_definition_missing_evaluators_directory(self, temp_project_d
assert result.id == "test-agent-5"
assert result.evaluators is None or len(result.evaluators or []) == 0
assert result.evaluation_sets is None or len(result.evaluation_sets or []) == 0


class TestLoadAgentDefinitionErrors:
    """Error path tests for load_agent_definition."""

    @pytest.fixture
    def temp_project_dir(self):
        """Yield a throwaway project directory, removed after the test."""
        with tempfile.TemporaryDirectory() as tmpdir:
            yield Path(tmpdir)

    def test_missing_agent_json_raises(self, temp_project_dir):
        """Loading from a directory without agent.json raises FileNotFoundError."""
        with pytest.raises(FileNotFoundError):
            load_agent_definition(temp_project_dir)

    def test_malformed_json_in_agent_file_raises(self, temp_project_dir):
        """Invalid JSON in agent.json raises json.JSONDecodeError."""
        (temp_project_dir / "agent.json").write_text("{ not valid json !!!")

        with pytest.raises(json.JSONDecodeError):
            load_agent_definition(temp_project_dir)

    @pytest.mark.parametrize(
        "subdir,filename",
        [
            ("resources", "bad_resource.json"),
            ("evaluations/evaluators", "broken.json"),
        ],
        ids=["malformed-resource", "malformed-evaluator"],
    )
    def test_malformed_sidecar_file_is_skipped(
        self, temp_project_dir, subdir, filename
    ):
        """Malformed JSON in resources/ or evaluators/ is skipped, not fatal."""
        agent_payload = {
            "id": "test-malformed",
            "name": "Agent with bad sidecar file",
            "version": "1.0.0",
            "messages": [{"role": "system", "content": "hi"}],
            "inputSchema": {"type": "object", "properties": {}},
            "outputSchema": {"type": "object", "properties": {}},
            "settings": {
                "model": "gpt-4o",
                "maxTokens": 2048,
                "temperature": 0.7,
                "engine": "basic-v1",
            },
        }
        (temp_project_dir / "agent.json").write_text(json.dumps(agent_payload))

        sidecar_dir = temp_project_dir / subdir
        sidecar_dir.mkdir(parents=True)
        (sidecar_dir / filename).write_text("NOT JSON AT ALL")

        # The evaluators loader expects eval-sets as a sibling directory,
        # so create it when the malformed file lives under evaluations/.
        if "evaluations" in subdir:
            (temp_project_dir / "evaluations" / "eval-sets").mkdir(
                parents=True, exist_ok=True
            )

        loaded = load_agent_definition(temp_project_dir)
        assert loaded.id == "test-malformed"
58 changes: 58 additions & 0 deletions tests/agent/utils/test_text_tokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,3 +277,61 @@ def test_boolean(self):
"""Test serializing booleans (JSON-style lowercase)."""
assert serialize_argument(True) == "true"
assert serialize_argument(False) == "false"


class TestBuildStringFromTokensEdgeCases:
    """Edge case tests for build_string_from_tokens."""

    def test_non_ascii_in_tools_and_input(self):
        """Non-ASCII characters in tool names and input values resolve correctly."""
        parts = [
            (TextTokenType.SIMPLE_TEXT, "Use "),
            (TextTokenType.VARIABLE, "tools.données"),
            (TextTokenType.SIMPLE_TEXT, " for "),
            (TextTokenType.VARIABLE, "input.name"),
        ]
        tokens = [
            TextToken(type=token_type, raw_string=raw)
            for token_type, raw in parts
        ]

        rendered = build_string_from_tokens(
            tokens, {"name": "日本語テスト"}, tool_names=["données"]
        )
        assert rendered == "Use données for 日本語テスト"

    def test_unresolved_tool_reference_without_tool_names(self):
        """Tool references return raw string when tool_names is None or empty."""
        tokens = [
            TextToken(type=TextTokenType.VARIABLE, raw_string="tools.search"),
        ]

        # Both "no tool list" variants fall back to the raw token text.
        for names in (None, []):
            assert (
                build_string_from_tokens(tokens, {}, tool_names=names)
                == "tools.search"
            )

    @pytest.mark.parametrize(
        "input_data,raw_string,expected",
        [
            ({"user": {"email": None}}, "input.user.email", "input.user.email"),
            ({"name": ""}, "input.name", ""),
            ({"count": 0}, "input.count", "0"),
            ({"active": False}, "input.active", "false"),
        ],
        ids=["nested-none", "empty-string", "zero", "false"],
    )
    def test_falsy_input_values(self, input_data, raw_string, expected):
        """Falsy values (None, empty, 0, False) are handled distinctly."""
        token = TextToken(type=TextTokenType.VARIABLE, raw_string=raw_string)
        assert build_string_from_tokens([token], input_data) == expected


class TestSafeGetNestedEdgeCases:
    """Edge case tests for safe_get_nested."""

    @pytest.mark.parametrize(
        "data,path,expected",
        [
            ({}, "any.path", None),
            ({"a": "not_a_dict"}, "a.b", None),
            ({"a": [1, 2, 3]}, "a.0", None),
        ],
        ids=["empty-dict", "non-dict-intermediate", "list-intermediate"],
    )
    def test_unreachable_paths_return_none(self, data, path, expected):
        """Paths through non-dict intermediates return None."""
        result = safe_get_nested(data, path)
        assert result is expected
Loading