Skip to content
Secure Private AI for Enterprises and Developers - amazee.ai

Hooks

This example shows how to use the hook_ai_autoevals_evaluation_sets_alter() hook to control when evaluations are triggered.

Hooks allow you to filter evaluation sets based on custom criteria before they are used. This is the most flexible way to add conditional evaluation logic.

Only evaluate content in English.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Filter evaluations to only process English content.
 *
 * @param array $evaluation_sets
 *   Evaluation sets keyed by set ID; emptied to skip all evaluations.
 * @param array $context
 *   Evaluation context (operation type, input/output text, tags).
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $currentLanguage = \Drupal::languageManager()->getCurrentLanguage()->getId();
  if ($currentLanguage !== 'en') {
    // Capture the count BEFORE clearing the array; the original logged
    // count($evaluation_sets) after emptying it, so @count was always 0.
    $filteredCount = count($evaluation_sets);
    // Empty the array to skip all evaluations for non-English content.
    $evaluation_sets = [];
    // Log the decision so filtering is visible in dblog/syslog.
    \Drupal::logger('mymodule')->notice(
      'Filtered @count evaluation sets due to non-English language: @lang',
      [
        '@count' => $filteredCount,
        '@lang' => $currentLanguage,
      ]
    );
  }
}

Support multiple languages with evaluation set routing.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Route evaluations to language-specific evaluation sets.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $langcode = \Drupal::languageManager()->getCurrentLanguage()->getId();
  // Languages that have dedicated evaluation sets.
  $supportedLanguages = ['en', 'es', 'fr', 'de'];
  if (!in_array($langcode, $supportedLanguages, TRUE)) {
    // Unsupported language - skip all evaluations.
    $evaluation_sets = [];
    \Drupal::logger('mymodule')->warning(
      'Language @lang not supported, skipping evaluations',
      ['@lang' => $langcode]
    );
    return;
  }
  // Supported language: keep only the sets that target it.
  foreach ($evaluation_sets as $setId => $set) {
    $metadata = $set->getMetadata();
    if (isset($metadata['supported_languages'])) {
      // Explicit language metadata on the set wins.
      $keep = in_array($langcode, $metadata['supported_languages'], TRUE);
    }
    else {
      // No language metadata: fall back to a language-code suffix in the
      // set ID (e.g. "my_set_en").
      $keep = (bool) preg_match("/_{$langcode}$/i", $setId);
    }
    if (!$keep) {
      unset($evaluation_sets[$setId]);
    }
  }
}

Create evaluation sets with language-specific IDs:

# english_evaluation.yml
id: english_evaluation
label: 'English Content Evaluation'
metadata:
supported_languages: ['en']
# spanish_evaluation.yml
id: spanish_evaluation
label: 'Spanish Content Evaluation'
metadata:
supported_languages: ['es']
# french_evaluation.yml
id: french_evaluation
label: 'French Content Evaluation'
metadata:
supported_languages: ['fr']

Only evaluate for specific user roles.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Restrict evaluations based on user roles.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $account = \Drupal::currentUser();
  $roles = $account->getRoles();
  // Administrators always get the full set of evaluations.
  if (in_array('administrator', $roles, TRUE)) {
    return;
  }
  if (in_array('premium_user', $roles, TRUE)) {
    // Premium users are limited to a whitelist of evaluation sets.
    $allowedSets = [
      'basic_quality_check',
      'content_safety',
    ];
    $evaluation_sets = array_intersect_key(
      $evaluation_sets,
      array_flip($allowedSets)
    );
  }
  else {
    // Everyone else gets no evaluations at all.
    $evaluation_sets = [];
    \Drupal::logger('mymodule')->debug(
      'Skipping evaluations for non-premium user: @uid',
      ['@uid' => $account->id()]
    );
  }
}

Skip evaluation for sensitive or internal content.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Skip evaluation for sensitive content.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $inputText = $context['input_text'] ?? '';

  // Substrings that flag content as sensitive (case-insensitive match).
  $sensitivePatterns = [
    'password',
    'credit card',
    'social security',
    'confidential',
    'internal only',
    'restricted',
  ];
  foreach ($sensitivePatterns as $needle) {
    if (stripos($inputText, $needle) === FALSE) {
      continue;
    }
    $evaluation_sets = [];
    \Drupal::logger('mymodule')->notice(
      'Skipping evaluation for sensitive content containing: @pattern',
      ['@pattern' => $needle]
    );
    return;
  }

  // Markers of test/debug traffic. NOTE(review): '/test/' and '/debug/'
  // look like regex delimiters but stripos() matches them as literal
  // substrings (slashes included) - confirm that is intended.
  $testPatterns = [
    '/test/',
    '/debug/',
    'this is a test',
    'example query',
  ];
  foreach ($testPatterns as $needle) {
    if (stripos($inputText, $needle) === FALSE) {
      continue;
    }
    $evaluation_sets = [];
    \Drupal::logger('mymodule')->debug(
      'Skipping evaluation for test content'
    );
    return;
  }
}

Only evaluate during business hours.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Restrict evaluations to business hours.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $now = time();
  // NOTE(review): date() formats in the PHP/site default timezone -
  // confirm business hours should be evaluated in that zone.
  $hour = (int) date('H', $now);
  // ISO-8601 day of week: 1 = Monday ... 7 = Sunday.
  $dayOfWeek = (int) date('N', $now);
  // Business window: weekdays, 09:00 through 17:59.
  $withinBusinessWindow = $hour >= 9 && $hour < 18 && $dayOfWeek <= 5;
  if ($withinBusinessWindow) {
    return;
  }
  $evaluation_sets = [];
  \Drupal::logger('mymodule')->notice(
    'Skipping evaluation outside business hours: @time, day @day',
    [
      '@time' => date('H:i', $now),
      '@day' => $dayOfWeek,
    ]
  );
}

Skip evaluation if user has exceeded rate limit.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Implement rate limiting for evaluations.
 *
 * @param array $evaluation_sets
 *   Evaluation sets keyed by set ID; emptied when the limit is exceeded.
 * @param array $context
 *   Evaluation context (unused here).
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $currentUser = \Drupal::currentUser();
  if ($currentUser->isAnonymous()) {
    // Don't rate limit anonymous users.
    return;
  }
  $database = \Drupal::database();
  // Count this user's evaluations in the last hour. The original query had
  // no user condition, so all users shared a single global counter even
  // though the limit is documented as per-user.
  $oneHourAgo = time() - 3600;
  $recentEvaluations = (int) $database
    ->select('ai_autoevals_evaluation_result', 'e')
    // Assumes the result table records the owning user in a 'uid'
    // column - TODO confirm against the module's schema.
    ->condition('uid', $currentUser->id())
    ->condition('created', $oneHourAgo, '>=')
    ->countQuery()
    ->execute()
    ->fetchField();
  // Rate limit: max 100 evaluations per hour per user.
  $maxEvaluationsPerHour = 100;
  if ($recentEvaluations >= $maxEvaluationsPerHour) {
    $evaluation_sets = [];
    \Drupal::logger('mymodule')->warning(
      'Rate limit exceeded for user @uid: @count evaluations in last hour',
      [
        '@uid' => $currentUser->id(),
        '@count' => $recentEvaluations,
      ]
    );
  }
}

Route evaluations to different sets for A/B testing.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Route evaluations for A/B testing.
 *
 * Users are deterministically assigned to bucket A (control sets) or
 * bucket B (experimental sets) based on a hash of their user ID, so the
 * same user always lands in the same bucket.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  // Use the user ID for a consistent, sticky assignment.
  $userId = \Drupal::currentUser()->id();
  // Hash the user ID and take the first hex digit modulo 10 to get a
  // stable bucket in 0-9.
  $hash = md5((string) $userId);
  $bucket = hexdec(substr($hash, 0, 1)) % 10;
  // Buckets 0-4 -> group A (original sets), 5-9 -> group B (experimental).
  // The two branches previously duplicated the filter loop and log call;
  // only the allowed-set list and label actually differ.
  if ($bucket < 5) {
    $groupLabel = 'A';
    $allowedSets = [
      'standard_evaluation',
      'quality_check',
    ];
  }
  else {
    $groupLabel = 'B';
    $allowedSets = [
      'experimental_evaluation',
      'experimental_quality_check',
    ];
  }
  foreach (array_keys($evaluation_sets) as $setId) {
    if (!in_array($setId, $allowedSets, TRUE)) {
      unset($evaluation_sets[$setId]);
    }
  }
  \Drupal::logger('mymodule')->debug(
    'User @uid assigned to bucket @group (group: @bucket)',
    [
      '@uid' => $userId,
      '@group' => $groupLabel,
      '@bucket' => $bucket,
    ]
  );
}

Different evaluation rules for different environments.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Apply different evaluation rules based on environment.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $environment = \Drupal::config('mymodule.settings')->get('environment');
  switch ($environment) {
    case 'production':
      // Production: run every strict evaluation unchanged.
      break;

    case 'staging':
      // Staging: restrict to a medium-strictness whitelist.
      $allowedSets = [
        'standard_evaluation',
        'moderate_quality_check',
      ];
      foreach (array_keys($evaluation_sets) as $setId) {
        if (!in_array($setId, $allowedSets, TRUE)) {
          unset($evaluation_sets[$setId]);
        }
      }
      break;

    case 'development':
      // Development: only evaluate content mentioning critical keywords.
      $inputText = $context['input_text'] ?? '';
      $hasCriticalContent = FALSE;
      foreach (['delete', 'remove', 'destroy', 'critical'] as $keyword) {
        if (stripos($inputText, $keyword) !== FALSE) {
          $hasCriticalContent = TRUE;
          break;
        }
      }
      if (!$hasCriticalContent) {
        $evaluation_sets = [];
      }
      break;

    default:
      // Unknown environment: fail closed and skip all evaluations.
      $evaluation_sets = [];
  }
}

Multiple conditions working together.

<?php
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Complex multi-condition filtering: language, roles, content
 * sensitivity, per-user rate limiting, and per-set metadata.
 *
 * @param array $evaluation_sets
 *   Evaluation sets keyed by set ID; filtered in place.
 * @param array $context
 *   Evaluation context; 'input_text' is inspected here.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $currentUser = \Drupal::currentUser();
  // Fetch roles once instead of once per role check.
  $roles = $currentUser->getRoles();
  $currentLanguage = \Drupal::languageManager()->getCurrentLanguage()->getId();
  $inputText = $context['input_text'] ?? '';

  // Condition 1: only evaluate supported languages.
  $supportedLanguages = ['en', 'es', 'fr'];
  if (!in_array($currentLanguage, $supportedLanguages, TRUE)) {
    $evaluation_sets = [];
    \Drupal::logger('mymodule')->notice('Unsupported language: @lang', ['@lang' => $currentLanguage]);
    return;
  }

  // Condition 2: administrators bypass all filtering.
  if (in_array('administrator', $roles, TRUE)) {
    return;
  }

  // Condition 3: never evaluate sensitive content.
  $sensitiveKeywords = ['confidential', 'secret', 'private'];
  foreach ($sensitiveKeywords as $keyword) {
    if (stripos($inputText, $keyword) !== FALSE) {
      $evaluation_sets = [];
      \Drupal::logger('mymodule')->notice('Sensitive content detected, skipping evaluation');
      return;
    }
  }

  // Condition 4: per-user rate limit (unless the user may bypass it).
  if (!$currentUser->hasPermission('bypass rate limit')) {
    $recentEvaluations = (int) \Drupal::database()
      ->select('ai_autoevals_evaluation_result', 'e')
      // Scope the count to the current user; without this condition the
      // limit was global even though the log reports it per user.
      // Assumes a 'uid' column - TODO confirm the module's schema.
      ->condition('uid', $currentUser->id())
      ->condition('created', time() - 3600, '>=')
      ->countQuery()
      ->execute()
      ->fetchField();
    if ($recentEvaluations > 50) {
      $evaluation_sets = [];
      \Drupal::logger('mymodule')->warning('Rate limit exceeded for user @uid', ['@uid' => $currentUser->id()]);
      return;
    }
  }

  // Condition 5: filter individual sets on their metadata.
  foreach (array_keys($evaluation_sets) as $setId) {
    $metadata = $evaluation_sets[$setId]->getMetadata();
    // Premium-only sets require the premium_user role.
    if (isset($metadata['requires_premium']) && $metadata['requires_premium'] === TRUE
      && !in_array('premium_user', $roles, TRUE)) {
      unset($evaluation_sets[$setId]);
      continue;
    }
    // Language-specific sets must match the current language.
    if (isset($metadata['language']) && $metadata['language'] !== $currentLanguage) {
      unset($evaluation_sets[$setId]);
    }
  }
}

Both hooks and keywords can filter evaluations, but they serve different purposes:

| Feature       | Keywords                 | Hook                           |
| ------------- | ------------------------ | ------------------------------ |
| Configuration | UI-based, non-technical  | Code-based, requires developer |
| Flexibility   | Pattern matching only    | Any PHP logic                  |
| Use Case      | Simple keyword matching  | Complex business rules         |
| Performance   | Very fast                | Slight overhead                |
| Maintenance   | Easy to change in UI     | Requires code deployment       |

When to use Keywords:

  • Simple “contains/doesn’t contain” conditions
  • Non-technical users need to manage rules
  • Pattern matching is sufficient

When to use Hooks:

  • Need to check external services (user roles, database)
  • Complex conditional logic
  • Integration with other modules
  • Language-based routing
  • Rate limiting or quota management
<?php
namespace Drupal\Tests\mymodule\Kernel;
use Drupal\KernelTests\KernelTestBase;
/**
 * Tests hook_ai_autoevals_evaluation_sets_alter().
 *
 * NOTE(review): this class references EvaluationSet and User with no
 * corresponding use statements (e.g. Drupal\user\Entity\User and the
 * module's EvaluationSet entity class) - add the missing imports or
 * fully qualify the class names before running these tests.
 */
class HookTest extends KernelTestBase {
  // Modules whose schemas/services the kernel test environment installs.
  protected static $modules = ['mymodule', 'ai_autoevals', 'user'];
  /**
   * Tests that the hook filters out all sets for an unsupported language.
   */
  public function testLanguageFiltering(): void {
    // Create a test evaluation set with no language restriction of its own.
    $set1 = EvaluationSet::create([
      'id' => 'test_set_1',
      'label' => 'Test Set 1',
      'enabled' => TRUE,
    ]);
    $set1->save();
    // Switch the active language to Spanish.
    // NOTE(review): setConfigurableLanguages() is not a documented
    // LanguageManager method - verify how the test should set the
    // current language (e.g. via ConfigurableLanguage entities).
    \Drupal::languageManager()->setConfigurableLanguages(['es']);
    // With the English-only hook active, no set should match for Spanish.
    $evaluationManager = \Drupal::service('ai_autoevals.evaluation_manager');
    $matchingSet = $evaluationManager->getMatchingEvaluationSetWithHook(
      [],
      'chat',
      'test input',
      NULL,
    );
    $this->assertNull($matchingSet, 'No sets should match for Spanish language');
  }
  /**
   * Tests that premium users only receive whitelisted evaluation sets.
   */
  public function testUserRoleFiltering(): void {
    // Create a test user carrying the premium role.
    $user = User::create([
      'name' => 'test_user',
      'mail' => 'test@example.com',
    ]);
    $user->addRole('premium_user');
    $user->save();
    // Make that user the acting account for the hook's role checks.
    $this->setCurrentUser($user);
    // Create two sets: one outside and one inside the premium whitelist.
    $set1 = EvaluationSet::create([
      'id' => 'basic_set',
      'label' => 'Basic Set',
      'enabled' => TRUE,
    ]);
    $set1->save();
    $set2 = EvaluationSet::create([
      'id' => 'premium_set',
      'label' => 'Premium Set',
      'enabled' => TRUE,
    ]);
    $set2->save();
    // The hook should leave only the premium set as a match.
    $evaluationManager = \Drupal::service('ai_autoevals.evaluation_manager');
    $matchingSet = $evaluationManager->getMatchingEvaluationSetWithHook(
      [],
      'chat',
      'test input',
      NULL,
    );
    $this->assertNotNull($matchingSet);
    $this->assertEquals('premium_set', $matchingSet->id());
  }
}
<?php
/**
 * Debug helper to test hook behavior.
 *
 * Prints the active evaluation sets before hook filtering, then the
 * single set (if any) selected after the hook runs.
 */
function test_ai_autoevals_hook(): void {
  $evaluationManager = \Drupal::service('ai_autoevals.evaluation_manager');
  // Simulated evaluation context for a chat request.
  $context = [
    'operation_type' => 'chat',
    'tags' => ['test' => TRUE],
    'input_text' => 'What is the capital of France?',
    'output_text' => NULL,
  ];
  // Show every active set before the hook has a chance to filter.
  $activeSets = $evaluationManager->getActiveEvaluationSets();
  print "Original sets: " . count($activeSets) . "\n";
  foreach ($activeSets as $activeSet) {
    print " - {$activeSet->id()}\n";
  }
  // Run the matching pipeline with the alter hook applied.
  $matchingSet = $evaluationManager->getMatchingEvaluationSetWithHook(
    $context['tags'],
    $context['operation_type'],
    $context['input_text'],
    $context['output_text'],
  );
  print "\nFinal matching set: " . ($matchingSet ? $matchingSet->id() : 'NULL') . "\n";
}
// Good: clear everything in one assignment and return early.
if ($language !== 'en') {
  $evaluation_sets = [];
  return;
}
// Bad - unnecessary processing: unsetting sets one by one when the
// whole array should be emptied.
if ($language !== 'en') {
  unset($evaluation_sets['set1']);
  unset($evaluation_sets['set2']);
  // ... more unnecessary code
}
// Good: null-coalescing operator handles the missing key concisely.
$inputText = $context['input_text'] ?? '';
// Bad: verbose isset/ternary equivalent of the line above.
$inputText = isset($context['input_text']) ? $context['input_text'] : '';
// Good - helps with debugging: log the decision (and count BEFORE
// clearing) so filtering is traceable.
if ($language !== 'en') {
  \Drupal::logger('mymodule')->notice(
    'Filtered @count evaluation sets for language: @lang',
    ['@count' => count($evaluation_sets), '@lang' => $language]
  );
  $evaluation_sets = [];
}
// Bad - silent filtering: no trace of why evaluations disappeared.
if ($language !== 'en') {
  $evaluation_sets = [];
}
// Good: guard against a missing/empty key before using it.
if (!empty($context['input_text'])) {
  $inputText = $context['input_text'];
}
// Bad - might cause warnings: 'input_text' may not exist in $context.
if (stripos($context['input_text'], 'test') !== FALSE) {
  // ...
}
// Good - only filter array: the hook should alter the passed-in array,
// nothing else.
foreach (array_keys($evaluation_sets) as $id) {
  if ($id !== 'allowed_set') {
    unset($evaluation_sets[$id]);
  }
}
// Bad - modifies entity state: disabling the config entity persists
// beyond this request.
foreach ($evaluation_sets as $set) {
  $set->setEnabled(FALSE); // This modifies the config entity!
}

Symptoms: Evaluation always runs, hook doesn’t seem to execute.

Solutions:

  1. Clear Drupal cache: drush cache:rebuild
  2. Ensure module is enabled: drush pm:list | grep mymodule
  3. Check function name matches pattern: mymodule_ai_autoevals_evaluation_sets_alter()
  4. Verify function is in .module file (not in a class)

Symptoms: Too many or too few sets are being filtered.

Solutions:

  1. Add logging to see which sets are being filtered
  2. Check evaluation set IDs match your logic
  3. Verify context values are what you expect
  4. Test with drush ev "print_r(\$context)" to see context

Symptoms: Site slows down when evaluations trigger.

Solutions:

  1. Keep hook logic simple and fast
  2. Cache expensive operations (e.g., database queries)
  3. Consider using keywords instead for simple pattern matching
  4. Profile your code with XHProf or Tideways