Skip to content
Secure Private AI for Enterprises and Developers - amazee.ai

Extending

This guide covers advanced ways to extend AI AutoEvals with custom functionality.

AI AutoEvals is designed to be extensible. You can extend it in several ways:

  1. Hooks: Filter evaluation sets based on custom criteria
  2. Custom Plugins: Create specialized fact extractors
  3. Event Subscribers: React to evaluation events
  4. Custom Services: Integrate with external systems
  5. Theme Override: Customize the UI
  6. Field Storage: Extend evaluation data model

AI AutoEvals provides a hook system that allows you to filter evaluation sets before they are used. This is the most flexible way to add custom triggering conditions.

The hook_ai_autoevals_evaluation_sets_alter() hook is invoked after built-in matching criteria (operation type, tags) have been checked, but before keyword matching is applied.

The hook fires during the evaluation flow:

AI Request Generated
PreGenerateResponseEvent
Get Active Evaluation Sets
Filter by: operation_type + tags
INVOKE hook_ai_autoevals_evaluation_sets_alter() ← YOUR HOOK HERE
Check keyword matching (inclusion/exclusion)
Store pending evaluation
/**
 * Alter the evaluation sets matched for the current AI request.
 *
 * @param array $evaluation_sets
 *   Evaluation set entities keyed by ID; remove entries to prevent them
 *   from being used for this request.
 * @param array $context
 *   Request context: operation_type, tags, input_text, output_text
 *   (output_text is NULL during the pre-response check).
 */
function hook_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
// Your filtering logic here
}
  • &$evaluation_sets (by reference): Array of evaluation set entities that matched operation type and tags. You can remove sets from this array to prevent them from being used.
  • $context: Associative array containing:
    • operation_type: The AI operation type (e.g., 'chat', 'text_completion')
    • tags: Array of tags from the AI request
    • input_text: The user’s input text (if available)
    • output_text: The AI response text (NULL during pre-response check)
  • Filter evaluations by current language
  • Restrict evaluations to specific user roles
  • Exclude sensitive content from evaluation
  • Implement complex routing logic based on multiple factors
  • Add custom business rules to evaluation triggering
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Only run evaluations for English content.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $langcode = \Drupal::languageManager()->getCurrentLanguage()->getId();
  if ($langcode === 'en') {
    // English content: leave the matched sets untouched.
    return;
  }
  // Any other language: clear the list so no evaluation is queued.
  $evaluation_sets = [];
}
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Only evaluate for administrators or premium users.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $roles = \Drupal::currentUser()->getRoles();
  // Keep the sets only when the user holds at least one privileged role.
  $privileged = array_intersect(['administrator', 'premium_user'], $roles);
  if ($privileged === []) {
    $evaluation_sets = [];
  }
}
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Skip evaluation for sensitive content.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $haystack = $context['input_text'] ?? '';
  $blocked = [
    'password',
    'credit card',
    'social security',
    'confidential',
  ];
  foreach ($blocked as $needle) {
    // Case-insensitive substring match; bail out on the first hit.
    if (stripos($haystack, $needle) !== FALSE) {
      $evaluation_sets = [];
      return;
    }
  }
}
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Use evaluation set metadata for filtering.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  $langcode = \Drupal::languageManager()->getCurrentLanguage()->getId();
  foreach ($evaluation_sets as $id => $set) {
    $metadata = $set->getMetadata();
    $allowed = $metadata['allowed_languages'] ?? NULL;
    $disallowed = $metadata['disallowed_languages'] ?? NULL;
    // Drop the set when an allow-list exists and excludes the language.
    if ($allowed !== NULL && !in_array($langcode, $allowed, TRUE)) {
      unset($evaluation_sets[$id]);
      continue;
    }
    // Drop the set when a deny-list exists and contains the language.
    if ($disallowed !== NULL && in_array($langcode, $disallowed, TRUE)) {
      unset($evaluation_sets[$id]);
    }
  }
}
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Only allow specific evaluation sets.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  // Allow-list of set IDs; everything else is dropped.
  $allowed = [
    'strict_quality_check',
    'factuality_validation',
    'content_safety',
  ];
  $evaluation_sets = array_intersect_key($evaluation_sets, array_flip($allowed));
}
/**
 * Implements hook_ai_autoevals_evaluation_sets_alter().
 *
 * Multiple conditions combined.
 */
function mymodule_ai_autoevals_evaluation_sets_alter(array &$evaluation_sets, array $context): void {
  // Condition 1: only English or German content is evaluated at all.
  $langcode = \Drupal::languageManager()->getCurrentLanguage()->getId();
  if (!in_array($langcode, ['en', 'de'], TRUE)) {
    $evaluation_sets = [];
    return;
  }
  // Condition 2: administrators keep every matched set.
  $account = \Drupal::currentUser();
  if (in_array('administrator', $account->getRoles(), TRUE)) {
    return;
  }
  // Condition 3: never evaluate confidential input.
  $input = $context['input_text'] ?? '';
  if (stripos($input, 'confidential') !== FALSE) {
    $evaluation_sets = [];
    return;
  }
  // Condition 4: regular users are limited to the basic check.
  $evaluation_sets = array_intersect_key($evaluation_sets, array_flip(['basic_quality_check']));
}
  1. Early Return: If you’re clearing all evaluation sets, return immediately for clarity.

    // Good
    if ($language !== 'en') {
    $evaluation_sets = [];
    return;
    }
  2. Use isset(): Always check if context values exist before using them.

    $inputText = $context['input_text'] ?? '';
  3. Log Decisions: Add logging for debugging and monitoring.

    if ($language !== 'en') {
    \Drupal::logger('mymodule')->notice(
    'Filtered @count evaluation sets due to language: @lang',
    [
    '@count' => count($evaluation_sets),
    '@lang' => $language,
    ]
    );
    $evaluation_sets = [];
    }
  4. Don’t Modify Entities: Only remove sets from the array, don’t modify the entity properties.

    // Bad - modifies entity
    foreach ($evaluation_sets as $evaluation_set) {
    $evaluation_set->setEnabled(FALSE);
    }
    // Good - removes from array
    foreach (array_keys($evaluation_sets) as $id) {
    unset($evaluation_sets[$id]);
    }
| Feature   | Hook                                       | Events                                     |
| --------- | ------------------------------------------ | ------------------------------------------ |
| When      | Before evaluation is queued (pre-response) | During/after evaluation processing         |
| Purpose   | Filter which sets are available            | React to evaluation lifecycle              |
| Access    | Input text, tags, operation type           | Full evaluation data, results              |
| Can skip? | Yes (remove sets from array)               | Yes (PreEvaluationEvent::skipEvaluation()) |
| Use case  | Routing logic, conditional evaluation      | Notifications, logging, post-processing    |
<?php

namespace Drupal\Tests\mymodule\Kernel;

use Drupal\ai_autoevals\Entity\EvaluationSet;
use Drupal\KernelTests\KernelTestBase;

/**
 * Tests hook_ai_autoevals_evaluation_sets_alter().
 */
class HookTest extends KernelTestBase {

  /**
   * {@inheritdoc}
   */
  protected static $modules = ['mymodule', 'ai_autoevals'];

  /**
   * Tests language filtering.
   */
  public function testLanguageFiltering(): void {
    // Create two enabled evaluation sets as fixtures.
    $set1 = EvaluationSet::create([
      'id' => 'set1',
      'label' => 'Test Set 1',
      'enabled' => TRUE,
    ]);
    $set1->save();
    $set2 = EvaluationSet::create([
      'id' => 'set2',
      'label' => 'Test Set 2',
      'enabled' => TRUE,
    ]);
    $set2->save();
    // Set language to Spanish.
    // NOTE(review): confirm this method exists on the language manager used
    // in kernel tests; core's LanguageManagerInterface does not declare
    // setConfigurableLanguages().
    \Drupal::languageManager()->setConfigurableLanguages(['es']);
    // Ask the manager for a matching set; the example hook should filter
    // everything out because the current language is not English.
    $evaluationManager = \Drupal::service('ai_autoevals.evaluation_manager');
    $matchingSet = $evaluationManager->getMatchingEvaluationSetWithHook(
      [],
      'chat',
      'test input',
      NULL,
    );
    // Assert no sets returned due to Spanish language.
    $this->assertNull($matchingSet);
  }

}

Create specialized fact extraction plugins for your domain. See Plugin Development for detailed instructions.

Use the centralized keyword matching service in your custom code:

<?php

namespace Drupal\my_module\Service;

use Drupal\ai_autoevals\Service\KeywordMatcher;
use Drupal\Core\Logger\LoggerChannelFactoryInterface;

/**
 * Service for custom keyword-based filtering.
 */
class CustomFilterService {

  public function __construct(
    protected KeywordMatcher $keywordMatcher,
    protected LoggerChannelFactoryInterface $loggerFactory
  ) {}

  /**
   * Check if content should be filtered based on keywords.
   *
   * @param string $content
   *   The text to inspect.
   * @param array $exclusions
   *   Exclusion keywords; a single match triggers filtering.
   *
   * @return bool
   *   TRUE when any exclusion keyword is found in the content.
   */
  public function shouldFilterContent(string $content, array $exclusions): bool {
    // "Any" mode: one keyword hit is enough.
    return $this->keywordMatcher->matchesAny($content, $exclusions);
  }

  /**
   * Check if content contains all required keywords.
   *
   * @param string $content
   *   The text to inspect.
   * @param array $required
   *   Keywords that must all be present.
   *
   * @return bool
   *   TRUE only when every required keyword is present.
   */
  public function containsAllRequired(string $content, array $required): bool {
    // "All" mode: every keyword must be present.
    return $this->keywordMatcher->matchesAll($content, $required);
  }

}

Register the service in my_module.services.yml:

services:
my_module.custom_filter:
class: Drupal\my_module\Service\CustomFilterService
arguments:
- '@ai_autoevals.keyword_matcher'
- '@logger.factory'

Integrate with Drupal’s content moderation system:

<?php

namespace Drupal\my_module\EventSubscriber;

use Drupal\ai_autoevals\Event\PostEvaluationEvent;
use Drupal\Core\Entity\EntityTypeManagerInterface;
use Drupal\Core\Logger\LoggerChannelFactoryInterface;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;

/**
 * Flags content for moderation when its AI evaluation scores low.
 */
class ContentModerationSubscriber implements EventSubscriberInterface {

  public function __construct(
    protected EntityTypeManagerInterface $entityTypeManager,
    protected LoggerChannelFactoryInterface $loggerFactory
  ) {}

  /**
   * {@inheritdoc}
   */
  public static function getSubscribedEvents(): array {
    return [
      PostEvaluationEvent::EVENT_NAME => ['onPostEvaluation', 0],
    ];
  }

  /**
   * Reacts to a completed evaluation.
   *
   * A missing score (NULL) is treated as failing, so unscored evaluations
   * are also routed to moderation.
   */
  public function onPostEvaluation(PostEvaluationEvent $event): void {
    $evaluation = $event->getEvaluationResult();
    $score = $event->getScore();
    // Only moderate if score is below threshold. NULL (no score) also
    // triggers moderation. Uppercase NULL matches Drupal coding standards
    // used throughout these examples.
    if ($score === NULL || $score < 0.5) {
      $this->flagForModeration($evaluation);
    }
  }

  /**
   * Moves the node referenced in the evaluation metadata to "needs_review".
   */
  protected function flagForModeration($evaluation): void {
    // Get related content node (assuming it's stored in metadata).
    $metadata = $evaluation->getMetadata();
    if (empty($metadata['node_id'])) {
      return;
    }
    /** @var \Drupal\node\NodeInterface $node */
    $node = $this->entityTypeManager->getStorage('node')->load($metadata['node_id']);
    if ($node && $node->hasField('moderation_state')) {
      // Change moderation state to "needs_review".
      $node->set('moderation_state', 'needs_review');
      $node->save();
      // Log the decision for auditability.
      $this->loggerFactory->get('my_module')->info(
        'Node @nid flagged for review due to low AI evaluation score',
        ['@nid' => $node->id()]
      );
    }
  }

}

Send notifications for low-scoring content:

<?php

namespace Drupal\my_module\EventSubscriber;

use Drupal\ai_autoevals\Event\PostEvaluationEvent;
use Drupal\Core\Mail\MailManagerInterface;
use Drupal\Core\Logger\LoggerChannelFactoryInterface;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;

/**
 * Emails an alert when an evaluation scores very low.
 */
class NotificationSubscriber implements EventSubscriberInterface {

  public function __construct(
    protected MailManagerInterface $mailManager,
    protected LoggerChannelFactoryInterface $loggerFactory
  ) {}

  /**
   * {@inheritdoc}
   */
  public static function getSubscribedEvents(): array {
    // Priority -10: run after higher-priority subscribers such as the
    // moderation subscriber (priority 0).
    return [
      PostEvaluationEvent::EVENT_NAME => ['onPostEvaluation', -10],
    ];
  }

  /**
   * Reacts to a completed evaluation.
   */
  public function onPostEvaluation(PostEvaluationEvent $event): void {
    $evaluation = $event->getEvaluationResult();
    $score = $event->getScore();
    // Send alert for very low scores; NULL means no score was produced.
    if ($score !== NULL && $score < 0.3) {
      $this->sendLowScoreAlert($evaluation);
    }
  }

  /**
   * Mails a low-score alert to the configured recipient.
   */
  protected function sendLowScoreAlert($evaluation): void {
    $to = $this->getNotificationEmail();
    if ($to === '') {
      // No recipient configured; nothing to send.
      return;
    }
    // The subject line is expected to be set by hook_mail() for the
    // 'low_score_alert' key; pass the evaluation data as parameters.
    $params = [
      'evaluation' => $evaluation,
      'score' => $evaluation->getScore(),
      'input' => $evaluation->getInput(),
      'output' => $evaluation->getOutput(),
    ];
    // NOTE(review): langcode is hard-coded to 'en'; consider using the
    // recipient's preferred language.
    $result = $this->mailManager->mail('my_module', 'low_score_alert', $to, 'en', $params);
    if ($result['result']) {
      $this->loggerFactory->get('my_module')->info(
        'Low score alert sent for evaluation @id',
        ['@id' => $evaluation->id()]
      );
    }
  }

  /**
   * Returns the alert recipient address, or '' when not configured.
   */
  protected function getNotificationEmail(): string {
    // config()->get() returns NULL for a missing key; coalesce so the
    // declared string return type is honoured instead of raising TypeError.
    return \Drupal::config('my_module.settings')->get('notification_email') ?? '';
  }

}

Create a service that integrates with external evaluation systems:

<?php

namespace Drupal\my_module\Service;

use Drupal\ai_autoevals\Service\EvaluationManager;
use Drupal\Core\Logger\LoggerChannelFactoryInterface;
use GuzzleHttp\ClientInterface;

/**
 * Service for external evaluation integration.
 */
class ExternalEvaluationService {

  public function __construct(
    protected EvaluationManager $evaluationManager,
    protected LoggerChannelFactoryInterface $loggerFactory,
    protected ClientInterface $httpClient
  ) {}

  /**
   * Send evaluation results to external system.
   *
   * Failures are logged and swallowed so a flaky external API cannot break
   * the evaluation flow.
   */
  public function sendToExternalSystem($evaluation): void {
    $data = [
      'evaluation_id' => $evaluation->id(),
      'score' => $evaluation->getScore(),
      'input' => $evaluation->getInput(),
      'output' => $evaluation->getOutput(),
      'facts' => $evaluation->getFacts(),
      'timestamp' => $evaluation->getCreatedTime(),
    ];
    try {
      // The response body is not used; Guzzle throws on non-2xx statuses,
      // which the catch below turns into a log entry.
      $this->httpClient->post('https://external-api.example.com/evaluations', [
        'json' => $data,
        'headers' => [
          'Authorization' => 'Bearer ' . $this->getApiKey(),
        ],
      ]);
      $this->loggerFactory->get('my_module')->info(
        'Evaluation @id sent to external system',
        ['@id' => $evaluation->id()]
      );
    }
    catch (\Exception $e) {
      $this->loggerFactory->get('my_module')->error(
        'Failed to send evaluation @id to external system: @message',
        [
          '@id' => $evaluation->id(),
          '@message' => $e->getMessage(),
        ]
      );
    }
  }

  /**
   * Sync evaluation results from external system.
   */
  public function syncFromExternalSystem(): void {
    try {
      $response = $this->httpClient->get('https://external-api.example.com/evaluations', [
        'headers' => [
          'Authorization' => 'Bearer ' . $this->getApiKey(),
        ],
      ]);
      // JSON_THROW_ON_ERROR: invalid payloads raise \JsonException (caught
      // below) instead of silently decoding to NULL and breaking the loop.
      $evaluations = json_decode((string) $response->getBody(), TRUE, 512, JSON_THROW_ON_ERROR);
      foreach ($evaluations as $data) {
        $this->updateEvaluationFromExternalData($data);
      }
    }
    catch (\Exception $e) {
      $this->loggerFactory->get('my_module')->error(
        'Failed to sync evaluations from external system: @message',
        ['@message' => $e->getMessage()]
      );
    }
  }

  /**
   * Update evaluation from external data.
   */
  protected function updateEvaluationFromExternalData(array $data): void {
    // Logic to update evaluation based on external data.
  }

  /**
   * Get API key from config, or '' when not configured.
   */
  protected function getApiKey(): string {
    // Coalesce: config get() returns NULL for a missing key, which would
    // violate the string return type.
    return \Drupal::config('my_module.settings')->get('api_key') ?? '';
  }

  /**
   * Get AI configuration.
   */
  protected function getAiConfig(): array {
    $aiConfig = \Drupal::service('ai_autoevals.config');
    return [
      'provider_id' => $aiConfig->getProviderId(),
      'model_id' => $aiConfig->getModelId(),
      'configured' => $aiConfig->isConfigured(),
    ];
  }

}

Register the service in my_module.services.yml:

services:
my_module.external_evaluation:
class: Drupal\my_module\Service\ExternalEvaluationService
arguments:
- '@ai_autoevals.evaluation_manager'
- '@logger.factory'
- '@http_client'

Create a custom theme override for the dashboard:

<?php

/**
 * Implements hook_theme().
 *
 * Registers an override for the AI AutoEvals dashboard template shipped in
 * this module's templates directory.
 */
function my_module_theme(): array {
  $templateDir = \Drupal::service('extension.path.resolver')->getPath('module', 'my_module') . '/templates';
  return [
    'ai_autoevals_dashboard' => [
      'path' => $templateDir,
      'template' => 'ai-autoevals-dashboard',
      // Defaults for every variable the template may render.
      'variables' => [
        'total_evaluations' => 0,
        'average_score' => 0,
        'by_status' => [],
        'by_evaluation_set' => [],
        'recent_evaluations' => [],
        'score_distribution' => [],
      ],
    ],
  ];
}

Create templates/ai-autoevals-dashboard.html.twig:

{#
/**
 * @file
 * Custom dashboard template.
 *
 * Available variables (defaults defined in my_module_theme()):
 * - total_evaluations, average_score, by_status, by_evaluation_set,
 *   recent_evaluations, score_distribution.
 */
#}
<div class="ai-autoevals-dashboard">
<h2>{{ 'AI Evaluations Dashboard'|t }}</h2>
{# Headline metric cards. #}
<div class="dashboard-metrics">
<div class="metric">
<h3>{{ 'Total Evaluations'|t }}</h3>
<p class="value">{{ total_evaluations }}</p>
</div>
<div class="metric">
<h3>{{ 'Average Score'|t }}</h3>
{# Rounded score feeds the score-N CSS class for colour coding. #}
<p class="value score-{{ average_score|round }}">{{ average_score|number_format(2) }}</p>
</div>
</div>
<div class="dashboard-sections">
{# Per-status counts. #}
<div class="section">
<h3>{{ 'By Status'|t }}</h3>
<ul>
{% for status, count in by_status %}
<li>{{ status }}: {{ count }}</li>
{% endfor %}
</ul>
</div>
{# Most recent evaluations table. #}
<div class="section">
<h3>{{ 'Recent Evaluations'|t }}</h3>
<table>
<thead>
<tr>
<th>{{ 'ID'|t }}</th>
<th>{{ 'Score'|t }}</th>
<th>{{ 'Status'|t }}</th>
</tr>
</thead>
<tbody>
{% for evaluation in recent_evaluations %}
<tr>
<td>{{ evaluation.id }}</td>
<td>{{ evaluation.score }}</td>
<td>{{ evaluation.status }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>

Add custom CSS:

/* Outer wrapper for the custom dashboard. */
.ai-autoevals-dashboard {
padding: 20px;
}
/* Metric cards laid out side by side. */
.dashboard-metrics {
display: flex;
gap: 20px;
margin-bottom: 30px;
}
.metric {
background: #f5f5f5;
padding: 20px;
border-radius: 8px;
flex: 1;
}
.metric .value {
font-size: 32px;
font-weight: bold;
}
/* Equal-width content sections below the metrics. */
.dashboard-sections {
display: flex;
gap: 20px;
}
.section {
flex: 1;
}
/* Colour coding keyed off the rounded average score
   (see score-{{ average_score|round }} in the template). */
.score-1 {
color: green;
}
.score-0 {
color: red;
}

Add a custom field to store additional data:

<?php

/**
 * Implements hook_entity_base_field_info_alter().
 *
 * Adds an 'external_id' base field to AI AutoEvals evaluation results so
 * records can be correlated with an external evaluation system.
 *
 * @param \Drupal\Core\Field\FieldDefinitionInterface[] $fields
 *   Base field definitions, keyed by field name; altered by reference.
 * @param \Drupal\Core\Entity\EntityTypeInterface $entity_type
 *   The entity type being processed.
 */
function my_module_entity_base_field_info_alter(&$fields, \Drupal\Core\Entity\EntityTypeInterface $entity_type) {
if ($entity_type->id() === 'ai_autoevals_evaluation_result') {
// Add custom field
$fields['external_id'] = \Drupal\Core\Field\BaseFieldDefinition::create('string')
->setLabel(t('External ID'))
->setDescription(t('ID from external evaluation system.'))
->setSettings([
'max_length' => 255,
'text_processing' => 0,
])
->setDefaultValue('')
// Visible in rendered output by default.
->setDisplayOptions('view', [
'label' => 'above',
'type' => 'string',
'weight' => -5,
])
// Editable via a plain textfield on entity forms.
->setDisplayOptions('form', [
'type' => 'string_textfield',
'weight' => -5,
])
->setDisplayConfigurable('form', TRUE)
->setDisplayConfigurable('view', TRUE);
}
}

Update entity schema:

Terminal window
drush entity:updates  # Note: removed in Drupal 9+; install new base fields in an update hook via the entity definition update manager instead.
drush updatedb

Use the custom field:

// Load a stored evaluation result and record the external system's ID on
// the custom 'external_id' base field.
$evaluation = EvaluationResult::load($evaluation_id);
$evaluation->set('external_id', 'EXT-12345');
$evaluation->save();

Create a custom batch operation for evaluations:

<?php

namespace Drupal\my_module\Form;

use Drupal\ai_autoevals\Entity\EvaluationResult;
use Drupal\Core\Batch\BatchBuilder;
use Drupal\Core\Form\FormBase;
use Drupal\Core\Form\FormStateInterface;

/**
 * Custom batch operation form.
 */
class CustomBatchForm extends FormBase {

  /**
   * {@inheritdoc}
   */
  public function getFormId(): string {
    return 'my_module_custom_batch_form';
  }

  /**
   * {@inheritdoc}
   */
  public function buildForm(array $form, FormStateInterface $form_state): array {
    $form['description'] = [
      '#markup' => '<p>' . $this->t('Process evaluations with custom logic.') . '</p>',
    ];
    $form['actions'] = [
      '#type' => 'actions',
    ];
    $form['actions']['submit'] = [
      '#type' => 'submit',
      '#value' => $this->t('Process Evaluations'),
    ];
    return $form;
  }

  /**
   * {@inheritdoc}
   */
  public function submitForm(array &$form, FormStateInterface $form_state): void {
    $batch = new BatchBuilder();
    $batch
      ->setTitle($this->t('Processing Evaluations'))
      ->setFinishCallback([__CLASS__, 'batchFinished'])
      ->setInitMessage($this->t('Starting batch processing...'))
      ->setProgressMessage($this->t('Processing... @current of @total.'))
      ->setErrorMessage($this->t('An error occurred during processing.'));
    // One operation per evaluation keeps memory usage flat for large sets.
    $evaluation_ids = $this->getEvaluationIds();
    foreach ($evaluation_ids as $id) {
      $batch->addOperation([__CLASS__, 'processEvaluation'], [$id]);
    }
    batch_set($batch->toArray());
  }

  /**
   * Batch operation callback: processes a single evaluation.
   */
  public static function processEvaluation($id, &$context): void {
    // The entity may have been deleted between queueing and execution.
    $evaluation = EvaluationResult::load($id);
    if ($evaluation) {
      // Your custom processing logic
      // ...
    }
  }

  /**
   * Batch finished callback.
   */
  public static function batchFinished($success, $results, $operations): void {
    if ($success) {
      \Drupal::messenger()->addMessage(t('All evaluations processed successfully.'));
    }
    else {
      \Drupal::messenger()->addError(t('An error occurred during processing.'));
    }
  }

  /**
   * Get IDs of completed evaluations to process.
   */
  protected function getEvaluationIds(): array {
    return \Drupal::entityQuery('ai_autoevals_evaluation_result')
      ->accessCheck(FALSE)
      ->condition('status', 'completed')
      ->execute();
  }

}

Create a custom REST API endpoint for evaluations:

<?php

namespace Drupal\my_module\Plugin\rest\resource;

use Drupal\rest\Plugin\ResourceBase;
use Drupal\rest\ResourceResponse;
use Drupal\ai_autoevals\Entity\EvaluationResult;

/**
 * REST resource for evaluations.
 *
 * @RestResource(
 * id = "my_module_evaluations",
 * label = @Translation("Evaluations"),
 * uri_paths = {
 * "canonical" = "/api/my-module/evaluations/{id}"
 * }
 * )
 */
class EvaluationResource extends ResourceBase {

  /**
   * Responds to GET requests.
   *
   * Returns a flat array of evaluation data for the given ID.
   *
   * NOTE(review): ResourceResponse is cacheable — consider adding the
   * evaluation entity as a cacheable dependency, and the Drupal REST
   * convention is to throw NotFoundHttpException rather than returning a
   * 404 body; confirm before shipping.
   */
  public function get($id): ResourceResponse {
    $evaluation = EvaluationResult::load($id);
    if (!$evaluation) {
      return new ResourceResponse(['error' => 'Evaluation not found'], 404);
    }
    $data = [
      'id' => $evaluation->id(),
      'score' => $evaluation->getScore(),
      'input' => $evaluation->getInput(),
      'output' => $evaluation->getOutput(),
      'facts' => $evaluation->getFacts(),
      'status' => $evaluation->getStatus(),
    ];
    return new ResourceResponse($data);
  }

}