API Reference

Complete API documentation for all Fair Forge modules.

Modules

Fair Forge is organized into eight top-level modules: fair_forge.metrics, fair_forge.core, fair_forge.schemas, fair_forge.statistical, fair_forge.generators, fair_forge.runners, fair_forge.storage, and fair_forge.guardians. The import patterns below cover each one.

Import Patterns

Metrics

from fair_forge.metrics.toxicity import Toxicity
from fair_forge.metrics.bias import Bias
from fair_forge.metrics.context import Context
from fair_forge.metrics.conversational import Conversational
from fair_forge.metrics.humanity import Humanity
from fair_forge.metrics.best_of import BestOf

Core

from fair_forge.core.retriever import Retriever
from fair_forge.core.base import FairForge
from fair_forge.core.guardian import Guardian
from fair_forge.core.sentiment import SentimentAnalyzer

Schemas

from fair_forge.schemas.common import Dataset, Batch
from fair_forge.schemas.toxicity import ToxicityMetric
from fair_forge.schemas.bias import BiasMetric, GuardianLLMConfig

Statistical

from fair_forge.statistical import FrequentistMode, BayesianMode
from fair_forge.statistical.base import StatisticalMode

Generators

from fair_forge.generators import (
    BaseGenerator,
    create_markdown_loader,
    SequentialStrategy,
    RandomSamplingStrategy,
)

Runners

from fair_forge.runners import AlquimiaRunner
from fair_forge.schemas.runner import BaseRunner

Storage

from fair_forge.storage import (
    create_local_storage,
    create_lakefs_storage,
)
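
For illustration, a hypothetical sketch of choosing a backend; the factory argument shown is an assumption, not confirmed API:

# Hypothetical usage; the exact factory parameters are an assumption
# (here, a local base directory for persisted results).
storage = create_local_storage("./fair_forge_results")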

Guardians

from fair_forge.guardians import LLamaGuard, IBMGranite
from fair_forge.guardians.llms.providers import OpenAIGuardianProvider

Quick Reference

Run a Metric

# Metric, RetrieverClass, and metric_parameters are placeholders for a
# concrete metric class, a Retriever subclass, and that metric's keyword
# arguments.
results = Metric.run(
    RetrieverClass,
    **metric_parameters,
    verbose=True,
)

Create a Retriever

from fair_forge.core.retriever import Retriever
from fair_forge.schemas.common import Dataset

class MyRetriever(Retriever):
    def load_dataset(self) -> list[Dataset]:
        return [Dataset(...)]
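
Combining the two patterns, a concrete call might look like the sketch below, assuming Toxicity needs no keyword arguments beyond the retriever:

from fair_forge.metrics.toxicity import Toxicity

results = Toxicity.run(
    MyRetriever,
    verbose=True,
)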

Generate Test Data

# `generator` is an instance of a BaseGenerator subclass; `loader` comes
# from a context-loader factory such as create_markdown_loader.
datasets = await generator.generate_dataset(
    context_loader=loader,
    source="./docs",
    assistant_id="my-assistant",
    num_queries_per_chunk=3,
)
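
Since generate_dataset is a coroutine, it runs inside an event loop. A minimal end-to-end sketch under stated assumptions: MyGenerator is a hypothetical BaseGenerator subclass, and calling create_markdown_loader and SequentialStrategy with the arguments shown is an assumption, not confirmed API.

import asyncio

from fair_forge.generators import (
    BaseGenerator,
    create_markdown_loader,
    SequentialStrategy,
)

async def main() -> None:
    # Assumption: the loader factory needs no arguments; the markdown
    # directory is passed to generate_dataset via `source` below.
    loader = create_markdown_loader()
    # MyGenerator is hypothetical; substitute a real BaseGenerator subclass.
    generator = MyGenerator(strategy=SequentialStrategy())
    datasets = await generator.generate_dataset(
        context_loader=loader,
        source="./docs",
        assistant_id="my-assistant",
        num_queries_per_chunk=3,
    )
    print(f"Generated {len(datasets)} datasets")

asyncio.run(main())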

Execute Tests

# `runner` is a BaseRunner implementation such as AlquimiaRunner; `dataset`
# is a Dataset produced by a retriever or generator.
updated_dataset, summary = await runner.run_dataset(dataset)
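
Like generation, run_dataset is a coroutine and needs an event loop. A sketch, assuming `runner` and `dataset` are already configured elsewhere:

import asyncio

from fair_forge.runners import AlquimiaRunner

async def execute(runner: AlquimiaRunner, dataset):
    # run_dataset returns the updated dataset plus an execution summary.
    return await runner.run_dataset(dataset)

# `runner` and `dataset` are assumed to be constructed and loaded elsewhere.
updated_dataset, summary = asyncio.run(execute(runner, dataset))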

Next Steps