# Source code for models.mistral_api_model
"""
Script
------
mistral_api_model.py
Path
----
models/mistral_api_model.py
Purpose
-------
Mistral AI API integration for orchestration workflows.
Supports models via Mistral's REST API with proper authentication.
API-based only (not Le Chat Pro manual access).
Inputs
------
model_name (str): Mistral model identifier
messages (list): Conversation messages in API format
max_tokens (int): Maximum response length
temperature (float): Sampling temperature
Outputs
-------
Dictionary: {output, model, tokens_used, provider}
Compliance
----------
API-based orchestration (compliant with Mistral ToS)
Requires API key authentication (environment variable)
Not for Le Chat Pro automation
Configuration
-------------
MISTRAL_API_KEY: API key for authentication (via env var)
MISTRAL_MODEL: Model identifier
Failure Modes
-------------
- Missing API key -> ValueError (raised at construction)
- Invalid model -> API error
- Rate limit exceeded -> error dict
- Timeout -> error dict
Author: Julen Gamboa <julen.gamboa.ds@gmail.com>
Created
-------
2026-02-14
Status
------
PLACEHOLDER - Not yet implemented
Ready for implementation in Phase 2
"""
from typing import Any, Dict, List, Optional
import os
import logging
logger = logging.getLogger(__name__)
class MistralAPIModel:
    """Mistral AI API provider with model selector.

    Supports multiple Mistral model options from budget-friendly to
    high-capability.

    Model Options (use short names or full identifiers):
        - "small"    -> mistral-medium-latest (recommended, good balance, cheap)
        - "medium"   -> mistral-large-2411 (most capable, standard pricing)
        - "mini"     -> ministral-3b (cheapest, edge deployment)
        - "code"     -> codestral-2508 (coding-focused, cheap)
        - "devstral" -> devstral-2 (coding-focused, cheap)
        - Full identifier: "mistral-large-2411" (used as-is)

    Pricing Guide (per 1M tokens, input / output):
        - ministral-3b:          $0.1 / $0.5   (cheapest)
        - ministral-14b:         $0.5 / $2.5
        - codestral-2508:        $0.5 / $2.5
        - mistral-medium-latest: $1.0 / $5.0
        - mistral-large-2411:    $3.0 / $15.0  (most capable)

    Examples:
        # Using short names (recommended)
        small = MistralAPIModel(model="small")
        code = MistralAPIModel(model="code")

        # Using full identifiers
        custom = MistralAPIModel(model="mistral-large-2411")
    """

    # Model selector mapping: short names -> full identifiers.
    # Unknown keys fall through unchanged in __init__, so full
    # identifiers may be passed directly.
    MODEL_ALIASES = {
        "small": "mistral-medium-latest",
        "medium": "mistral-large-2411",
        "mini": "ministral-3b",
        "code": "codestral-2508",
        "devstral": "devstral-2",
    }

    def __init__(
        self,
        model: str = "small",
        api_key: Optional[str] = None,
        base_url: str = "https://api.mistral.ai/v1",
    ):
        """
        Initialize Mistral API provider.

        Args:
            model: Model to use. Can be:
                - Short name: "small", "medium", "mini", "code", "devstral"
                - Full identifier: "mistral-large-2411"
            api_key: API key (defaults to MISTRAL_API_KEY env var)
            base_url: API endpoint base URL

        Raises:
            ValueError: If no API key is given and MISTRAL_API_KEY is unset
                or empty.
        """
        # Resolve a short-name alias to its full identifier; anything not
        # in MODEL_ALIASES is assumed to already be a full identifier and
        # is used as-is.
        self.model_name: str = self.MODEL_ALIASES.get(model, model)
        self.api_key: Optional[str] = api_key or os.getenv("MISTRAL_API_KEY")
        self.base_url: str = base_url
        # Fail fast at construction rather than on the first API call.
        if not self.api_key:
            raise ValueError(
                "Mistral API key required: set MISTRAL_API_KEY env var"
            )

    def call(
        self,
        prompt: str,
        messages: Optional[List[Dict[str, str]]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """
        Call Mistral API (placeholder - not implemented).

        Args:
            prompt: User prompt
            messages: Message history
            **kwargs: Additional parameters

        Returns:
            Dictionary with response (not implemented)

        Raises:
            NotImplementedError: Always; scheduled for Phase 2.
        """
        raise NotImplementedError(
            "Mistral API model not yet implemented. "
            "Scheduled for Phase 2 development."
        )