"""OpenRouter pricing service with dynamic pricing fetched from the API."""

import logging
from typing import Any

import httpx
from rsb.models.base_model import BaseModel

from agentle.responses.pricing.modality import Modality

logger = logging.getLogger(__name__)


class OpenRouterPricingService(BaseModel):
    """
    OpenRouter implementation of PricingService using dynamic pricing fetched from the API.

    This service fetches pricing information from OpenRouter's /models API endpoint
    and caches it for performance. Pricing is fetched lazily on the first request.

    The OpenRouter API returns pricing per token, which is converted to per-million-token
    pricing for consistency with other pricing services.

    Attributes:
        api_key: OpenRouter API key for authentication
        base_url: Base URL for the OpenRouter API (defaults to https://openrouter.ai/api/v1)
        http_client: Optional custom HTTP client for requests
        _models_cache: Internal cache of model pricing data
    """

    type: str = "openrouter"
    api_key: str | None = None
    base_url: str = "https://openrouter.ai/api/v1"
    http_client: httpx.AsyncClient | None = None
    _models_cache: dict[str, dict[str, Any]] | None = None

    def __init__(
        self,
        api_key: str | None = None,
        base_url: str = "https://openrouter.ai/api/v1",
        http_client: httpx.AsyncClient | None = None,
    ):
        """
        Initialize the OpenRouter pricing service.

        Args:
            api_key: OpenRouter API key. If not provided, it is read from the
                OPENROUTER_API_KEY environment variable.
            base_url: Base URL for the OpenRouter API
            http_client: Optional custom HTTP client for requests
        """
        super().__init__()
        self.api_key = api_key
        self.base_url = base_url
        self.http_client = http_client
        self._models_cache = None

    async def _fetch_models(self) -> dict[str, dict[str, Any]]:
        """
        Fetch available models from the OpenRouter API and cache them.

        Returns:
            Dictionary mapping model IDs to model information, including pricing

        Note:
            API failures are caught and logged; an empty cache is returned
            instead of raising.
        """
        if self._models_cache is not None:
            return self._models_cache

        # Get the API key from the instance or the environment
        from os import getenv

        _api_key = self.api_key or getenv("OPENROUTER_API_KEY")
        if not _api_key:
            logger.warning(
                "No OpenRouter API key provided, pricing will not be available"
            )
            self._models_cache = {}
            return self._models_cache

        headers = {
            "Authorization": f"Bearer {_api_key}",
            "Content-Type": "application/json",
        }

        client = self.http_client or httpx.AsyncClient()

        try:
            response = await client.get(
                f"{self.base_url}/models",
                headers=headers,
                timeout=30.0,
            )
            response.raise_for_status()

            models_response = response.json()
            self._models_cache = {
                model["id"]: model for model in models_response.get("data", [])
            }

            logger.debug(
                f"Fetched pricing for {len(self._models_cache)} models from OpenRouter"
            )

            return self._models_cache
        except Exception as e:
            logger.warning(f"Failed to fetch models from OpenRouter: {e}")
            # Return an empty cache on failure
            self._models_cache = {}
            return self._models_cache
        finally:
            # Only close clients created by this method; caller-provided clients stay open
            if self.http_client is None:
                await client.aclose()

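    # Illustrative sketch (not an exhaustive schema) of the /models payload shape
    # this class relies on. Field values here are made up; prices may be returned
    # as strings, which the getters below convert to floats:
    #
    #     {
    #         "data": [
    #             {
    #                 "id": "anthropic/claude-3-opus",
    #                 "pricing": {
    #                     "prompt": "0.000015",             # USD per input token
    #                     "completion": "0.000075",         # USD per output token
    #                     "input_cache_read": "0.0000015"   # optional, USD per cached input token
    #                 }
    #             }
    #         ]
    #     }
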
    async def get_input_price_per_million(
        self,
        model: str,
        modality: Modality = "text",
        cached: bool = False,
    ) -> float | None:
        """
        Get the input token price per million tokens for a given model.

        Fetches pricing from OpenRouter's /models API endpoint and converts
        from per-token to per-million-tokens pricing.

        Args:
            model: The model identifier (e.g., "anthropic/claude-3-opus")
            modality: The type of input ("text", "image", "audio", "video")
                Note: OpenRouter primarily uses "text" modality for prompt pricing
            cached: Whether this is cached input (for models that support caching)
                Note: OpenRouter has input_cache_read/write pricing for some models

        Returns:
            Price per million input tokens in USD, or None if pricing is unknown
        """
        try:
            models = await self._fetch_models()

            if model not in models:
                logger.debug(
                    f"OpenRouter model '{model}' not found in models list. Available models: {len(models)}"
                )
                return None

            model_info = models[model]
            pricing = model_info.get("pricing", {})

            # Handle cached input pricing if requested
            if cached:
                # Check for input_cache_read pricing (for prompt caching)
                input_cache_read = pricing.get("input_cache_read")
                if input_cache_read is not None:
                    # Convert string to float if needed
                    if isinstance(input_cache_read, str):
                        try:
                            input_cache_read = float(input_cache_read)
                        except ValueError:
                            logger.warning(
                                f"Could not parse input_cache_read price '{input_cache_read}' for model {model}"
                            )
                            return None

                    # OpenRouter returns price per token, convert to per million
                    return float(input_cache_read) * 1_000_000

            # Get standard prompt pricing
            prompt_price = pricing.get("prompt", 0.0)

            # Convert string prices to float if needed
            if isinstance(prompt_price, str):
                try:
                    prompt_price = float(prompt_price)
                except ValueError:
                    logger.warning(
                        f"Could not parse prompt price '{prompt_price}' for model {model}"
                    )
                    return None

            # OpenRouter returns price per token, convert to price per million tokens
            return float(prompt_price) * 1_000_000

        except Exception as e:
            logger.error(
                f"Error fetching input pricing for model {model}: {e}. Returning None"
            )
            return None

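    # Worked example of the conversion above (values are hypothetical): a "prompt"
    # price of "0.000003" USD per token parses to 3e-06, and 3e-06 * 1_000_000
    # yields 3.0 USD per million input tokens. The same conversion is applied to
    # "completion" pricing in get_output_price_per_million below.
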
    async def get_output_price_per_million(
        self, model: str, modality: Modality = "text"
    ) -> float | None:
        """
        Get the output token price per million tokens for a given model.

        Fetches pricing from OpenRouter's /models API endpoint and converts
        from per-token to per-million-tokens pricing.

        Args:
            model: The model identifier (e.g., "anthropic/claude-3-opus")
            modality: The type of output ("text", "image", "audio", "video")
                Note: OpenRouter primarily uses "text" modality for completion pricing

        Returns:
            Price per million output tokens in USD, or None if pricing is unknown
        """
        try:
            models = await self._fetch_models()

            if model not in models:
                logger.debug(
                    f"OpenRouter model '{model}' not found in models list. Available models: {len(models)}"
                )
                return None

            model_info = models[model]
            pricing = model_info.get("pricing", {})
            completion_price = pricing.get("completion", 0.0)

            # Convert string prices to float if needed
            if isinstance(completion_price, str):
                try:
                    completion_price = float(completion_price)
                except ValueError:
                    logger.warning(
                        f"Could not parse completion price '{completion_price}' for model {model}"
                    )
                    return None

            # OpenRouter returns price per token, convert to price per million tokens
            return float(completion_price) * 1_000_000

        except Exception as e:
            logger.error(
                f"Error fetching output pricing for model {model}: {e}. Returning None"
            )
            return None

    def clear_cache(self) -> None:
        """
        Clear the cached model pricing data.

        Useful for forcing a refresh of pricing information from the API.
        """
        self._models_cache = None
        logger.debug("Cleared OpenRouter pricing cache")
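
# Minimal usage sketch, assuming OPENROUTER_API_KEY is set in the environment and
# that "anthropic/claude-3-opus" is listed by OpenRouter; the getters return None
# when the model or its pricing cannot be resolved.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        service = OpenRouterPricingService()
        model_id = "anthropic/claude-3-opus"
        input_price = await service.get_input_price_per_million(model_id)
        output_price = await service.get_output_price_per_million(model_id)
        print(f"{model_id}: input=${input_price}/M tokens, output=${output_price}/M tokens")

    asyncio.run(_demo())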