Skip to content

Commit 4350389

Browse files
authored
feat(llma): add prompt management (#417)
* feat(llma): add prompt management * chore(llma): bump version * fix(llma): use SDK session with retry logic for prompt fetching Use _get_session() from posthog/request.py instead of raw requests.get() to benefit from the SDK's existing retry configuration on transient network failures.
1 parent c32c783 commit 4350389

File tree

5 files changed

+858
-1
lines changed

5 files changed

+858
-1
lines changed

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,9 @@
# 7.8.0 - 2026-01-28

feat(llma): add prompt management

Adds the Prompt Management feature. At the time of release, this feature is in a closed alpha.
17
# 7.7.0 - 2026-01-15
28

39
feat(ai): Add OpenAI Agents SDK integration

posthog/ai/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from posthog.ai.prompts import Prompts
2+
3+
__all__ = ["Prompts"]

posthog/ai/prompts.py

Lines changed: 271 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,271 @@
"""
Prompt management for PostHog AI SDK.

Fetch and compile LLM prompts from PostHog with caching and fallback support.
"""

import logging
import re
import time
import urllib.parse
from typing import Any, Dict, Optional, Union

from posthog.request import DEFAULT_HOST, USER_AGENT, _get_session
from posthog.utils import remove_trailing_slash

# Shared SDK logger; stale-cache/fallback warnings from Prompts.get() go here.
log = logging.getLogger("posthog")

# How long a fetched prompt is served from cache before re-fetching.
DEFAULT_CACHE_TTL_SECONDS = 300  # 5 minutes

# Values that can be substituted into {{placeholder}} templates via Prompts.compile().
PromptVariables = Dict[str, Union[str, int, float, bool]]
class CachedPrompt:
    """A cache entry: the prompt text plus the epoch time it was retrieved."""

    def __init__(self, prompt: str, fetched_at: float):
        # Stored verbatim; freshness checks compare fetched_at against a TTL.
        self.prompt: str = prompt
        self.fetched_at: float = fetched_at
30+
31+
def _is_prompt_api_response(data: Any) -> bool:
32+
"""Check if the response is a valid prompt API response."""
33+
return (
34+
isinstance(data, dict)
35+
and "prompt" in data
36+
and isinstance(data.get("prompt"), str)
37+
)
38+
39+
40+
class Prompts:
    """
    Fetch and compile LLM prompts from PostHog.

    Can be initialized with a PostHog client or with direct options.

    Examples:
        ```python
        from posthog import Posthog
        from posthog.ai.prompts import Prompts

        # With PostHog client
        posthog = Posthog('phc_xxx', host='https://us.i.posthog.com', personal_api_key='phx_xxx')
        prompts = Prompts(posthog)

        # Or with direct options (no PostHog client needed)
        prompts = Prompts(personal_api_key='phx_xxx', host='https://us.i.posthog.com')

        # Fetch with caching and fallback
        template = prompts.get('support-system-prompt', fallback='You are a helpful assistant.')

        # Compile with variables
        system_prompt = prompts.compile(template, {
            'company': 'Acme Corp',
            'tier': 'premium',
        })
        ```
    """

    # Compiled once: matches {{name}} placeholders where name may contain word
    # characters, dots, and hyphens (e.g. user-id, company.name).
    _PLACEHOLDER_PATTERN = re.compile(r"\{\{([\w.-]+)\}\}")

    def __init__(
        self,
        posthog: Optional[Any] = None,
        *,
        personal_api_key: Optional[str] = None,
        host: Optional[str] = None,
        default_cache_ttl_seconds: Optional[int] = None,
    ):
        """
        Initialize Prompts.

        Args:
            posthog: PostHog client instance (optional if personal_api_key provided)
            personal_api_key: Direct API key (optional if posthog provided)
            host: PostHog host (defaults to US ingestion endpoint)
            default_cache_ttl_seconds: Default cache TTL (defaults to 300;
                pass 0 to always re-fetch)
        """
        # Explicit None check (not `or`) so a caller can pass 0 to disable
        # caching — consistent with how get() treats cache_ttl_seconds.
        self._default_cache_ttl_seconds = (
            default_cache_ttl_seconds
            if default_cache_ttl_seconds is not None
            else DEFAULT_CACHE_TTL_SECONDS
        )
        self._cache: Dict[str, CachedPrompt] = {}

        if posthog is not None:
            # Pull credentials/host off the client; fall back to defaults so a
            # partially-configured client still yields a usable instance.
            self._personal_api_key = getattr(posthog, "personal_api_key", None) or ""
            self._host = remove_trailing_slash(
                getattr(posthog, "raw_host", None) or DEFAULT_HOST
            )
        else:
            self._personal_api_key = personal_api_key or ""
            self._host = remove_trailing_slash(host or DEFAULT_HOST)

    def get(
        self,
        name: str,
        *,
        cache_ttl_seconds: Optional[int] = None,
        fallback: Optional[str] = None,
    ) -> str:
        """
        Fetch a prompt by name from the PostHog API.

        Caching behavior:
        1. If cache is fresh, return cached value
        2. If fetch fails and cache exists (stale), return stale cache with warning
        3. If fetch fails and fallback provided, return fallback with warning
        4. If fetch fails with no cache/fallback, raise exception

        Args:
            name: The name of the prompt to fetch
            cache_ttl_seconds: Cache TTL in seconds (defaults to instance default)
            fallback: Fallback prompt to use if fetch fails and no cache available

        Returns:
            The prompt string

        Raises:
            Exception: If the prompt cannot be fetched and no fallback is available
        """
        ttl = (
            cache_ttl_seconds
            if cache_ttl_seconds is not None
            else self._default_cache_ttl_seconds
        )

        # Serve from cache when the entry is younger than the TTL.
        cached = self._cache.get(name)
        now = time.time()

        if cached is not None and (now - cached.fetched_at) < ttl:
            return cached.prompt

        try:
            prompt = self._fetch_prompt_from_api(name)
            self._cache[name] = CachedPrompt(prompt=prompt, fetched_at=time.time())
            return prompt

        except Exception as error:
            # Degrade gracefully: prefer a stale cache entry, then the
            # caller-supplied fallback, and only raise when neither exists.
            if cached is not None:
                log.warning(
                    '[PostHog Prompts] Failed to fetch prompt "%s", using stale cache: %s',
                    name,
                    error,
                )
                return cached.prompt

            if fallback is not None:
                log.warning(
                    '[PostHog Prompts] Failed to fetch prompt "%s", using fallback: %s',
                    name,
                    error,
                )
                return fallback

            raise

    def compile(
        self, prompt: str, variables: Dict[str, Union[str, int, float, bool]]
    ) -> str:
        """
        Replace {{variableName}} placeholders with values.

        Unmatched variables are left unchanged.
        Supports variable names with hyphens and dots (e.g., user-id, company.name).

        Args:
            prompt: The prompt template string
            variables: Mapping of variable names to values (the module-level
                PromptVariables alias)

        Returns:
            The compiled prompt string
        """

        def replace_variable(match: "re.Match[str]") -> str:
            variable_name = match.group(1)

            if variable_name in variables:
                return str(variables[variable_name])

            # Leave unknown placeholders intact so partial compilation is safe.
            return match.group(0)

        return self._PLACEHOLDER_PATTERN.sub(replace_variable, prompt)

    def clear_cache(self, name: Optional[str] = None) -> None:
        """
        Clear cached prompts.

        Args:
            name: Specific prompt to clear. If None, clears all cached prompts.
        """
        if name is not None:
            self._cache.pop(name, None)
        else:
            self._cache.clear()

    def _fetch_prompt_from_api(self, name: str) -> str:
        """
        Fetch prompt from PostHog API.

        Endpoint: {host}/api/projects/@current/llm_prompts/name/{encoded_name}/
        Auth: Bearer {personal_api_key}

        Args:
            name: The name of the prompt to fetch

        Returns:
            The prompt string

        Raises:
            Exception: If the prompt cannot be fetched
        """
        if not self._personal_api_key:
            raise Exception(
                "[PostHog Prompts] personal_api_key is required to fetch prompts. "
                "Please provide it when initializing the Prompts instance."
            )

        # safe="" also encodes "/" so a prompt name cannot escape the URL path.
        encoded_name = urllib.parse.quote(name, safe="")
        url = f"{self._host}/api/projects/@current/llm_prompts/name/{encoded_name}/"

        headers = {
            "Authorization": f"Bearer {self._personal_api_key}",
            "User-Agent": USER_AGENT,
        }

        # _get_session() reuses the SDK session so transient network failures
        # benefit from its retry configuration.
        response = _get_session().get(url, headers=headers, timeout=10)

        if not response.ok:
            if response.status_code == 404:
                raise Exception(f'[PostHog Prompts] Prompt "{name}" not found')

            if response.status_code == 403:
                raise Exception(
                    f'[PostHog Prompts] Access denied for prompt "{name}". '
                    "Check that your personal_api_key has the correct permissions and the LLM prompts feature is enabled."
                )

            raise Exception(
                f'[PostHog Prompts] Failed to fetch prompt "{name}": HTTP {response.status_code}'
            )

        try:
            data = response.json()
        except Exception:
            raise Exception(
                f'[PostHog Prompts] Invalid response format for prompt "{name}"'
            )

        if not _is_prompt_api_response(data):
            raise Exception(
                f'[PostHog Prompts] Invalid response format for prompt "{name}"'
            )

        return data["prompt"]

0 commit comments

Comments
 (0)