Skip to content

Commit 2f5809c

Browse files
lukehinds and yrobla
authored
Cline Support (#463)
* Cline Support This should be considered experimental until tester more widely by the community. I have it working with Anthropic and Ollama so far. * Remove list check * start adding lm_studio * fix message extraaction * fix secret redaction * fix readme fix lint fix lint * fix problems with codegate version and cline detection fix lint * fix ollama for cline --------- Co-authored-by: Yolanda Robla <[email protected]> Co-authored-by: Yolanda Robla Mota <[email protected]>
1 parent dd2fc4c commit 2f5809c

File tree

18 files changed

+207
-42
lines changed

18 files changed

+207
-42
lines changed

README.md

+10
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,16 @@ With Aider, you can choose from two leading AI model providers:
8282
- 💻 Local LLMs with [Ollama](https://ollama.com/)
8383
- 🧠 [OpenAI API](https://openai.com/api/)
8484

85+
- **[Cline](https://github.com/cline/cline)**
86+
87+
With Cline, you can choose between different leading AI model providers:
88+
89+
- 🤖 [Anthropic API](https://www.anthropic.com/api)
90+
- 🧠 [OpenAI API](https://openai.com/api/)
91+
- 💻 [LM Studio](https://lmstudio.ai/)
92+
- 💻 Local LLMs with [Ollama](https://ollama.com/)
93+
94+
8595
### Privacy first
8696

8797
Unlike E.T., your code never phones home! 🛸 CodeGate is designed with privacy

src/codegate/config.py

+1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
"anthropic": "https://api.anthropic.com/v1",
2121
"vllm": "http://localhost:8000", # Base URL without /v1 path
2222
"ollama": "http://localhost:11434", # Default Ollama server URL
23+
"lm_studio": "http://localhost:1234",
2324
}
2425

2526

src/codegate/pipeline/cli/cli.py

+10-2
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import re
12
import shlex
23

34
from litellm import ChatCompletionRequest
@@ -76,12 +77,19 @@ async def process(
7677

7778
if last_user_message is not None:
7879
last_user_message_str, _ = last_user_message
79-
splitted_message = last_user_message_str.lower().split(" ")
80+
cleaned_message_str = re.sub(r"<.*?>", "", last_user_message_str).strip()
81+
splitted_message = cleaned_message_str.lower().split(" ")
8082
# We expect codegate as the first word in the message
8183
if splitted_message[0] == "codegate":
8284
context.shortcut_response = True
83-
args = shlex.split(last_user_message_str)
85+
args = shlex.split(cleaned_message_str)
8486
cmd_out = await codegate_cli(args[1:])
87+
88+
if cleaned_message_str != last_user_message_str:
89+
# it came from Cline, need to wrap into tags
90+
cmd_out = (
91+
f"<attempt_completion><result>{cmd_out}</result></attempt_completion>\n"
92+
)
8593
return PipelineResult(
8694
response=PipelineResponse(
8795
step_name=self.name,

src/codegate/pipeline/codegate_context_retriever/codegate.py

+24-5
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def generate_context_str(self, objects: list[object], context: PipelineContext)
3636
matched_packages = []
3737
for obj in objects:
3838
# The object is already a dictionary with 'properties'
39-
package_obj = obj["properties"]
39+
package_obj = obj["properties"] # type: ignore
4040
matched_packages.append(f"{package_obj['name']} ({package_obj['type']})")
4141
# Add one alert for each package found
4242
context.add_alert(
@@ -91,13 +91,16 @@ async def process(
9191
) # type: ignore
9292
logger.info(f"Found {len(bad_snippet_packages)} bad packages in code snippets.")
9393

94-
# Remove code snippets from the user messages and search for bad packages
94+
# Remove code snippets and file listing from the user messages and search for bad packages
9595
# in the rest of the user query/messsages
9696
user_messages = re.sub(r"```.*?```", "", user_message, flags=re.DOTALL)
9797
user_messages = re.sub(r"⋮...*?⋮...\n\n", "", user_messages, flags=re.DOTALL)
98+
user_messages = re.sub(
99+
r"<environment_details>.*?</environment_details>", "", user_messages, flags=re.DOTALL
100+
)
98101

99102
# split messages into double newlines, to avoid passing so many content in the search
100-
split_messages = user_messages.split("\n\n")
103+
split_messages = re.split(r"</?task>|(\n\n)", user_messages)
101104
collected_bad_packages = []
102105
for item_message in split_messages:
103106
# Vector search to find bad packages
@@ -126,10 +129,26 @@ async def process(
126129
# Make a copy of the request
127130
new_request = request.copy()
128131

129-
# Add the context to the last user message
130132
# Format: "Context: {context_str} \n Query: {last user message content}"
131133
message = new_request["messages"][last_user_idx]
132-
context_msg = f'Context: {context_str} \n\n Query: {message["content"]}' # type: ignore
134+
message_str = str(message["content"]) # type: ignore
135+
# Add the context to the last user message
136+
if message_str.strip().startswith("<task>"):
137+
# formatting of cline
138+
match = re.match(r"(<task>)(.*?)(</task>)(.*)", message_str, re.DOTALL)
139+
if match:
140+
task_start, task_content, task_end, rest_of_message = match.groups()
141+
142+
# Embed the context into the task block
143+
updated_task_content = (
144+
f"{task_start}Context: {context_str}\n"
145+
+ f"Query: {task_content.strip()}</details>{task_end}"
146+
)
147+
148+
# Combine the updated task block with the rest of the message
149+
context_msg = updated_task_content + rest_of_message
150+
else:
151+
context_msg = f"Context: {context_str} \n\n Query: {message_str}" # type: ignore
133152
message["content"] = context_msg
134153

135154
logger.debug("Final context message", context_message=context_msg)

src/codegate/pipeline/extract_snippets/extract_snippets.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,8 @@ def extract_snippets(message: str) -> List[CodeSnippet]:
125125

126126
#  just correct the typescript exception
127127
lang_map = {"typescript": "javascript"}
128-
lang = lang_map.get(lang, lang)
128+
if lang:
129+
lang = lang_map.get(lang, lang)
129130
snippets.append(CodeSnippet(filepath=filename, code=content, language=lang))
130131

131132
return snippets

src/codegate/pipeline/secrets/secrets.py

+20-6
Original file line numberDiff line numberDiff line change
@@ -451,17 +451,31 @@ async def process_chunk(
451451
):
452452
return [chunk]
453453

454+
is_cline_client = any(
455+
"Cline" in str(message.trigger_string or "")
456+
for message in input_context.alerts_raised or []
457+
)
458+
454459
# Check if this is the first chunk (delta role will be present, others will not)
455460
if len(chunk.choices) > 0 and chunk.choices[0].delta.role:
456461
redacted_count = input_context.metadata["redacted_secrets_count"]
457462
secret_text = "secret" if redacted_count == 1 else "secrets"
458463
# Create notification chunk
459-
notification_chunk = self._create_chunk(
460-
chunk,
461-
f"\n🛡️ [CodeGate prevented {redacted_count} {secret_text}]"
462-
f"(http://localhost:9090/?search=codegate-secrets) from being leaked "
463-
f"by redacting them.\n\n",
464-
)
464+
if is_cline_client:
465+
notification_chunk = self._create_chunk(
466+
chunk,
467+
f"<thinking>\n🛡️ [CodeGate prevented {redacted_count} {secret_text}]"
468+
f"(http://localhost:9090/?search=codegate-secrets) from being leaked "
469+
f"by redacting them.</thinking>\n\n",
470+
)
471+
notification_chunk.choices[0].delta.role = "assistant"
472+
else:
473+
notification_chunk = self._create_chunk(
474+
chunk,
475+
f"\n🛡️ [CodeGate prevented {redacted_count} {secret_text}]"
476+
f"(http://localhost:9090/?search=codegate-secrets) from being leaked "
477+
f"by redacting them.\n\n",
478+
)
465479

466480
# Reset the counter
467481
input_context.metadata["redacted_secrets_count"] = 0

src/codegate/pipeline/secrets/signatures.py

+10-4
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import re
33
from pathlib import Path
44
from threading import Lock
5-
from typing import ClassVar, Dict, List, NamedTuple, Optional
5+
from typing import ClassVar, Dict, List, NamedTuple, Optional, Union
66

77
import structlog
88
import yaml
@@ -215,16 +215,22 @@ def _load_signatures(cls) -> None:
215215
raise
216216

217217
@classmethod
218-
def find_in_string(cls, text: str) -> List[Match]:
219-
"""Search for secrets in the provided string."""
218+
def find_in_string(cls, text: Union[str, List[str]]) -> List[Match]:
219+
"""Search for secrets in the provided string or list of strings."""
220220
if not text:
221221
return []
222222

223223
if not cls._yaml_path:
224224
raise RuntimeError("SecretFinder not initialized.")
225225

226226
matches = []
227-
lines = text.splitlines()
227+
228+
# Split text into lines for processing
229+
try:
230+
lines = text.splitlines()
231+
except Exception as e:
232+
logger.warning(f"Error splitting text into lines: {e}")
233+
return []
228234

229235
for line_num, line in enumerate(lines, start=1):
230236
for group in cls._signature_groups:

src/codegate/pipeline/systemmsg.py

+12-1
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ def get_existing_system_message(request: ChatCompletionRequest) -> Optional[dict
1616
Returns:
1717
The existing system message if found, otherwise None.
1818
"""
19+
1920
for message in request.get("messages", []):
2021
if message["role"] == "system":
2122
return message
@@ -50,8 +51,18 @@ def add_or_update_system_message(
5051
context.add_alert("add-system-message", trigger_string=json.dumps(system_message))
5152
new_request["messages"].insert(0, system_message)
5253
else:
54+
# Handle both string and list content types (needed for Cline, which sends a list)
55+
existing_content = request_system_message["content"]
56+
new_content = system_message["content"]
57+
58+
# Convert list to string if necessary (needed for Cline, which sends a list)
59+
if isinstance(existing_content, list):
60+
existing_content = "\n".join(str(item) for item in existing_content)
61+
if isinstance(new_content, list):
62+
new_content = "\n".join(str(item) for item in new_content)
63+
5364
# Update existing system message
54-
updated_content = request_system_message["content"] + "\n\n" + system_message["content"]
65+
updated_content = existing_content + "\n\n" + new_content
5566
context.add_alert("update-system-message", trigger_string=updated_content)
5667
request_system_message["content"] = updated_content
5768

src/codegate/providers/anthropic/completion_handler.py

+1
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ async def execute_completion(
1616
api_key: Optional[str],
1717
stream: bool = False,
1818
is_fim_request: bool = False,
19+
base_tool: Optional[str] = "",
1920
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
2021
"""
2122
Ensures the model name is prefixed with 'anthropic/' to explicitly route to Anthropic's API.

src/codegate/providers/anthropic/provider.py

+5
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,14 @@ def _setup_routes(self):
3232
Sets up the /messages route for the provider as expected by the Anthropic
3333
API. Extracts the API key from the "x-api-key" header and passes it to the
3434
completion handler.
35+
36+
There are two routes:
37+
- /messages: This is the route that is used by the Anthropic API with Continue.dev
38+
- /v1/messages: This is the route that is used by the Anthropic API with Cline
3539
"""
3640

3741
@self.router.post(f"/{self.provider_route_name}/messages")
42+
@self.router.post(f"/{self.provider_route_name}/v1/messages")
3843
async def create_message(
3944
request: Request,
4045
x_api_key: str = Header(None),

src/codegate/providers/base.py

+18-6
Original file line numberDiff line numberDiff line change
@@ -220,20 +220,32 @@ async def complete(
220220
data.get("base_url"),
221221
is_fim_request,
222222
)
223-
if input_pipeline_result.response:
223+
if input_pipeline_result.response and input_pipeline_result.context:
224224
return await self._pipeline_response_formatter.handle_pipeline_response(
225225
input_pipeline_result.response, streaming, context=input_pipeline_result.context
226226
)
227227

228-
provider_request = self._input_normalizer.denormalize(input_pipeline_result.request)
228+
if input_pipeline_result.request:
229+
provider_request = self._input_normalizer.denormalize(input_pipeline_result.request)
229230
if is_fim_request:
230-
provider_request = self._fim_normalizer.denormalize(provider_request)
231+
provider_request = self._fim_normalizer.denormalize(provider_request) # type: ignore
231232

232233
# Execute the completion and translate the response
233234
# This gives us either a single response or a stream of responses
234235
# based on the streaming flag
236+
is_cline_client = any(
237+
"Cline" in str(message.get("content", "")) for message in data.get("messages", [])
238+
)
239+
base_tool = ""
240+
if is_cline_client:
241+
base_tool = "cline"
242+
235243
model_response = await self._completion_handler.execute_completion(
236-
provider_request, api_key=api_key, stream=streaming, is_fim_request=is_fim_request
244+
provider_request,
245+
api_key=api_key,
246+
stream=streaming,
247+
is_fim_request=is_fim_request,
248+
base_tool=base_tool,
237249
)
238250
if not streaming:
239251
normalized_response = self._output_normalizer.normalize(model_response)
@@ -242,9 +254,9 @@ async def complete(
242254
return self._output_normalizer.denormalize(pipeline_output)
243255

244256
pipeline_output_stream = await self._run_output_stream_pipeline(
245-
input_pipeline_result.context, model_response, is_fim_request=is_fim_request
257+
input_pipeline_result.context, model_response, is_fim_request=is_fim_request # type: ignore
246258
)
247-
return self._cleanup_after_streaming(pipeline_output_stream, input_pipeline_result.context)
259+
return self._cleanup_after_streaming(pipeline_output_stream, input_pipeline_result.context) # type: ignore
248260

249261
def get_routes(self) -> APIRouter:
250262
return self.router

src/codegate/providers/completion/base.py

+1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ async def execute_completion(
2020
api_key: Optional[str],
2121
stream: bool = False, # TODO: remove this param?
2222
is_fim_request: bool = False,
23+
base_tool: Optional[str] = "",
2324
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
2425
"""Execute the completion request"""
2526
pass

src/codegate/providers/litellmshim/litellmshim.py

+1
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ async def execute_completion(
4343
api_key: Optional[str],
4444
stream: bool = False,
4545
is_fim_request: bool = False,
46+
base_tool: Optional[str] = "",
4647
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
4748
"""
4849
Execute the completion request with LiteLLM's API

src/codegate/providers/llamacpp/completion_handler.py

+1
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ async def execute_completion(
5252
api_key: Optional[str],
5353
stream: bool = False,
5454
is_fim_request: bool = False,
55+
base_tool: Optional[str] = "",
5556
) -> Union[ModelResponse, AsyncIterator[ModelResponse]]:
5657
"""
5758
Execute the completion request with inference engine API

0 commit comments

Comments
 (0)