Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions tests/v1/kv_offload/cpu/test_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,11 @@
make_offload_key,
)
from vllm.v1.kv_offload.cpu.common import CPULoadStoreSpec
from vllm.v1.kv_offload.cpu.manager import CPUOffloadingManager
from vllm.v1.kv_offload.cpu.manager import (
CPUOffloadingManager,
FilterReusedOffloadingManager,
)
from vllm.v1.kv_offload.cpu.policies.arc import ARCCachePolicy
from vllm.v1.kv_offload.reuse_manager import FilterReusedOffloadingManager


def make_req_context(kv_transfer_params: dict | None = None) -> ReqContext:
Expand Down
10 changes: 10 additions & 0 deletions vllm/v1/kv_offload/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,16 @@ def take_events(self) -> Iterable[OffloadingEvent]:
"""
return ()

def request_finished(self, req_id: str) -> bool:
    """Notify the manager that request *req_id* has finished.

    Invoked before the request's GPU blocks are freed, giving the
    manager a chance to veto the free while async work is pending.

    Returns:
        True if the manager still has async work in flight for this request
        (e.g. a pending store) and GPU blocks must not be freed yet.
    """
    # Base implementation: no async work is ever tracked, so blocks
    # may always be freed immediately.
    return False

def shutdown(self) -> None:
    """Release any resources held by the manager and shut it down."""
    # Base implementation holds no resources; nothing to release.
    return
Expand Down
104 changes: 104 additions & 0 deletions vllm/v1/kv_offload/cpu/manager.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections import OrderedDict
from collections.abc import Collection, Iterable
from typing import Literal

Expand Down Expand Up @@ -198,3 +199,106 @@ def take_events(self) -> Iterable[OffloadingEvent]:
if self.events is not None:
yield from self.events
self.events.clear()


class FilterReusedOffloadingManager(OffloadingManager):
    """An :class:`OffloadingManager` decorator that skips storing blocks
    whose reuse frequency is below *store_threshold*.

    All methods are delegated to the *backing* manager. Two methods are
    intercepted:

    * ``lookup`` — records the visited key in an internal LRU
      counter, then delegates to the backing manager.
    * ``prepare_store`` — filters out keys that have not yet
      crossed the threshold *before* calling the backing
      ``prepare_store``.

    Args:
        backing: The underlying ``OffloadingManager`` to delegate to.
        store_threshold: A block must be seen at least this many times in
            ``lookup()`` before it is eligible for offloading. Must be >= 2
            (a value of 1 would be equivalent to no filtering).
        max_tracker_size: Maximum entries in the internal tracker's LRU table.

    Raises:
        ValueError: If ``store_threshold`` < 2 or ``max_tracker_size`` < 1.
    """

    def __init__(
        self,
        backing: OffloadingManager,
        store_threshold: int = 2,
        max_tracker_size: int = 64_000,
    ):
        if store_threshold < 2:
            raise ValueError(
                "FilterReusedOffloadingManager store_threshold must be >= 2, "
                f"got {store_threshold}"
            )
        if max_tracker_size < 1:
            raise ValueError(
                "FilterReusedOffloadingManager max_tracker_size must be >= 1, "
                f"got {max_tracker_size}"
            )
        self._backing = backing
        self.store_threshold = store_threshold
        self.max_tracker_size = max_tracker_size
        # Ordered so we can evict the LRU entry in O(1).
        self.counts: OrderedDict[OffloadKey, int] = OrderedDict()

    # ------------------------------------------------------------------
    # Intercepted methods
    # ------------------------------------------------------------------

    def lookup(self, key: OffloadKey, req_context: ReqContext) -> bool | None:
        """Record the key, then delegate lookup to backing manager."""
        if key in self.counts:
            # Refresh recency before bumping the count so the hot key
            # survives LRU eviction.
            self.counts.move_to_end(key)
            self.counts[key] += 1
        else:
            if len(self.counts) >= self.max_tracker_size:
                self.counts.popitem(last=False)  # evict LRU
            self.counts[key] = 1
        return self._backing.lookup(key, req_context)

    def prepare_store(
        self, keys: Collection[OffloadKey], req_context: ReqContext
    ) -> PrepareStoreOutput | None:
        """Filter out blocks below threshold, then delegate to backing.

        Filtering is evaluated *before* calling the backing manager's
        ``prepare_store`` so that blocks that would be skipped do not
        consume any CPU offload capacity.
        """
        eligible = [
            key for key in keys if self.counts.get(key, 0) >= self.store_threshold
        ]

        # Passing an empty list is intentional and safe — CPUOffloadingManager
        # handles it correctly, returning a PrepareStoreOutput with empty lists.
        # Delegate to the backing manager with only the eligible keys.
        return self._backing.prepare_store(eligible, req_context)

    # ------------------------------------------------------------------
    # Delegated methods
    # ------------------------------------------------------------------

    def prepare_load(
        self, keys: Collection[OffloadKey], req_context: ReqContext
    ) -> LoadStoreSpec:
        return self._backing.prepare_load(keys, req_context)

    def touch(self, keys: Collection[OffloadKey]) -> None:
        return self._backing.touch(keys)

    def complete_load(self, keys: Collection[OffloadKey]) -> None:
        return self._backing.complete_load(keys)

    def complete_store(
        self, keys: Collection[OffloadKey], success: bool = True
    ) -> None:
        return self._backing.complete_store(keys, success)

    def take_events(self) -> Iterable[OffloadingEvent]:
        return self._backing.take_events()

    def request_finished(self, req_id: str) -> bool:
        return self._backing.request_finished(req_id)
Comment thread
hickeyma marked this conversation as resolved.
6 changes: 4 additions & 2 deletions vllm/v1/kv_offload/cpu/spec.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,10 @@
)
from vllm.v1.kv_offload.cpu.common import CPULoadStoreSpec
from vllm.v1.kv_offload.cpu.gpu_worker import CpuGpuOffloadingHandlers
from vllm.v1.kv_offload.cpu.manager import CPUOffloadingManager
from vllm.v1.kv_offload.reuse_manager import FilterReusedOffloadingManager
from vllm.v1.kv_offload.cpu.manager import (
CPUOffloadingManager,
FilterReusedOffloadingManager,
)
from vllm.v1.kv_offload.worker.worker import OffloadingHandler


Expand Down
120 changes: 0 additions & 120 deletions vllm/v1/kv_offload/reuse_manager.py

This file was deleted.

Loading