Commit dd3e6d5
mm: add support for async page locking
Normally waiting for a page to become unlocked, or locking the page,
requires waiting for IO to complete. Add support for lock_page_async()
and wait_on_page_locked_async(), which are callback based instead. This
allows a caller to get notified when a page becomes unlocked, rather
than having to wait for it.

We add a new iocb field, ki_waitq, to pass in the necessary data for
this to happen. We can union this with ki_cookie, since that is only
used for polled IO. Polled IO can never co-exist with async callbacks,
as polled IO by definition uses polled completions rather than
callbacks.

struct wait_page_key is made public, and we define struct
wait_page_async as the interface between the caller and the core.

Acked-by: Johannes Weiner <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
1 parent c7510ab commit dd3e6d5
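As a rough sketch of the calling convention (not part of this commit;
my_async_buf_wake(), my_prep_async_read() and the retry hand-off are
hypothetical names, loosely modeled on the io_uring caller this series
enables), a consumer embeds a struct wait_page_queue, gives it a wake
function, and tags the iocb:

	/* Hypothetical consumer-side plumbing, for illustration only */
	static int my_async_buf_wake(struct wait_queue_entry *wait, unsigned mode,
				     int sync, void *key)
	{
		struct wait_page_queue *wpq = container_of(wait, struct wait_page_queue,
							   wait);
		struct wait_page_key *wkey = key;

		/* unlock_page() wakes the whole hashed queue; filter on our page */
		if (!wake_page_match(wpq, wkey))
			return 0;

		list_del_init(&wait->entry);
		/* punt a retry of the buffered read to task/worker context here */
		return 1;
	}

	static void my_prep_async_read(struct kiocb *iocb, struct wait_page_queue *wpq,
				       void *retry_ctx)
	{
		init_waitqueue_func_entry(&wpq->wait, my_async_buf_wake);
		wpq->wait.private = retry_ctx;	/* handed back via wait->private */
		INIT_LIST_HEAD(&wpq->wait.entry);
		iocb->ki_flags |= IOCB_WAITQ;
		iocb->ki_waitq = wpq;
	}

The IOCB_WAITQ flag tells the read path that ki_waitq is valid and that
it may return -EIOCBQUEUED instead of sleeping on the page lock.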

3 files changed: +67, -2 lines

include/linux/fs.h (6 additions, 1 deletion)

@@ -315,6 +315,8 @@ enum rw_hint {
 #define IOCB_SYNC		(1 << 5)
 #define IOCB_WRITE		(1 << 6)
 #define IOCB_NOWAIT		(1 << 7)
+/* iocb->ki_waitq is valid */
+#define IOCB_WAITQ		(1 << 8)
 
 struct kiocb {
 	struct file		*ki_filp;
@@ -328,7 +330,10 @@ struct kiocb {
 	int			ki_flags;
 	u16			ki_hint;
 	u16			ki_ioprio; /* See linux/ioprio.h */
-	unsigned int		ki_cookie; /* for ->iopoll */
+	union {
+		unsigned int		ki_cookie; /* for ->iopoll */
+		struct wait_page_queue	*ki_waitq; /* for async buffered IO */
+	};
 
 	randomized_struct_fields_end
 };
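Since ki_cookie and ki_waitq now overlay each other, ki_flags decides
which member is live. A minimal illustration (my_iocb_uses_waitq() is a
made-up helper, not from this patch):

	/* Illustration: the union members are mutually exclusive by ki_flags */
	static bool my_iocb_uses_waitq(struct kiocb *iocb)
	{
		/* IOCB_HIPRI (polled IO) is what makes ki_cookie meaningful */
		if (iocb->ki_flags & IOCB_HIPRI)
			return false;
		return iocb->ki_flags & IOCB_WAITQ;
	}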

include/linux/pagemap.h (17 additions, 0 deletions)

@@ -535,6 +535,7 @@ static inline int wake_page_match(struct wait_page_queue *wait_page,
 
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
+extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
 extern void unlock_page(struct page *page);
@@ -571,6 +572,22 @@ static inline int lock_page_killable(struct page *page)
 	return 0;
 }
 
+/*
+ * lock_page_async - Lock the page, unless this would block. If the page
+ * is already locked, then queue a callback when the page becomes unlocked.
+ * This callback can then retry the operation.
+ *
+ * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
+ * was already locked and the callback defined in 'wait' was queued.
+ */
+static inline int lock_page_async(struct page *page,
+				  struct wait_page_queue *wait)
+{
+	if (!trylock_page(page))
+		return __lock_page_async(page, wait);
+	return 0;
+}
+
 /*
  * lock_page_or_retry - Lock the page, unless this would block and the
  * caller indicated that it can handle a retry.
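Callers that can fall back to sleeping keep the old path; a sketch of
the selection logic (my_lock_for_read() is hypothetical, mirroring the
mm/filemap.c hunk below):

	static int my_lock_for_read(struct kiocb *iocb, struct page *page)
	{
		if (iocb->ki_flags & IOCB_WAITQ)
			/* returns 0, or -EIOCBQUEUED once the callback is armed */
			return lock_page_async(page, iocb->ki_waitq);
		return lock_page_killable(page);
	}

On -EIOCBQUEUED the caller must back out without touching the page
again; the queued wake function owns the retry.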

mm/filemap.c (44 additions, 1 deletion)
@@ -1180,6 +1180,36 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
+static int __wait_on_page_locked_async(struct page *page,
+				       struct wait_page_queue *wait, bool set)
+{
+	struct wait_queue_head *q = page_waitqueue(page);
+	int ret = 0;
+
+	wait->page = page;
+	wait->bit_nr = PG_locked;
+
+	spin_lock_irq(&q->lock);
+	__add_wait_queue_entry_tail(q, &wait->wait);
+	SetPageWaiters(page);
+	if (set)
+		ret = !trylock_page(page);
+	else
+		ret = PageLocked(page);
+	/*
+	 * If we were successful now, we know we're still on the
+	 * waitqueue as we're still under the lock. This means it's
+	 * safe to remove and return success, we know the callback
+	 * isn't going to trigger.
+	 */
+	if (!ret)
+		__remove_wait_queue(q, &wait->wait);
+	else
+		ret = -EIOCBQUEUED;
+	spin_unlock_irq(&q->lock);
+	return ret;
+}
+
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
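Only the lock flavor is exported by this patch; the
wait_on_page_locked_async() mentioned in the commit message would be
the set=false flavor of the same helper. A sketch, assuming it wraps
the helper the same way __lock_page_async() below does:

	/* Sketch: a wait-only flavor built on the same helper (set=false) */
	static int wait_on_page_locked_async(struct page *page,
					     struct wait_page_queue *wait)
	{
		if (!PageLocked(page))
			return 0;
		return __wait_on_page_locked_async(compound_head(page), wait, false);
	}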
@@ -1342,6 +1372,11 @@ int __lock_page_killable(struct page *__page)
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
+int __lock_page_async(struct page *page, struct wait_page_queue *wait)
+{
+	return __wait_on_page_locked_async(page, wait, true);
+}
+
 /*
  * Return values:
  * 1 - page is locked; mmap_lock is still held.
@@ -2131,6 +2166,11 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 	}
 
 readpage:
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		unlock_page(page);
+		put_page(page);
+		goto would_block;
+	}
 	/*
 	 * A previous I/O error may have been due to temporary
 	 * failures, eg. multipath errors.
@@ -2150,7 +2190,10 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 	}
 
 	if (!PageUptodate(page)) {
-		error = lock_page_killable(page);
+		if (iocb->ki_flags & IOCB_WAITQ)
+			error = lock_page_async(page, iocb->ki_waitq);
+		else
+			error = lock_page_killable(page);
 		if (unlikely(error))
 			goto readpage_error;
 		if (!PageUptodate(page)) {
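End to end, a submitter sees -EIOCBQUEUED bubble out of
generic_file_buffered_read() (exported for this purpose earlier in the
series) and simply returns; a sketch, with my_issue_read() as a
hypothetical stand-in for the real caller:

	/* Sketch of a non-blocking submit; retry is driven by the wake callback */
	static ssize_t my_issue_read(struct kiocb *iocb, struct iov_iter *to)
	{
		ssize_t ret = generic_file_buffered_read(iocb, to, 0);

		/*
		 * On -EIOCBQUEUED the target page was locked for IO and
		 * iocb->ki_waitq is now queued on its waitqueue. Do not
		 * touch the page; re-issue the read when the callback runs.
		 */
		return ret;
	}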
