Skip to content

Commit 55c4478

Browse files
cxiaoyi authored and rafaeljw committed
PM: hibernate: Batch hibernate and resume IO requests
Hibernate and resume process submits individual IO requests for each page of the data, so use blk_plug to improve the batching of these requests. Testing this change with hibernate and resumes consistently shows merging of the IO requests and more than an order of magnitude improvement in hibernate and resume speed is observed. One hibernate and resume cycle for 16GB RAM out of 32GB in use takes around 21 minutes before the change, and 1 minutes after the change on a system with limited storage IOPS. Signed-off-by: Xiaoyi Chen <cxiaoyi@amazon.com> Co-Developed-by: Anchal Agarwal <anchalag@amazon.com> Signed-off-by: Anchal Agarwal <anchalag@amazon.com> [ rjw: Subject and changelog edits, white space damage fixes ] Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
1 parent ba4f184 commit 55c4478

1 file changed

Lines changed: 15 additions & 0 deletions

File tree

kernel/power/swap.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -226,13 +226,20 @@ struct hib_bio_batch {
226226
atomic_t count;
227227
wait_queue_head_t wait;
228228
blk_status_t error;
229+
struct blk_plug plug;
229230
};
230231

231232
static void hib_init_batch(struct hib_bio_batch *hb)
232233
{
233234
atomic_set(&hb->count, 0);
234235
init_waitqueue_head(&hb->wait);
235236
hb->error = BLK_STS_OK;
237+
blk_start_plug(&hb->plug);
238+
}
239+
240+
static void hib_finish_batch(struct hib_bio_batch *hb)
241+
{
242+
blk_finish_plug(&hb->plug);
236243
}
237244

238245
static void hib_end_io(struct bio *bio)
@@ -294,6 +301,10 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
294301

295302
static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
296303
{
304+
/*
305+
* We are relying on the behavior of blk_plug that a thread with
306+
* a plug will flush the plug list before sleeping.
307+
*/
297308
wait_event(hb->wait, atomic_read(&hb->count) == 0);
298309
return blk_status_to_errno(hb->error);
299310
}
@@ -561,6 +572,7 @@ static int save_image(struct swap_map_handle *handle,
561572
nr_pages++;
562573
}
563574
err2 = hib_wait_io(&hb);
575+
hib_finish_batch(&hb);
564576
stop = ktime_get();
565577
if (!ret)
566578
ret = err2;
@@ -854,6 +866,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
854866
pr_info("Image saving done\n");
855867
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
856868
out_clean:
869+
hib_finish_batch(&hb);
857870
if (crc) {
858871
if (crc->thr)
859872
kthread_stop(crc->thr);
@@ -1084,6 +1097,7 @@ static int load_image(struct swap_map_handle *handle,
10841097
nr_pages++;
10851098
}
10861099
err2 = hib_wait_io(&hb);
1100+
hib_finish_batch(&hb);
10871101
stop = ktime_get();
10881102
if (!ret)
10891103
ret = err2;
@@ -1447,6 +1461,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
14471461
}
14481462
swsusp_show_speed(start, stop, nr_to_read, "Read");
14491463
out_clean:
1464+
hib_finish_batch(&hb);
14501465
for (i = 0; i < ring_size; i++)
14511466
free_page((unsigned long)page[i]);
14521467
if (crc) {

0 commit comments

Comments
 (0)