@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
132132
133133#define BLKBACK_INVALID_HANDLE (~0)
134134
135- /* Number of free pages to remove on each call to gnttab_free_pages */
136- #define NUM_BATCH_FREE_PAGES 10
137-
138135static inline bool persistent_gnt_timeout (struct persistent_gnt * persistent_gnt )
139136{
140137 return pgrant_timeout && (jiffies - persistent_gnt -> last_used >=
141138 HZ * pgrant_timeout );
142139}
143140
144- static inline int get_free_page (struct xen_blkif_ring * ring , struct page * * page )
145- {
146- unsigned long flags ;
147-
148- spin_lock_irqsave (& ring -> free_pages_lock , flags );
149- if (list_empty (& ring -> free_pages )) {
150- BUG_ON (ring -> free_pages_num != 0 );
151- spin_unlock_irqrestore (& ring -> free_pages_lock , flags );
152- return gnttab_alloc_pages (1 , page );
153- }
154- BUG_ON (ring -> free_pages_num == 0 );
155- page [0 ] = list_first_entry (& ring -> free_pages , struct page , lru );
156- list_del (& page [0 ]-> lru );
157- ring -> free_pages_num -- ;
158- spin_unlock_irqrestore (& ring -> free_pages_lock , flags );
159-
160- return 0 ;
161- }
162-
163- static inline void put_free_pages (struct xen_blkif_ring * ring , struct page * * page ,
164- int num )
165- {
166- unsigned long flags ;
167- int i ;
168-
169- spin_lock_irqsave (& ring -> free_pages_lock , flags );
170- for (i = 0 ; i < num ; i ++ )
171- list_add (& page [i ]-> lru , & ring -> free_pages );
172- ring -> free_pages_num += num ;
173- spin_unlock_irqrestore (& ring -> free_pages_lock , flags );
174- }
175-
176- static inline void shrink_free_pagepool (struct xen_blkif_ring * ring , int num )
177- {
178- /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
179- struct page * page [NUM_BATCH_FREE_PAGES ];
180- unsigned int num_pages = 0 ;
181- unsigned long flags ;
182-
183- spin_lock_irqsave (& ring -> free_pages_lock , flags );
184- while (ring -> free_pages_num > num ) {
185- BUG_ON (list_empty (& ring -> free_pages ));
186- page [num_pages ] = list_first_entry (& ring -> free_pages ,
187- struct page , lru );
188- list_del (& page [num_pages ]-> lru );
189- ring -> free_pages_num -- ;
190- if (++ num_pages == NUM_BATCH_FREE_PAGES ) {
191- spin_unlock_irqrestore (& ring -> free_pages_lock , flags );
192- gnttab_free_pages (num_pages , page );
193- spin_lock_irqsave (& ring -> free_pages_lock , flags );
194- num_pages = 0 ;
195- }
196- }
197- spin_unlock_irqrestore (& ring -> free_pages_lock , flags );
198- if (num_pages != 0 )
199- gnttab_free_pages (num_pages , page );
200- }
201-
202141#define vaddr (page ) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
203142
204143static int do_block_io_op (struct xen_blkif_ring * ring , unsigned int * eoi_flags );
@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *ro
331270 unmap_data .count = segs_to_unmap ;
332271 BUG_ON (gnttab_unmap_refs_sync (& unmap_data ));
333272
334- put_free_pages (ring , pages , segs_to_unmap );
273+ gnttab_page_cache_put (& ring -> free_pages , pages ,
274+ segs_to_unmap );
335275 segs_to_unmap = 0 ;
336276 }
337277
@@ -371,15 +311,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
371311 if (++ segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ) {
372312 unmap_data .count = segs_to_unmap ;
373313 BUG_ON (gnttab_unmap_refs_sync (& unmap_data ));
374- put_free_pages (ring , pages , segs_to_unmap );
314+ gnttab_page_cache_put (& ring -> free_pages , pages ,
315+ segs_to_unmap );
375316 segs_to_unmap = 0 ;
376317 }
377318 kfree (persistent_gnt );
378319 }
379320 if (segs_to_unmap > 0 ) {
380321 unmap_data .count = segs_to_unmap ;
381322 BUG_ON (gnttab_unmap_refs_sync (& unmap_data ));
382- put_free_pages ( ring , pages , segs_to_unmap );
323+ gnttab_page_cache_put ( & ring -> free_pages , pages , segs_to_unmap );
383324 }
384325}
385326
@@ -664,9 +605,10 @@ int xen_blkif_schedule(void *arg)
664605
665606 /* Shrink the free pages pool if it is too large. */
666607 if (time_before (jiffies , blkif -> buffer_squeeze_end ))
667- shrink_free_pagepool ( ring , 0 );
608+ gnttab_page_cache_shrink ( & ring -> free_pages , 0 );
668609 else
669- shrink_free_pagepool (ring , max_buffer_pages );
610+ gnttab_page_cache_shrink (& ring -> free_pages ,
611+ max_buffer_pages );
670612
671613 if (log_stats && time_after (jiffies , ring -> st_print ))
672614 print_stats (ring );
@@ -697,7 +639,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
697639 ring -> persistent_gnt_c = 0 ;
698640
699641 /* Since we are shutting down remove all pages from the buffer */
700- shrink_free_pagepool ( ring , 0 /* All */ );
642+ gnttab_page_cache_shrink ( & ring -> free_pages , 0 /* All */ );
701643}
702644
703645static unsigned int xen_blkbk_unmap_prepare (
@@ -736,7 +678,7 @@ static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_
736678 but is this the best way to deal with this? */
737679 BUG_ON (result );
738680
739- put_free_pages ( ring , data -> pages , data -> count );
681+ gnttab_page_cache_put ( & ring -> free_pages , data -> pages , data -> count );
740682 make_response (ring , pending_req -> id ,
741683 pending_req -> operation , pending_req -> status );
742684 free_req (ring , pending_req );
@@ -803,7 +745,8 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
803745 if (invcount ) {
804746 ret = gnttab_unmap_refs (unmap , NULL , unmap_pages , invcount );
805747 BUG_ON (ret );
806- put_free_pages (ring , unmap_pages , invcount );
748+ gnttab_page_cache_put (& ring -> free_pages , unmap_pages ,
749+ invcount );
807750 }
808751 pages += batch ;
809752 num -= batch ;
@@ -850,7 +793,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
850793 pages [i ]-> page = persistent_gnt -> page ;
851794 pages [i ]-> persistent_gnt = persistent_gnt ;
852795 } else {
853- if (get_free_page (ring , & pages [i ]-> page ))
796+ if (gnttab_page_cache_get (& ring -> free_pages ,
797+ & pages [i ]-> page ))
854798 goto out_of_memory ;
855799 addr = vaddr (pages [i ]-> page );
856800 pages_to_gnt [segs_to_map ] = pages [i ]-> page ;
@@ -883,7 +827,8 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
883827 BUG_ON (new_map_idx >= segs_to_map );
884828 if (unlikely (map [new_map_idx ].status != 0 )) {
885829 pr_debug ("invalid buffer -- could not remap it\n" );
886- put_free_pages (ring , & pages [seg_idx ]-> page , 1 );
830+ gnttab_page_cache_put (& ring -> free_pages ,
831+ & pages [seg_idx ]-> page , 1 );
887832 pages [seg_idx ]-> handle = BLKBACK_INVALID_HANDLE ;
888833 ret |= 1 ;
889834 goto next ;
@@ -944,7 +889,7 @@ static int xen_blkbk_map(struct xen_blkif_ring *ring,
944889
945890out_of_memory :
946891 pr_alert ("%s: out of memory\n" , __func__ );
947- put_free_pages ( ring , pages_to_gnt , segs_to_map );
892+ gnttab_page_cache_put ( & ring -> free_pages , pages_to_gnt , segs_to_map );
948893 for (i = last_map ; i < num ; i ++ )
949894 pages [i ]-> handle = BLKBACK_INVALID_HANDLE ;
950895 return - ENOMEM ;