@@ -3716,50 +3716,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3716 3716 	return 0;
3717 3717 }
3718 3718 
3719+ static void scrub_workers_put(struct btrfs_fs_info *fs_info)
3720+ {
3721+ 	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
3722+ 					&fs_info->scrub_lock)) {
3723+ 		struct btrfs_workqueue *scrub_workers = NULL;
3724+ 		struct btrfs_workqueue *scrub_wr_comp = NULL;
3725+ 		struct btrfs_workqueue *scrub_parity = NULL;
3726+ 
3727+ 		scrub_workers = fs_info->scrub_workers;
3728+ 		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
3729+ 		scrub_parity = fs_info->scrub_parity_workers;
3730+ 
3731+ 		fs_info->scrub_workers = NULL;
3732+ 		fs_info->scrub_wr_completion_workers = NULL;
3733+ 		fs_info->scrub_parity_workers = NULL;
3734+ 		mutex_unlock(&fs_info->scrub_lock);
3735+ 
3736+ 		btrfs_destroy_workqueue(scrub_workers);
3737+ 		btrfs_destroy_workqueue(scrub_wr_comp);
3738+ 		btrfs_destroy_workqueue(scrub_parity);
3739+ 	}
3740+ }
3741+ 
3719 3742 /*
3720 3743  * get a reference count on fs_info->scrub_workers. start worker if necessary
3721 3744  */
3722 3745 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3723 3746 						int is_dev_replace)
3724 3747 {
3748+ 	struct btrfs_workqueue *scrub_workers = NULL;
3749+ 	struct btrfs_workqueue *scrub_wr_comp = NULL;
3750+ 	struct btrfs_workqueue *scrub_parity = NULL;
3725 3751 	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3726 3752 	int max_active = fs_info->thread_pool_size;
3753+ 	int ret = -ENOMEM;
3727 3754 
3728- 	lockdep_assert_held(&fs_info->scrub_lock);
3755+ 	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
3756+ 		return 0;
3729 3757 
3730- 	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
3731- 		ASSERT(fs_info->scrub_workers == NULL);
3732- 		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
3733- 				flags, is_dev_replace ? 1 : max_active, 4);
3734- 		if (!fs_info->scrub_workers)
3735- 			goto fail_scrub_workers;
3736- 
3737- 		ASSERT(fs_info->scrub_wr_completion_workers == NULL);
3738- 		fs_info->scrub_wr_completion_workers =
3739- 			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3740- 					      max_active, 2);
3741- 		if (!fs_info->scrub_wr_completion_workers)
3742- 			goto fail_scrub_wr_completion_workers;
3758+ 	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
3759+ 					      is_dev_replace ? 1 : max_active, 4);
3760+ 	if (!scrub_workers)
3761+ 		goto fail_scrub_workers;
3743 3762 
3744- 	ASSERT(fs_info->scrub_parity_workers == NULL);
3745- 	fs_info->scrub_parity_workers =
3746- 		btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3763+ 	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3747 3764 					      max_active, 2);
3748- 	if (!fs_info->scrub_parity_workers)
3749- 		goto fail_scrub_parity_workers;
3765+ 	if (!scrub_wr_comp)
3766+ 		goto fail_scrub_wr_completion_workers;
3750 3767 
3768+ 	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3769+ 					     max_active, 2);
3770+ 	if (!scrub_parity)
3771+ 		goto fail_scrub_parity_workers;
3772+ 
3773+ 	mutex_lock(&fs_info->scrub_lock);
3774+ 	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
3775+ 		ASSERT(fs_info->scrub_workers == NULL &&
3776+ 		       fs_info->scrub_wr_completion_workers == NULL &&
3777+ 		       fs_info->scrub_parity_workers == NULL);
3778+ 		fs_info->scrub_workers = scrub_workers;
3779+ 		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
3780+ 		fs_info->scrub_parity_workers = scrub_parity;
3751 3781 		refcount_set(&fs_info->scrub_workers_refcnt, 1);
3752- 	} else {
3753- 		refcount_inc(&fs_info->scrub_workers_refcnt);
3782+ 		mutex_unlock(&fs_info->scrub_lock);
3783+ 		return 0;
3754 3784 	}
3755- 	return 0;
3785+ 	/* Other thread raced in and created the workers for us */
3786+ 	refcount_inc(&fs_info->scrub_workers_refcnt);
3787+ 	mutex_unlock(&fs_info->scrub_lock);
3756 3788 
3789+ 	ret = 0;
3790+ 	btrfs_destroy_workqueue(scrub_parity);
3757 3791 fail_scrub_parity_workers:
3758- 	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3792+ 	btrfs_destroy_workqueue(scrub_wr_comp);
3759 3793 fail_scrub_wr_completion_workers:
3760- 	btrfs_destroy_workqueue(fs_info->scrub_workers);
3794+ 	btrfs_destroy_workqueue(scrub_workers);
3761 3795 fail_scrub_workers:
3762- 	return -ENOMEM;
3796+ 	return ret;
3763 3797 }
3764 3798 
3765 3799 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
@@ -3770,9 +3804,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3770 3804 	int ret;
3771 3805 	struct btrfs_device *dev;
3772 3806 	unsigned int nofs_flag;
3773- 	struct btrfs_workqueue *scrub_workers = NULL;
3774- 	struct btrfs_workqueue *scrub_wr_comp = NULL;
3775- 	struct btrfs_workqueue *scrub_parity = NULL;
3776 3807 
3777 3808 	if (btrfs_fs_closing(fs_info))
3778 3809 		return -EAGAIN;
@@ -3819,13 +3850,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3819 3850 	if (IS_ERR(sctx))
3820 3851 		return PTR_ERR(sctx);
3821 3852 
3853+ 	ret = scrub_workers_get(fs_info, is_dev_replace);
3854+ 	if (ret)
3855+ 		goto out_free_ctx;
3856+ 
3822 3857 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3823 3858 	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
3824 3859 	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
3825 3860 		     !is_dev_replace)) {
3826 3861 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3827 3862 		ret = -ENODEV;
3828- 		goto out_free_ctx;
3863+ 		goto out;
3829 3864 	}
3830 3865 
3831 3866 	if (!is_dev_replace && !readonly &&
@@ -3834,7 +3869,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3834 3869 		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
3835 3870 				 rcu_str_deref(dev->name));
3836 3871 		ret = -EROFS;
3837- 		goto out_free_ctx;
3872+ 		goto out;
3838 3873 	}
3839 3874 
3840 3875 	mutex_lock(&fs_info->scrub_lock);
@@ -3843,7 +3878,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3843 3878 		mutex_unlock(&fs_info->scrub_lock);
3844 3879 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3845 3880 		ret = -EIO;
3846- 		goto out_free_ctx;
3881+ 		goto out;
3847 3882 	}
3848 3883 
3849 3884 	down_read(&fs_info->dev_replace.rwsem);
@@ -3854,17 +3889,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3854 3889 		mutex_unlock(&fs_info->scrub_lock);
3855 3890 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3856 3891 		ret = -EINPROGRESS;
3857- 		goto out_free_ctx;
3892+ 		goto out;
3858 3893 	}
3859 3894 	up_read(&fs_info->dev_replace.rwsem);
3860 3895 
3861- 	ret = scrub_workers_get(fs_info, is_dev_replace);
3862- 	if (ret) {
3863- 		mutex_unlock(&fs_info->scrub_lock);
3864- 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3865- 		goto out_free_ctx;
3866- 	}
3867- 
3868 3896 	sctx->readonly = readonly;
3869 3897 	dev->scrub_ctx = sctx;
3870 3898 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3917,24 +3945,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3917 3945 
3918 3946 	mutex_lock(&fs_info->scrub_lock);
3919 3947 	dev->scrub_ctx = NULL;
3920- 	if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
3921- 		scrub_workers = fs_info->scrub_workers;
3922- 		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
3923- 		scrub_parity = fs_info->scrub_parity_workers;
3924- 
3925- 		fs_info->scrub_workers = NULL;
3926- 		fs_info->scrub_wr_completion_workers = NULL;
3927- 		fs_info->scrub_parity_workers = NULL;
3928- 	}
3929 3948 	mutex_unlock(&fs_info->scrub_lock);
3930 3949 
3931- 	btrfs_destroy_workqueue(scrub_workers);
3932- 	btrfs_destroy_workqueue(scrub_wr_comp);
3933- 	btrfs_destroy_workqueue(scrub_parity);
3950+ 	scrub_workers_put(fs_info);
3934 3951 	scrub_put_ctx(sctx);
3935 3952 
3936 3953 	return ret;
3937- 
3954+ out:
3955+ 	scrub_workers_put(fs_info);
3938 3956 out_free_ctx:
3939 3957 	scrub_free_ctx(sctx);
3940 3958 
0 commit comments