@@ -224,6 +224,7 @@ struct pool {
 
 	struct pool_features pf;
 	bool low_water_triggered:1;	/* A dm event has been sent */
+	bool suspended:1;
 
 	struct dm_bio_prison *prison;
 	struct dm_kcopyd_client *copier;
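
The new `suspended` bit sits next to `low_water_triggered` and follows the same convention: it is only read or written with `pool->lock` held. A minimal sketch of that access pattern, using a hypothetical helper that is not part of the patch:

/*
 * Hypothetical helper (not in the patch): sample pool->suspended
 * under pool->lock, the same pattern thin_ctr() uses below.
 */
static bool pool_is_suspended(struct pool *pool)
{
	unsigned long flags;
	bool suspended;

	spin_lock_irqsave(&pool->lock, flags);
	suspended = pool->suspended;
	spin_unlock_irqrestore(&pool->lock, flags);

	return suspended;
}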
@@ -2575,6 +2576,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_LIST_HEAD(&pool->prepared_discards);
 	INIT_LIST_HEAD(&pool->active_thins);
 	pool->low_water_triggered = false;
+	pool->suspended = true;
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -3119,12 +3121,36 @@ static void pool_resume(struct dm_target *ti)
 
 	spin_lock_irqsave(&pool->lock, flags);
 	pool->low_water_triggered = false;
+	pool->suspended = false;
 	spin_unlock_irqrestore(&pool->lock, flags);
+
 	requeue_bios(pool);
 
 	do_waker(&pool->waker.work);
 }
 
+static void pool_presuspend(struct dm_target *ti)
+{
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	pool->suspended = true;
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static void pool_presuspend_undo(struct dm_target *ti)
+{
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	pool->suspended = false;
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void pool_postsuspend(struct dm_target *ti)
 {
 	struct pool_c *pt = ti->private;
@@ -3592,6 +3618,8 @@ static struct target_type pool_target = {
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
 	.map = pool_map,
+	.presuspend = pool_presuspend,
+	.presuspend_undo = pool_presuspend_undo,
 	.postsuspend = pool_postsuspend,
 	.preresume = pool_preresume,
 	.resume = pool_resume,
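
Registering `pool_presuspend` and `pool_presuspend_undo` in `pool_target` lets device-mapper core drive them on every suspend of the pool device: presuspend fires before I/O is quiesced, postsuspend after, and presuspend_undo only if the suspend is aborted part-way. A simplified sketch of the fan-out dm core performs over a table's targets (the function name here is illustrative; the real walk lives in drivers/md/dm-table.c):

/*
 * Illustrative sketch of how dm core fans presuspend out to every
 * target in a table; a failed suspend triggers a matching walk that
 * calls presuspend_undo so targets can roll their state back.
 */
static void presuspend_targets_sketch(struct dm_table *t)
{
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->type->presuspend)
			ti->type->presuspend(ti);	/* pool_presuspend() here */
	}
}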
@@ -3721,18 +3749,18 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		ti->error = "Couldn't open thin device, Pool is in fail mode";
 		r = -EINVAL;
-		goto bad_thin_open;
+		goto bad_pool;
 	}
 
 	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
 	if (r) {
 		ti->error = "Couldn't open thin internal device";
-		goto bad_thin_open;
+		goto bad_pool;
 	}
 
 	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
 	if (r)
-		goto bad_target_max_io_len;
+		goto bad;
 
 	ti->num_flush_bios = 1;
 	ti->flush_supported = true;
@@ -3747,14 +3775,16 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		ti->split_discard_bios = true;
 	}
 
-	dm_put(pool_md);
-
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	atomic_set(&tc->refcount, 1);
-	init_completion(&tc->can_destroy);
-
 	spin_lock_irqsave(&tc->pool->lock, flags);
+	if (tc->pool->suspended) {
+		spin_unlock_irqrestore(&tc->pool->lock, flags);
+		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
+		ti->error = "Unable to activate thin device while pool is suspended";
+		r = -EINVAL;
+		goto bad;
+	}
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
 	spin_unlock_irqrestore(&tc->pool->lock, flags);
 	/*
@@ -3765,11 +3795,16 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	 */
 	synchronize_rcu();
 
+	dm_put(pool_md);
+
+	atomic_set(&tc->refcount, 1);
+	init_completion(&tc->can_destroy);
+
 	return 0;
 
-bad_target_max_io_len:
+bad:
 	dm_pool_close_thin_device(tc->td);
-bad_thin_open:
+bad_pool:
 	__pool_dec(tc->pool);
 bad_pool_lookup:
 	dm_put(pool_md);
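
The thin_ctr() rework does two things. First, the new suspended check introduces a failure point after the old `dm_put(pool_md)` call, so the put (and the `refcount`/`can_destroy` setup) move down past `synchronize_rcu()`; otherwise the error path, which ends at `bad_pool_lookup: dm_put(pool_md)`, would drop the pool_md reference twice. The error labels are renamed to the plainer `bad`/`bad_pool` to match. Second, the constructor now refuses to activate a thin device against a suspended pool, returning -EINVAL; since `dm_thin_pool_table.mutex` has already been dropped at that point and `__pool_dec()` (reached through `bad`/`bad_pool`) must be called with it held, the bail-out reacquires the mutex before jumping. A hedged summary of the lifecycle the flag now traces, as a comment-style state table:

/*
 * pool->suspended over a pool's life, as set by this patch:
 *
 *   pool_create()          -> true   (new pool, not yet resumed)
 *   pool_resume()          -> false  (I/O and activation allowed)
 *   pool_presuspend()      -> true   (suspend in progress)
 *   pool_presuspend_undo() -> false  (suspend aborted)
 *
 * thin_ctr() only adds a device to pool->active_thins while the
 * flag is false, so no thin device can appear mid-suspend.
 */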