@@ -129,7 +129,8 @@ struct iv_elephant_private {
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
 	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
-	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE };
+	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
+	     DM_CRYPT_WRITE_INLINE };
 
 enum cipher_flags {
 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cipher */
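
The new DM_CRYPT_WRITE_INLINE bit joins the per-target flags kept in cc->flags, an unsigned long that dm-crypt manipulates with the kernel's atomic bit helpers (set_bit()/test_bit()). A rough userspace analogue of that pattern, with hypothetical names and C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the dm-crypt flag bits. */
enum { MODEL_NO_WRITE_WORKQUEUE, MODEL_WRITE_INLINE };

/* Analogue of set_bit(nr, &cc->flags): atomically OR in one bit. */
static void model_set_bit(int nr, atomic_ulong *flags)
{
	atomic_fetch_or(flags, 1UL << nr);
}

/* Analogue of test_bit(nr, &cc->flags). */
static bool model_test_bit(int nr, atomic_ulong *flags)
{
	return atomic_load(flags) & (1UL << nr);
}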
@@ -1919,9 +1920,32 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
 }
 
+static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
+				       struct convert_context *ctx)
+
+{
+	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
+		return false;
+
+	/*
+	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
+	 * constraints so they do not need to be issued inline by
+	 * kcryptd_crypt_write_convert().
+	 */
+	switch (bio_op(ctx->bio_in)) {
+	case REQ_OP_WRITE:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_WRITE_ZEROES:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
+	struct convert_context *ctx = &io->ctx;
 	struct bio *clone;
 	int crypt_finished;
 	sector_t sector = io->sector;
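
kcryptd_crypt_write_inline() gates the new behavior: it returns true only for ordered write operations on a target flagged DM_CRYPT_WRITE_INLINE. The distinction it draws matters because a sequential-write-required zone accepts a regular write only at its current write pointer, whereas a zone append lets the device pick the location itself, so append submission order is irrelevant. A minimal userspace model of those zone semantics (illustrative only, every name hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct zone_model {
	uint64_t start;	/* first sector of the zone */
	uint64_t len;	/* zone size in sectors */
	uint64_t wp;	/* current write pointer */
};

/* REQ_OP_WRITE analogue: accepted only exactly at the write pointer,
 * so two in-flight writes that get reordered make the second one fail. */
static bool zone_write(struct zone_model *z, uint64_t sector, uint64_t nr)
{
	if (sector != z->wp || z->wp + nr > z->start + z->len)
		return false;	/* out of order or past the zone: I/O error */
	z->wp += nr;
	return true;
}

/* REQ_OP_ZONE_APPEND analogue: the device chooses the location and
 * reports it back, so submission order cannot cause an error. */
static bool zone_append(struct zone_model *z, uint64_t nr, uint64_t *sector)
{
	if (z->wp + nr > z->start + z->len)
		return false;
	*sector = z->wp;
	z->wp += nr;
	return true;
}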
@@ -1931,7 +1955,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
@@ -1945,11 +1969,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	sector += bio_sectors(clone);
 
 	crypt_inc_pending(io);
-	r = crypt_convert(cc, &io->ctx,
+	r = crypt_convert(cc, ctx,
 			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
 	if (r)
 		io->error = r;
-	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+		/* Wait for completion signaled by kcryptd_async_done() */
+		wait_for_completion(&ctx->restart);
+		crypt_finished = 1;
+	}
 
 	/* Encryption was already finished, submit io now */
 	if (crypt_finished) {
@@ -2021,10 +2050,21 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (!atomic_dec_and_test(&ctx->cc_pending))
 		return;
 
-	if (bio_data_dir(io->base_bio) == READ)
+	/*
+	 * The request is fully completed: for inline writes, let
+	 * kcryptd_crypt_write_convert() do the IO submission.
+	 */
+	if (bio_data_dir(io->base_bio) == READ) {
 		kcryptd_crypt_read_done(io);
-	else
-		kcryptd_crypt_write_io_submit(io, 1);
+		return;
+	}
+
+	if (kcryptd_crypt_write_inline(cc, ctx)) {
+		complete(&ctx->restart);
+		return;
+	}
+
+	kcryptd_crypt_write_io_submit(io, 1);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
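
The two write-path hunks above form a handshake. When crypt_convert() goes asynchronous, kcryptd_crypt_write_convert() parks on ctx->restart instead of returning, and kcryptd_async_done() signals that completion rather than submitting the clone bio itself, so the encrypted write is still issued from the original caller's context and therefore in the original order. A minimal sketch of the same handshake, assuming POSIX threads, with a hypothetical completion_model standing in for the kernel's struct completion:

#include <pthread.h>
#include <stdbool.h>

struct completion_model {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t cond;	/* init with PTHREAD_COND_INITIALIZER */
	bool done;
};

/* kcryptd_async_done() side: mark done and wake the waiter. */
static void model_complete(struct completion_model *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* kcryptd_crypt_write_convert() side: block until signaled. */
static void model_wait_for_completion(struct completion_model *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

Waiting inline trades away some submission-path parallelism, but that is the point: it buys the strict per-issuer write ordering that zoned devices require.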
@@ -2936,6 +2976,21 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 	return 0;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+
+static int crypt_report_zones(struct dm_target *ti,
+		struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+	struct crypt_config *cc = ti->private;
+	sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
+
+	args->start = cc->start;
+	return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
+				   dm_report_zones_cb, args);
+}
+
+#endif
+
 /*
  * Construct an encryption mapping:
  * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
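
crypt_report_zones() translates between two address spaces. dm_target_offset() is essentially "sector - ti->begin", so args->next_sector, a sector in the dm table's address space, becomes a sector on the backing device before the report is issued; recording args->start = cc->start then lets dm core's dm_report_zones_cb() shift the reported zone positions back. A sketch of that arithmetic, using hypothetical plain-integer stand-ins for the dm structures:

#include <stdint.h>

/* Forward mapping done above: table sector -> backing device sector. */
static uint64_t target_to_device(uint64_t table_sector,
				 uint64_t ti_begin,	/* ti->begin */
				 uint64_t dev_start)	/* cc->start */
{
	return dev_start + (table_sector - ti_begin);
}

/* Reverse mapping applied by dm core to each reported zone. */
static uint64_t device_to_target(uint64_t dev_sector,
				 uint64_t ti_begin,
				 uint64_t dev_start)
{
	return ti_begin + (dev_sector - dev_start);
}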
@@ -3069,6 +3124,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	cc->start = tmpll;
 
+	/*
+	 * For zoned block devices, we need to preserve the issuer write
+	 * ordering. To do so, disable write workqueues and force inline
+	 * encryption completion.
+	 */
+	if (bdev_is_zoned(cc->dev->bdev)) {
+		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
+		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
+	}
+
 	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
 		ret = crypt_integrity_ctr(cc, ti);
 		if (ret)
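
With both bits forced on for zoned devices, write bios are encrypted in the submitting context rather than on the write workqueue, and if the crypto driver still completes asynchronously the issuer waits on ctx->restart before submitting, as implemented earlier. A hedged sketch of who ends up submitting an encrypted write once DM_CRYPT_NO_WRITE_WORKQUEUE is set; the enum and function are illustrative, not dm-crypt code:

#include <stdbool.h>

enum submitter {
	ISSUER_CONTEXT,	/* the clone bio is submitted by the caller */
	ASYNC_CALLBACK,	/* kcryptd_async_done() submits the clone */
};

static enum submitter write_submitter(bool write_inline, bool crypto_async)
{
	if (!crypto_async)
		return ISSUER_CONTEXT;	/* crypt_convert() finished in place */
	/*
	 * Async crypto: only DM_CRYPT_WRITE_INLINE preserves issuer
	 * ordering, by waiting on ctx->restart before submitting.
	 */
	return write_inline ? ISSUER_CONTEXT : ASYNC_CALLBACK;
}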
@@ -3358,6 +3423,10 @@ static struct target_type crypt_target = {
 	.module = THIS_MODULE,
 	.ctr = crypt_ctr,
 	.dtr = crypt_dtr,
+#ifdef CONFIG_BLK_DEV_ZONED
+	.features = DM_TARGET_ZONED_HM,
+	.report_zones = crypt_report_zones,
+#endif
 	.map = crypt_map,
 	.status = crypt_status,
 	.postsuspend = crypt_postsuspend,