1818#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
1919#endif
2020
/*
 * Mask of the private-expedited-rseq membarrier commands that are valid
 * when the kernel is built with CONFIG_RSEQ; 0 otherwise so those
 * commands are rejected (presumably via the MEMBARRIER_CMD_BITMASK
 * check below — opening of that macro is visible but its body is
 * elided here, so verify against the full file).
 *
 * Fix: the original ORed in MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK,
 * a name that is defined nowhere (stray "_BITMASK" suffix). A command
 * bitmask must OR the command enum values themselves; the register
 * command used by the syscall is MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ.
 */
#ifdef CONFIG_RSEQ
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK		\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ			\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK	0
#endif
2129#define MEMBARRIER_CMD_BITMASK \
2230 (MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
2331 | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
@@ -30,6 +38,11 @@ static void ipi_mb(void *info)
3038 smp_mb (); /* IPIs should be serializing but paranoid. */
3139}
3240
41+ static void ipi_rseq (void * info )
42+ {
43+ rseq_preempt (current );
44+ }
45+
3346static void ipi_sync_rq_state (void * info )
3447{
3548 struct mm_struct * mm = (struct mm_struct * ) info ;
@@ -129,19 +142,27 @@ static int membarrier_global_expedited(void)
129142 return 0 ;
130143}
131144
/*
 * membarrier_private_expedited - IPI the CPUs currently running threads
 * of current->mm so they execute a memory barrier (ipi_mb) or, for
 * MEMBARRIER_FLAG_RSEQ, an rseq-preempt notification (ipi_rseq).
 * @cpu_id >= 0 targets that single CPU; @cpu_id < 0 targets all
 * matching online CPUs. Returns 0, or -EINVAL/-EPERM/-ENOMEM.
 * NOTE(review): interior lines are elided by the diff hunk below.
 */
132- static int membarrier_private_expedited (int flags )
145+ static int membarrier_private_expedited (int flags , int cpu_id )
133146{
134- int cpu ;
135147 cpumask_var_t tmpmask ;
136148 struct mm_struct * mm = current -> mm ;
/* Default IPI payload is a plain memory barrier. */
149+ smp_call_func_t ipi_func = ipi_mb ;
137150
/* Flavor checks: each flavor requires its own prior registration. */
138- if (flags & MEMBARRIER_FLAG_SYNC_CORE ) {
151+ if (flags == MEMBARRIER_FLAG_SYNC_CORE ) {
139152 if (!IS_ENABLED (CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE ))
140153 return - EINVAL ;
141154 if (!(atomic_read (& mm -> membarrier_state ) &
142155 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY ))
143156 return - EPERM ;
/* RSEQ flavor: same registration gate, but the IPI runs ipi_rseq. */
157+ } else if (flags == MEMBARRIER_FLAG_RSEQ ) {
158+ if (!IS_ENABLED (CONFIG_RSEQ ))
159+ return - EINVAL ;
160+ if (!(atomic_read (& mm -> membarrier_state ) &
161+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY ))
162+ return - EPERM ;
163+ ipi_func = ipi_rseq ;
144164 } else {
/* Any other non-zero flags value is a caller bug. */
165+ WARN_ON_ONCE (flags );
145166 if (!(atomic_read (& mm -> membarrier_state ) &
146167 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY ))
147168 return - EPERM ;
@@ -156,35 +177,59 @@ static int membarrier_private_expedited(int flags)
156177 */
157178 smp_mb (); /* system call entry is not a mb. */
158179
/* The cpumask is only needed for the all-CPUs (cpu_id < 0) path. */
159- if (!zalloc_cpumask_var (& tmpmask , GFP_KERNEL ))
180+ if (cpu_id < 0 && !zalloc_cpumask_var (& tmpmask , GFP_KERNEL ))
160181 return - ENOMEM ;
161182
162183 cpus_read_lock ();
163- rcu_read_lock ();
164- for_each_online_cpu ( cpu ) {
184+
/*
 * Single-CPU case: bail out (success) unless the target is online,
 * is not the calling CPU, and is currently running a thread of mm.
 */
185+ if ( cpu_id >= 0 ) {
165186 struct task_struct * p ;
166187
167- /*
168- * Skipping the current CPU is OK even through we can be
169- * migrated at any point. The current CPU, at the point
170- * where we read raw_smp_processor_id(), is ensured to
171- * be in program order with respect to the caller
172- * thread. Therefore, we can skip this CPU from the
173- * iteration.
174- */
175- if (cpu == raw_smp_processor_id ())
176- continue ;
177- p = rcu_dereference (cpu_rq (cpu )-> curr );
178- if (p && p -> mm == mm )
179- __cpumask_set_cpu (cpu , tmpmask );
188+ if (cpu_id >= nr_cpu_ids || !cpu_online (cpu_id ))
189+ goto out ;
190+ if (cpu_id == raw_smp_processor_id ())
191+ goto out ;
192+ rcu_read_lock ();
193+ p = rcu_dereference (cpu_rq (cpu_id )-> curr );
194+ if (!p || p -> mm != mm ) {
195+ rcu_read_unlock ();
196+ goto out ;
197+ }
198+ rcu_read_unlock ();
/* All-CPUs case: collect every other online CPU running this mm. */
199+ } else {
200+ int cpu ;
201+
202+ rcu_read_lock ();
203+ for_each_online_cpu (cpu ) {
204+ struct task_struct * p ;
205+
206+ /*
207+ * Skipping the current CPU is OK even though we can be
208+ * migrated at any point. The current CPU, at the point
209+ * where we read raw_smp_processor_id(), is ensured to
210+ * be in program order with respect to the caller
211+ * thread. Therefore, we can skip this CPU from the
212+ * iteration.
213+ */
214+ if (cpu == raw_smp_processor_id ())
215+ continue ;
216+ p = rcu_dereference (cpu_rq (cpu )-> curr );
217+ if (p && p -> mm == mm )
218+ __cpumask_set_cpu (cpu , tmpmask );
219+ }
220+ rcu_read_unlock ();
180221 }
181- rcu_read_unlock ();
182222
/* Send the IPI(s) with preemption disabled, and wait for completion. */
183223 preempt_disable ();
184- smp_call_function_many (tmpmask , ipi_mb , NULL , 1 );
224+ if (cpu_id >= 0 )
225+ smp_call_function_single (cpu_id , ipi_func , NULL , 1 );
226+ else
227+ smp_call_function_many (tmpmask , ipi_func , NULL , 1 );
185228 preempt_enable ();
186229
187- free_cpumask_var (tmpmask );
/* Only the all-CPUs path allocated tmpmask; free it only there. */
230+ out :
231+ if (cpu_id < 0 )
232+ free_cpumask_var (tmpmask );
188233 cpus_read_unlock ();
189234
190235 /*
@@ -283,11 +328,18 @@ static int membarrier_register_private_expedited(int flags)
/*
 * NOTE(review): partial view — the function opening (declarations of
 * ready_state/set_state/mm) is elided by the hunk above. This section
 * selects the READY bit for the requested flavor, then below ORs the
 * chosen state bits into mm->membarrier_state and syncs runqueues.
 */
283328 set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED ,
284329 ret ;
285330
286- if (flags & MEMBARRIER_FLAG_SYNC_CORE ) {
331+ if (flags == MEMBARRIER_FLAG_SYNC_CORE ) {
287332 if (!IS_ENABLED (CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE ))
288333 return - EINVAL ;
289334 ready_state =
290335 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY ;
/* RSEQ flavor is only registrable on CONFIG_RSEQ kernels. */
336+ } else if (flags == MEMBARRIER_FLAG_RSEQ ) {
337+ if (!IS_ENABLED (CONFIG_RSEQ ))
338+ return - EINVAL ;
339+ ready_state =
340+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY ;
341+ } else {
/* Any other non-zero flags value is a caller bug. */
342+ WARN_ON_ONCE (flags );
343+ }
291343 }
292344
293345 /*
@@ -299,6 +351,8 @@ static int membarrier_register_private_expedited(int flags)
299351 return 0 ;
300352 if (flags & MEMBARRIER_FLAG_SYNC_CORE )
301353 set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE ;
354+ if (flags & MEMBARRIER_FLAG_RSEQ )
355+ set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ ;
302356 atomic_or (set_state , & mm -> membarrier_state );
303357 ret = sync_runqueues_membarrier_state (mm );
304358 if (ret )
@@ -310,8 +364,15 @@ static int membarrier_register_private_expedited(int flags)
310364
311365/**
312366 * sys_membarrier - issue memory barriers on a set of threads
313- * @cmd: Takes command values defined in enum membarrier_cmd.
314- * @flags: Currently needs to be 0. For future extensions.
367+ * @cmd: Takes command values defined in enum membarrier_cmd.
368+ * @flags: Currently needs to be 0 for all commands other than
369+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter
370+ * case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id
371+ * contains the CPU on which to interrupt (= restart)
372+ * the RSEQ critical section.
373+ * @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which
374+ * RSEQ CS should be interrupted (@cmd must be
375+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ).
315376 *
316377 * If this system call is not implemented, -ENOSYS is returned. If the
317378 * command specified does not exist, not available on the running
@@ -337,10 +398,21 @@ static int membarrier_register_private_expedited(int flags)
337398 * smp_mb() X O O
338399 * sys_membarrier() O O O
339400 */
340- SYSCALL_DEFINE2 (membarrier , int , cmd , int , flags )
401+ SYSCALL_DEFINE3 (membarrier , int , cmd , unsigned int , flags , int , cpu_id )
341402{
342- if (unlikely (flags ))
343- return - EINVAL ;
/*
 * Validate flags per command: MEMBARRIER_CMD_FLAG_CPU is only
 * meaningful for MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ; every other
 * command still requires flags == 0.
 */
403+ switch (cmd ) {
404+ case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ :
405+ if (unlikely (flags && flags != MEMBARRIER_CMD_FLAG_CPU ))
406+ return - EINVAL ;
407+ break ;
408+ default :
409+ if (unlikely (flags ))
410+ return - EINVAL ;
411+ }
412+
/* Without the CPU flag, cpu_id = -1 means "all CPUs running this mm". */
413+ if (!(flags & MEMBARRIER_CMD_FLAG_CPU ))
414+ cpu_id = -1 ;
415+
/* Dispatch; NOTE(review): QUERY and global cases elided by hunk below. */
344416 switch (cmd ) {
345417 case MEMBARRIER_CMD_QUERY :
346418 {
@@ -362,13 +434,17 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
362434 case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED :
363435 return membarrier_register_global_expedited ();
364436 case MEMBARRIER_CMD_PRIVATE_EXPEDITED :
365- return membarrier_private_expedited (0 );
437+ return membarrier_private_expedited (0 , cpu_id );
366438 case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED :
367439 return membarrier_register_private_expedited (0 );
368440 case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE :
369- return membarrier_private_expedited (MEMBARRIER_FLAG_SYNC_CORE );
441+ return membarrier_private_expedited (MEMBARRIER_FLAG_SYNC_CORE , cpu_id );
370442 case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE :
371443 return membarrier_register_private_expedited (MEMBARRIER_FLAG_SYNC_CORE );
444+ case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ :
445+ return membarrier_private_expedited (MEMBARRIER_FLAG_RSEQ , cpu_id );
446+ case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ :
447+ return membarrier_register_private_expedited (MEMBARRIER_FLAG_RSEQ );
372448 default :
373449 return - EINVAL ;
374450 }
0 commit comments