#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
@@ -453,6 +454,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
453454 if (!drv -> tcs [ACTIVE_TCS ].num_tcs )
454455 enable_tcs_irq (drv , i , false);
455456 spin_unlock (& drv -> lock );
457+ wake_up (& drv -> tcs_wait );
456458 if (req )
457459 rpmh_tx_done (req , err );
458460 }
@@ -571,73 +573,34 @@ static int find_free_tcs(struct tcs_group *tcs)
571573}
572574
/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 * tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 * or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The h/w does not like if we send a request to the same address,
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

	/* No conflict; grab any free TCS in this group (or -EBUSY if full). */
	return find_free_tcs(tcs);
}
642605
643606/**
@@ -664,18 +627,47 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
664627 */
665628int rpmh_rsc_send_data (struct rsc_drv * drv , const struct tcs_request * msg )
666629{
667- int ret ;
630+ struct tcs_group * tcs ;
631+ int tcs_id ;
632+ unsigned long flags ;
668633
669- do {
670- ret = tcs_write (drv , msg );
671- if (ret == - EBUSY ) {
672- pr_info_ratelimited ("TCS Busy, retrying RPMH message send: addr=%#x\n" ,
673- msg -> cmds [0 ].addr );
674- udelay (10 );
675- }
676- } while (ret == - EBUSY );
634+ tcs = get_tcs_for_msg (drv , msg );
635+ if (IS_ERR (tcs ))
636+ return PTR_ERR (tcs );
677637
678- return ret ;
638+ spin_lock_irqsave (& drv -> lock , flags );
639+
640+ /* Wait forever for a free tcs. It better be there eventually! */
641+ wait_event_lock_irq (drv -> tcs_wait ,
642+ (tcs_id = claim_tcs_for_req (drv , tcs , msg )) >= 0 ,
643+ drv -> lock );
644+
645+ tcs -> req [tcs_id - tcs -> offset ] = msg ;
646+ set_bit (tcs_id , drv -> tcs_in_use );
647+ if (msg -> state == RPMH_ACTIVE_ONLY_STATE && tcs -> type != ACTIVE_TCS ) {
648+ /*
649+ * Clear previously programmed WAKE commands in selected
650+ * repurposed TCS to avoid triggering them. tcs->slots will be
651+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
652+ */
653+ write_tcs_reg_sync (drv , RSC_DRV_CMD_ENABLE , tcs_id , 0 );
654+ write_tcs_reg_sync (drv , RSC_DRV_CMD_WAIT_FOR_CMPL , tcs_id , 0 );
655+ enable_tcs_irq (drv , tcs_id , true);
656+ }
657+ spin_unlock_irqrestore (& drv -> lock , flags );
658+
659+ /*
660+ * These two can be done after the lock is released because:
661+ * - We marked "tcs_in_use" under lock.
662+ * - Once "tcs_in_use" has been marked nobody else could be writing
663+ * to these registers until the interrupt goes off.
664+ * - The interrupt can't go off until we trigger w/ the last line
665+ * of __tcs_set_trigger() below.
666+ */
667+ __tcs_buffer_write (drv , tcs_id , 0 , msg );
668+ __tcs_set_trigger (drv , tcs_id , true);
669+
670+ return 0 ;
679671}
680672
681673/**
@@ -983,6 +975,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
983975 return ret ;
984976
985977 spin_lock_init (& drv -> lock );
978+ init_waitqueue_head (& drv -> tcs_wait );
986979 bitmap_zero (drv -> tcs_in_use , MAX_TCS_NR );
987980
988981 irq = platform_get_irq (pdev , drv -> id );
/* NOTE(review): trailing web-page chrome ("0 commit comments") from the diff
 * extraction — not part of the source; remove when cleaning this file. */