33 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
44 */
55
6+ #include <linux/adreno-smmu-priv.h>
67#include <linux/of_device.h>
78#include <linux/qcom_scm.h>
89
@@ -12,6 +13,132 @@ struct qcom_smmu {
1213 struct arm_smmu_device smmu ;
1314};
1415
16+ #define QCOM_ADRENO_SMMU_GPU_SID 0
17+
18+ static bool qcom_adreno_smmu_is_gpu_device (struct device * dev )
19+ {
20+ struct iommu_fwspec * fwspec = dev_iommu_fwspec_get (dev );
21+ int i ;
22+
23+ /*
24+ * The GPU will always use SID 0 so that is a handy way to uniquely
25+ * identify it and configure it for per-instance pagetables
26+ */
27+ for (i = 0 ; i < fwspec -> num_ids ; i ++ ) {
28+ u16 sid = FIELD_GET (ARM_SMMU_SMR_ID , fwspec -> ids [i ]);
29+
30+ if (sid == QCOM_ADRENO_SMMU_GPU_SID )
31+ return true;
32+ }
33+
34+ return false;
35+ }
36+
37+ static const struct io_pgtable_cfg * qcom_adreno_smmu_get_ttbr1_cfg (
38+ const void * cookie )
39+ {
40+ struct arm_smmu_domain * smmu_domain = (void * )cookie ;
41+ struct io_pgtable * pgtable =
42+ io_pgtable_ops_to_pgtable (smmu_domain -> pgtbl_ops );
43+ return & pgtable -> cfg ;
44+ }
45+
/*
 * Local implementation to configure TTBR0 with the specified pagetable config.
 * The GPU driver will call this to enable TTBR0 when per-instance pagetables
 * are active.
 *
 * @cookie:    the arm_smmu_domain handed to the GPU via adreno_smmu_priv
 * @pgtbl_cfg: config for the per-instance pagetable to install in TTBR0,
 *             or NULL to disable TTBR0 and restore the original TCR
 *
 * Returns 0 on success, -EINVAL if the domain is not set up for split
 * pagetables or if the requested enable/disable state is already in effect.
 *
 * Updates are made to the software shadow of the context bank (cbs[]) and
 * then flushed to hardware with arm_smmu_write_context_bank().
 */

static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie,
		const struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_domain *smmu_domain = (void *)cookie;
	/* The domain's own (TTBR1) pagetable, used to restore state on disable */
	struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];

	/*
	 * The domain must have split pagetables already enabled: with EPD1 set,
	 * TTBR1 walks are disabled, meaning the domain was never configured
	 * with the TTBR1 quirk by init_context
	 */
	if (cb->tcr[0] & ARM_SMMU_TCR_EPD1)
		return -EINVAL;

	/* If the pagetable config is NULL, disable TTBR0 */
	if (!pgtbl_cfg) {
		/* Do nothing if it is already disabled (EPD0 set) */
		if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Set TCR to the original configuration */
		cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg);
		/* No table to point at: leave only the ASID in TTBR0 */
		cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	} else {
		u32 tcr = cb->tcr[0];

		/* Don't call this again if TTBR0 is already enabled */
		if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0))
			return -EINVAL;

		/* Merge the TTBR0 walk attributes and enable both walks */
		tcr |= arm_smmu_lpae_tcr(pgtbl_cfg);
		tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1);

		cb->tcr[0] = tcr;
		/* Install the per-instance table address, tagged with the ASID */
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
		cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid);
	}

	/* Flush the shadow context bank state to the hardware registers */
	arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx);

	return 0;
}
92+
93+ static int qcom_adreno_smmu_alloc_context_bank (struct arm_smmu_domain * smmu_domain ,
94+ struct device * dev , int start , int count )
95+ {
96+ struct arm_smmu_device * smmu = smmu_domain -> smmu ;
97+
98+ /*
99+ * Assign context bank 0 to the GPU device so the GPU hardware can
100+ * switch pagetables
101+ */
102+ if (qcom_adreno_smmu_is_gpu_device (dev )) {
103+ start = 0 ;
104+ count = 1 ;
105+ } else {
106+ start = 1 ;
107+ }
108+
109+ return __arm_smmu_alloc_bitmap (smmu -> context_map , start , count );
110+ }
111+
112+ static int qcom_adreno_smmu_init_context (struct arm_smmu_domain * smmu_domain ,
113+ struct io_pgtable_cfg * pgtbl_cfg , struct device * dev )
114+ {
115+ struct adreno_smmu_priv * priv ;
116+
117+ /* Only enable split pagetables for the GPU device (SID 0) */
118+ if (!qcom_adreno_smmu_is_gpu_device (dev ))
119+ return 0 ;
120+
121+ /*
122+ * All targets that use the qcom,adreno-smmu compatible string *should*
123+ * be AARCH64 stage 1 but double check because the arm-smmu code assumes
124+ * that is the case when the TTBR1 quirk is enabled
125+ */
126+ if ((smmu_domain -> stage == ARM_SMMU_DOMAIN_S1 ) &&
127+ (smmu_domain -> cfg .fmt == ARM_SMMU_CTX_FMT_AARCH64 ))
128+ pgtbl_cfg -> quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1 ;
129+
130+ /*
131+ * Initialize private interface with GPU:
132+ */
133+
134+ priv = dev_get_drvdata (dev );
135+ priv -> cookie = smmu_domain ;
136+ priv -> get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg ;
137+ priv -> set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg ;
138+
139+ return 0 ;
140+ }
141+
15142static const struct of_device_id qcom_smmu_client_of_match [] __maybe_unused = {
16143 { .compatible = "qcom,adreno" },
17144 { .compatible = "qcom,mdp4" },
@@ -65,7 +192,15 @@ static const struct arm_smmu_impl qcom_smmu_impl = {
65192 .reset = qcom_smmu500_reset ,
66193};
67194
68- struct arm_smmu_device * qcom_smmu_impl_init (struct arm_smmu_device * smmu )
195+ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
196+ .init_context = qcom_adreno_smmu_init_context ,
197+ .def_domain_type = qcom_smmu_def_domain_type ,
198+ .reset = qcom_smmu500_reset ,
199+ .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank ,
200+ };
201+
202+ static struct arm_smmu_device * qcom_smmu_create (struct arm_smmu_device * smmu ,
203+ const struct arm_smmu_impl * impl )
69204{
70205 struct qcom_smmu * qsmmu ;
71206
@@ -79,8 +214,18 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
79214
80215 qsmmu -> smmu = * smmu ;
81216
82- qsmmu -> smmu .impl = & qcom_smmu_impl ;
217+ qsmmu -> smmu .impl = impl ;
83218 devm_kfree (smmu -> dev , smmu );
84219
85220 return & qsmmu -> smmu ;
86221}
222+
/* Wrap a generic qcom SMMU instance with the standard qcom impl ops. */
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	return qcom_smmu_create(smmu, &qcom_smmu_impl);
}
227+
/* Wrap the Adreno GPU SMMU instance with the GPU-specific impl ops. */
struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu)
{
	return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
}
0 commit comments