33// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
44// Author: Vignesh Raghavendra <vigneshr@ti.com>
55
6+ #include <linux/completion.h>
7+ #include <linux/dma-direction.h>
8+ #include <linux/dma-mapping.h>
9+ #include <linux/dmaengine.h>
610#include <linux/err.h>
711#include <linux/kernel.h>
812#include <linux/module.h>
1317#include <linux/of.h>
1418#include <linux/of_address.h>
1519#include <linux/platform_device.h>
16- #include <linux/pm_runtime .h>
20+ #include <linux/sched/task_stack .h>
1721#include <linux/types.h>
1822
1923#define AM654_HBMC_CALIB_COUNT 25
2024
/* Per-device state used for DMA-accelerated reads from the flash window. */
struct am654_hbmc_device_priv {
	struct completion rx_dma_complete;	/* signalled by the DMA completion callback */
	phys_addr_t device_base;		/* physical base of the memory-mapped device window */
	struct hyperbus_ctlr *ctlr;		/* back-pointer to the owning controller */
	struct dma_chan *rx_chan;		/* MEMCPY DMA channel; NULL when unavailable */
};
31+
2132struct am654_hbmc_priv {
2233 struct hyperbus_ctlr ctlr ;
2334 struct hyperbus_device hbdev ;
@@ -52,13 +63,103 @@ static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
5263 return ret ;
5364}
5465
/*
 * DMA transfer-complete callback: wakes the waiter blocked in
 * am654_hbmc_dma_read(). @param is the struct am654_hbmc_device_priv
 * installed as callback_param when the descriptor was prepared.
 */
static void am654_hbmc_dma_callback(void *param)
{
	struct am654_hbmc_device_priv *priv = param;

	complete(&priv->rx_dma_complete);
}
72+
73+ static int am654_hbmc_dma_read (struct am654_hbmc_device_priv * priv , void * to ,
74+ unsigned long from , ssize_t len )
75+
76+ {
77+ enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT ;
78+ struct dma_chan * rx_chan = priv -> rx_chan ;
79+ struct dma_async_tx_descriptor * tx ;
80+ dma_addr_t dma_dst , dma_src ;
81+ dma_cookie_t cookie ;
82+ int ret ;
83+
84+ if (!priv -> rx_chan || !virt_addr_valid (to ) || object_is_on_stack (to ))
85+ return - EINVAL ;
86+
87+ dma_dst = dma_map_single (rx_chan -> device -> dev , to , len , DMA_FROM_DEVICE );
88+ if (dma_mapping_error (rx_chan -> device -> dev , dma_dst )) {
89+ dev_dbg (priv -> ctlr -> dev , "DMA mapping failed\n" );
90+ return - EIO ;
91+ }
92+
93+ dma_src = priv -> device_base + from ;
94+ tx = dmaengine_prep_dma_memcpy (rx_chan , dma_dst , dma_src , len , flags );
95+ if (!tx ) {
96+ dev_err (priv -> ctlr -> dev , "device_prep_dma_memcpy error\n" );
97+ ret = - EIO ;
98+ goto unmap_dma ;
99+ }
100+
101+ reinit_completion (& priv -> rx_dma_complete );
102+ tx -> callback = am654_hbmc_dma_callback ;
103+ tx -> callback_param = priv ;
104+ cookie = dmaengine_submit (tx );
105+
106+ ret = dma_submit_error (cookie );
107+ if (ret ) {
108+ dev_err (priv -> ctlr -> dev , "dma_submit_error %d\n" , cookie );
109+ goto unmap_dma ;
110+ }
111+
112+ dma_async_issue_pending (rx_chan );
113+ if (!wait_for_completion_timeout (& priv -> rx_dma_complete , msecs_to_jiffies (len + 1000 ))) {
114+ dmaengine_terminate_sync (rx_chan );
115+ dev_err (priv -> ctlr -> dev , "DMA wait_for_completion_timeout\n" );
116+ ret = - ETIMEDOUT ;
117+ }
118+
119+ unmap_dma :
120+ dma_unmap_single (rx_chan -> device -> dev , dma_dst , len , DMA_FROM_DEVICE );
121+ return ret ;
122+ }
123+
124+ static void am654_hbmc_read (struct hyperbus_device * hbdev , void * to ,
125+ unsigned long from , ssize_t len )
126+ {
127+ struct am654_hbmc_device_priv * priv = hbdev -> priv ;
128+
129+ if (len < SZ_1K || am654_hbmc_dma_read (priv , to , from , len ))
130+ memcpy_fromio (to , hbdev -> map .virt + from , len );
131+ }
132+
/* Controller callbacks registered with the hyperbus core. */
static const struct hyperbus_ops am654_hbmc_ops = {
	.calibrate = am654_hbmc_calibrate,
	.copy_from = am654_hbmc_read,
};
58137
138+ static int am654_hbmc_request_mmap_dma (struct am654_hbmc_device_priv * priv )
139+ {
140+ struct dma_chan * rx_chan ;
141+ dma_cap_mask_t mask ;
142+
143+ dma_cap_zero (mask );
144+ dma_cap_set (DMA_MEMCPY , mask );
145+
146+ rx_chan = dma_request_chan_by_mask (& mask );
147+ if (IS_ERR (rx_chan )) {
148+ if (PTR_ERR (rx_chan ) == - EPROBE_DEFER )
149+ return - EPROBE_DEFER ;
150+ dev_dbg (priv -> ctlr -> dev , "No DMA channel available\n" );
151+ return 0 ;
152+ }
153+ priv -> rx_chan = rx_chan ;
154+ init_completion (& priv -> rx_dma_complete );
155+
156+ return 0 ;
157+ }
158+
59159static int am654_hbmc_probe (struct platform_device * pdev )
60160{
61161 struct device_node * np = pdev -> dev .of_node ;
162+ struct am654_hbmc_device_priv * dev_priv ;
62163 struct device * dev = & pdev -> dev ;
63164 struct am654_hbmc_priv * priv ;
64165 struct resource res ;
@@ -70,7 +171,8 @@ static int am654_hbmc_probe(struct platform_device *pdev)
70171
71172 platform_set_drvdata (pdev , priv );
72173
73- ret = of_address_to_resource (np , 0 , & res );
174+ priv -> hbdev .np = of_get_next_child (np , NULL );
175+ ret = of_address_to_resource (priv -> hbdev .np , 0 , & res );
74176 if (ret )
75177 return ret ;
76178
@@ -88,13 +190,6 @@ static int am654_hbmc_probe(struct platform_device *pdev)
88190 priv -> mux_ctrl = control ;
89191 }
90192
91- pm_runtime_enable (dev );
92- ret = pm_runtime_get_sync (dev );
93- if (ret < 0 ) {
94- pm_runtime_put_noidle (dev );
95- goto disable_pm ;
96- }
97-
98193 priv -> hbdev .map .size = resource_size (& res );
99194 priv -> hbdev .map .virt = devm_ioremap_resource (dev , & res );
100195 if (IS_ERR (priv -> hbdev .map .virt ))
@@ -103,17 +198,32 @@ static int am654_hbmc_probe(struct platform_device *pdev)
103198 priv -> ctlr .dev = dev ;
104199 priv -> ctlr .ops = & am654_hbmc_ops ;
105200 priv -> hbdev .ctlr = & priv -> ctlr ;
106- priv -> hbdev .np = of_get_next_child (dev -> of_node , NULL );
201+
202+ dev_priv = devm_kzalloc (dev , sizeof (* dev_priv ), GFP_KERNEL );
203+ if (!dev_priv ) {
204+ ret = - ENOMEM ;
205+ goto disable_mux ;
206+ }
207+
208+ priv -> hbdev .priv = dev_priv ;
209+ dev_priv -> device_base = res .start ;
210+ dev_priv -> ctlr = & priv -> ctlr ;
211+
212+ ret = am654_hbmc_request_mmap_dma (dev_priv );
213+ if (ret )
214+ goto disable_mux ;
215+
107216 ret = hyperbus_register_device (& priv -> hbdev );
108217 if (ret ) {
109218 dev_err (dev , "failed to register controller\n" );
110- pm_runtime_put_sync (& pdev -> dev );
111- goto disable_pm ;
219+ goto release_dma ;
112220 }
113221
114222 return 0 ;
115- disable_pm :
116- pm_runtime_disable (dev );
223+ release_dma :
224+ if (dev_priv -> rx_chan )
225+ dma_release_channel (dev_priv -> rx_chan );
226+ disable_mux :
117227 if (priv -> mux_ctrl )
118228 mux_control_deselect (priv -> mux_ctrl );
119229 return ret ;
@@ -122,13 +232,15 @@ static int am654_hbmc_probe(struct platform_device *pdev)
122232static int am654_hbmc_remove (struct platform_device * pdev )
123233{
124234 struct am654_hbmc_priv * priv = platform_get_drvdata (pdev );
235+ struct am654_hbmc_device_priv * dev_priv = priv -> hbdev .priv ;
125236 int ret ;
126237
127238 ret = hyperbus_unregister_device (& priv -> hbdev );
128239 if (priv -> mux_ctrl )
129240 mux_control_deselect (priv -> mux_ctrl );
130- pm_runtime_put_sync (& pdev -> dev );
131- pm_runtime_disable (& pdev -> dev );
241+
242+ if (dev_priv -> rx_chan )
243+ dma_release_channel (dev_priv -> rx_chan );
132244
133245 return ret ;
134246}
0 commit comments