 // Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
 // Author: Vignesh Raghavendra <vigneshr@ti.com>
 
+#include <linux/completion.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/sched/task_stack.h>
 #include <linux/types.h>
 
 #define AM654_HBMC_CALIB_COUNT 25
 
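+/*
+ * Per-device private data for the DMA read path: a completion signalled by
+ * the dmaengine callback, the device's physical base address, the controller
+ * handle (used for logging) and the memcpy-capable RX DMA channel.
+ */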
+struct am654_hbmc_device_priv {
+	struct completion rx_dma_complete;
+	phys_addr_t device_base;
+	struct hyperbus_ctlr *ctlr;
+	struct dma_chan *rx_chan;
+};
+
 struct am654_hbmc_priv {
 	struct hyperbus_ctlr ctlr;
 	struct hyperbus_device hbdev;
@@ -51,13 +63,103 @@ static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
 	return ret;
 }
 
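+/* dmaengine callback: signal am654_hbmc_dma_read() that the transfer is done. */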
+static void am654_hbmc_dma_callback(void *param)
+{
+	struct am654_hbmc_device_priv *priv = param;
+
+	complete(&priv->rx_dma_complete);
+}
+
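+/*
+ * Read @len bytes from the device at offset @from into @to with a dmaengine
+ * memcpy transfer. Returns -EINVAL for buffers that cannot be DMA-mapped
+ * (vmalloc or on-stack memory) so the caller can fall back to PIO.
+ */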
+static int am654_hbmc_dma_read(struct am654_hbmc_device_priv *priv, void *to,
+			       unsigned long from, ssize_t len)
+
+{
+	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	struct dma_chan *rx_chan = priv->rx_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dst, dma_src;
+	dma_cookie_t cookie;
+	int ret;
+
+	if (!priv->rx_chan || !virt_addr_valid(to) || object_is_on_stack(to))
+		return -EINVAL;
+
+	dma_dst = dma_map_single(rx_chan->device->dev, to, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(rx_chan->device->dev, dma_dst)) {
+		dev_dbg(priv->ctlr->dev, "DMA mapping failed\n");
+		return -EIO;
+	}
+
+	dma_src = priv->device_base + from;
+	tx = dmaengine_prep_dma_memcpy(rx_chan, dma_dst, dma_src, len, flags);
+	if (!tx) {
+		dev_err(priv->ctlr->dev, "device_prep_dma_memcpy error\n");
+		ret = -EIO;
+		goto unmap_dma;
+	}
+
+	reinit_completion(&priv->rx_dma_complete);
+	tx->callback = am654_hbmc_dma_callback;
+	tx->callback_param = priv;
+	cookie = dmaengine_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(priv->ctlr->dev, "dma_submit_error %d\n", cookie);
+		goto unmap_dma;
+	}
+
+	dma_async_issue_pending(rx_chan);
+	if (!wait_for_completion_timeout(&priv->rx_dma_complete, msecs_to_jiffies(len + 1000))) {
+		dmaengine_terminate_sync(rx_chan);
+		dev_err(priv->ctlr->dev, "DMA wait_for_completion_timeout\n");
+		ret = -ETIMEDOUT;
+	}
+
+unmap_dma:
+	dma_unmap_single(rx_chan->device->dev, dma_dst, len, DMA_FROM_DEVICE);
+	return ret;
+}
+
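+/*
+ * ->copy_from() hook: try DMA for reads of at least 1KiB and fall back to
+ * memcpy_fromio() for small reads or when the DMA transfer fails.
+ */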
+static void am654_hbmc_read(struct hyperbus_device *hbdev, void *to,
+			    unsigned long from, ssize_t len)
+{
+	struct am654_hbmc_device_priv *priv = hbdev->priv;
+
+	if (len < SZ_1K || am654_hbmc_dma_read(priv, to, from, len))
+		memcpy_fromio(to, hbdev->map.virt + from, len);
+}
+
 static const struct hyperbus_ops am654_hbmc_ops = {
 	.calibrate = am654_hbmc_calibrate,
+	.copy_from = am654_hbmc_read,
 };
 
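+/*
+ * Request any memcpy-capable DMA channel. A missing channel is not fatal
+ * (reads fall back to memcpy_fromio()), but probe deferral from the DMA
+ * provider is propagated to the caller.
+ */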
+static int am654_hbmc_request_mmap_dma(struct am654_hbmc_device_priv *priv)
+{
+	struct dma_chan *rx_chan;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_MEMCPY, mask);
+
+	rx_chan = dma_request_chan_by_mask(&mask);
+	if (IS_ERR(rx_chan)) {
+		if (PTR_ERR(rx_chan) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_dbg(priv->ctlr->dev, "No DMA channel available\n");
+		return 0;
+	}
+	priv->rx_chan = rx_chan;
+	init_completion(&priv->rx_dma_complete);
+
+	return 0;
+}
+
 static int am654_hbmc_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
+	struct am654_hbmc_device_priv *dev_priv;
 	struct device *dev = &pdev->dev;
 	struct am654_hbmc_priv *priv;
 	struct resource res;
@@ -96,13 +198,31 @@ static int am654_hbmc_probe(struct platform_device *pdev)
 	priv->ctlr.dev = dev;
 	priv->ctlr.ops = &am654_hbmc_ops;
 	priv->hbdev.ctlr = &priv->ctlr;
+
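+	/* Allocate per-device data and set up the optional RX DMA channel. */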
+	dev_priv = devm_kzalloc(dev, sizeof(*dev_priv), GFP_KERNEL);
+	if (!dev_priv) {
+		ret = -ENOMEM;
+		goto disable_mux;
+	}
+
+	priv->hbdev.priv = dev_priv;
+	dev_priv->device_base = res.start;
+	dev_priv->ctlr = &priv->ctlr;
+
+	ret = am654_hbmc_request_mmap_dma(dev_priv);
+	if (ret)
+		goto disable_mux;
+
 	ret = hyperbus_register_device(&priv->hbdev);
 	if (ret) {
 		dev_err(dev, "failed to register controller\n");
-		goto disable_mux;
+		goto release_dma;
 	}
 
 	return 0;
+release_dma:
+	if (dev_priv->rx_chan)
+		dma_release_channel(dev_priv->rx_chan);
 disable_mux:
 	if (priv->mux_ctrl)
 		mux_control_deselect(priv->mux_ctrl);
@@ -112,12 +232,16 @@ static int am654_hbmc_probe(struct platform_device *pdev)
 static int am654_hbmc_remove(struct platform_device *pdev)
 {
 	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+	struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
 	int ret;
 
 	ret = hyperbus_unregister_device(&priv->hbdev);
 	if (priv->mux_ctrl)
 		mux_control_deselect(priv->mux_ctrl);
 
+	if (dev_priv->rx_chan)
+		dma_release_channel(dev_priv->rx_chan);
+
 	return ret;
 }
 