/* drivers/net/danipc/danipc_lowlevel.c */

/*
	All files, except where stated otherwise at the beginning of the
	file, are under the ISC license:
	----------------------------------------------------------------------

	Copyright (c) 2010-2012 Design Art Networks Ltd.

	Permission to use, copy, modify, and/or distribute this software for any
	purpose with or without fee is hereby granted, provided that the above
	copyright notice and this permission notice appear in all copies.

	THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
	WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
	MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
	ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
	WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
	ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
	OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/


#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/irq.h>

#include "danipc_k.h"


#include "ipc_api.h"

#include "danipc_lowlevel.h"

#define AF_THRESHOLD		0x7d

#define FIFO_THR_AF_CFG_F0	0x34
#define CDU_INT0_MASK_F0	0x44
#define CDU_INT0_ENABLE_F0	0x4c
#define CDU_INT0_CLEAR_F0	0x64

#define IPC_INTR_FIFO_AF	5
#define IPC_INTR(intr)		(1 << (intr))


/* IPC and Linux coexistence.
 * IPC uses/needs physical addresses with bit 31 set, while Linux obviously
 * uses virtual addresses. So when writing an address to IPC or reading one
 * from IPC, make sure it is converted from a virtual to an IPC address
 * (physical address with bit 31 set) and vice versa.
 * For every cpuid (except my own) the FIFO buffers of both priorities are
 * remapped. It is assumed (though not strictly guaranteed) that each
 * node's FIFO buffers live in one contiguous region, so remapping a
 * window of 2 * FIFO_MAP_SIZE is a safe way to access all possible FIFO
 * buffers.
 * For my own CPU just take the physical address.
 * Vladik, 21.08.2011
 */

#define FIFO_MAP_SIZE		SZ_256K
#define FIFO_MAP_MASK		(FIFO_MAP_SIZE - 1)
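
/* Illustrative example of the window arithmetic in remap_fifo_mem() below,
 * using hypothetical addresses: with FIFO_MAP_SIZE = SZ_256K (so
 * FIFO_MAP_MASK = 0x3ffff), a FIFO at paddr 0x80123400 is rounded up to
 * 0x80140000, and the 2 * FIFO_MAP_SIZE window [0x800c0000, 0x80140000)
 * is ioremapped; the FIFO itself lies inside that window.
 */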

uint8_t	__iomem			*ipc_buffers;

struct ipc_to_virt_map		ipc_to_virt_map[PLATFORM_MAX_NUM_OF_NODES][2];

static void __iomem		*krait_ipc_mux;

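/* Seed the translation table for the local node: its FIFO buffers were
 * already ioremapped into ipc_buffers by alloc_ipc_buffers(), so filling
 * in vaddr/paddr here keeps remap_fifo_mem() from mapping them again.
 */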
static void init_own_ipc_to_virt_map(struct danipc_priv *priv)
{
	struct ipc_to_virt_map *high_map =
		&ipc_to_virt_map[LOCAL_IPC_ID][ipc_trns_prio_1];
	struct ipc_to_virt_map *low_map =
		&ipc_to_virt_map[LOCAL_IPC_ID][ipc_trns_prio_0];

	/* This prevents remapping by remap_fifo_mem() */
	high_map->vaddr		= &ipc_buffers[0];
	high_map->paddr		= priv->res_start[IPC_BUFS_RES];

	low_map->vaddr		= &ipc_buffers[IPC_BUF_COUNT_MAX *
							IPC_BUF_SIZE_MAX];
	low_map->paddr		= priv->res_start[IPC_BUFS_RES] +
							(IPC_BUF_SIZE / 2);
}


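/* Unmap the FIFO regions of all remote nodes. The local node is skipped:
 * its entries point into ipc_buffers, which free_ipc_buffers() unmaps.
 */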
static void unmap_ipc_to_virt_map(void)
{
	int		cpuid;

	for (cpuid = 0; cpuid < PLATFORM_MAX_NUM_OF_NODES; cpuid++) {
		if (cpuid == LOCAL_IPC_ID)
			continue;
		if (ipc_to_virt_map[cpuid][ipc_trns_prio_1].vaddr)
			iounmap(ipc_to_virt_map[cpuid][ipc_trns_prio_1].vaddr);
		if (ipc_to_virt_map[cpuid][ipc_trns_prio_0].vaddr)
			iounmap(ipc_to_virt_map[cpuid][ipc_trns_prio_0].vaddr);
	}
}

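/* Map the FIFO region of a remote node. When a shared memory size is
 * defined for the CPU, a single window of twice that size is mapped and
 * shared by both priorities; otherwise each priority gets its own window
 * of 2 * FIFO_MAP_SIZE.
 */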
static void remap_fifo_mem(const int cpuid, const unsigned prio,
				const uint32_t paddr)
{
	struct ipc_to_virt_map *const map = ipc_to_virt_map[cpuid];
	unsigned other_prio = (prio == ipc_trns_prio_0) ?
				ipc_trns_prio_1 : ipc_trns_prio_0;
	uint32_t start_addr;
	uint32_t map_size;
	uint32_t map_mask;

	/* Use shared memory size if defined for given CPU.
	 * Since shared memory is used for both FIFO priorities, remap
	 * only once for this CPU.
	 */
	if (ipc_shared_mem_sizes[cpuid]) {
		map_size = ipc_shared_mem_sizes[cpuid];
		map_mask = map_size - 1;
		start_addr = ((paddr + map_mask) & ~map_mask) - map_size;
		map[prio].paddr = map[other_prio].paddr = start_addr;
		map[prio].vaddr = map[other_prio].vaddr =
			ioremap_nocache(start_addr, 2 * map_size);
	} else {
		map_size = FIFO_MAP_SIZE;
		map_mask = FIFO_MAP_MASK;
		start_addr = ((paddr + map_mask) & ~map_mask) - 2 * map_size;
		map[prio].paddr = start_addr;
		map[prio].vaddr = ioremap_nocache(start_addr, 2 * map_size);
	}

	if (!map[prio].vaddr) {
		pr_err(
			"%s:%d cpuid = %d priority = %u cannot remap FIFO memory at addr. 0x%x\n",
			__func__, __LINE__, cpuid, prio, start_addr);
		BUG();
	}
}

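/* Translate a virtual address within a mapped FIFO region into the IPC
 * (physical) address understood by the hardware. BUGs on an illegal
 * priority or an unmapped region.
 */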
uint32_t virt_to_ipc(const int cpuid, const unsigned prio, void *vaddr)
{
	if (likely(prio <= ipc_trns_prio_1)) {
		struct ipc_to_virt_map	*map = &ipc_to_virt_map[cpuid][prio];
		int		offset;

		if (unlikely(!map->paddr)) {
			pr_err("%s:%d: cpuid = %d priority = %u unmapped\n",
				__func__, __LINE__, cpuid, prio);
			BUG();
		}
		offset = (unsigned)vaddr - (unsigned)map->vaddr;
		return map->paddr + offset;
	} else {
		pr_err("%s:%d: cpuid = %d illegal priority = %u\n",
			__func__, __LINE__, cpuid, prio);
		BUG();
	}

	return 0;
}

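/* Translate an IPC (physical) address into a kernel virtual address,
 * mapping the owning node's FIFO region on first use. Both cpuid and
 * prio are validated before indexing the translation table.
 */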
void *ipc_to_virt(const int cpuid, const unsigned prio, const uint32_t ipc_addr)
{
	if (likely(prio <= ipc_trns_prio_1 &&
			cpuid < PLATFORM_MAX_NUM_OF_NODES)) {
		struct ipc_to_virt_map	*map = &ipc_to_virt_map[cpuid][prio];
		const uint32_t	paddr = ipc_addr;
		int		offset;

		if (unlikely(!map->paddr))
			remap_fifo_mem(cpuid, prio, paddr);
		offset = paddr - map->paddr;
		return (uint8_t *)map->vaddr + offset;
	} else {
		pr_err("%s:%d: illegal cpuid = %d or priority = %u\n",
			__func__, __LINE__, cpuid, prio);
		BUG();
	}
	return NULL;
}


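/* Tasklet handler scheduled from danipc_interrupt(): acknowledge the
 * almost-full interrupt, drain the high-priority FIFO, unmask the
 * interrupt and re-enable the IRQ line.
 */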
void high_prio_rx(unsigned long data)
{
	struct net_device	*dev = (struct net_device *)data;
	const unsigned	base_addr = ipc_regs[LOCAL_IPC_ID];

	/* Clear interrupt source. */
	__raw_writel_no_log(IPC_INTR(IPC_INTR_FIFO_AF),
				(void *)(base_addr + CDU_INT0_CLEAR_F0));

	/* Process all messages. */
	ipc_recv(IPC_FIFO_BUF_NUM_HIGH, ipc_trns_prio_1);

	/* Unmask IPC AF interrupt again. */
	__raw_writel_no_log(~IPC_INTR(IPC_INTR_FIFO_AF),
					(void *)(base_addr + CDU_INT0_MASK_F0));

	enable_irq(dev->irq);
}

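/* Hard-IRQ handler: mask all IPC interrupts, disable the IRQ line and
 * defer the actual FIFO draining to the rx tasklet.
 */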
irqreturn_t danipc_interrupt(int irq, void *data)
{
	struct net_device	*dev = (struct net_device *)data;
	struct danipc_priv	*priv = netdev_priv(dev);
	const unsigned	base_addr = ipc_regs[LOCAL_IPC_ID];

	/* Mask all IPC interrupts. */
	__raw_writel_no_log(~0, (void *)(base_addr + CDU_INT0_MASK_F0));

	disable_irq_nosync(irq);

	tasklet_schedule(&priv->rx_task);

	return IRQ_HANDLED;
}

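/* Set the FIFO almost-full threshold, then clear, enable and unmask the
 * almost-full interrupt; finally open the Krait interrupt mux.
 */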
void danipc_init_irq(struct net_device *dev, struct danipc_priv *priv)
{
	const unsigned	base_addr = ipc_regs[LOCAL_IPC_ID];

	__raw_writel_no_log(AF_THRESHOLD,
				(void *)(base_addr + FIFO_THR_AF_CFG_F0));
	__raw_writel_no_log(IPC_INTR(IPC_INTR_FIFO_AF),
				(void *)(base_addr + CDU_INT0_CLEAR_F0));
	__raw_writel_no_log(IPC_INTR(IPC_INTR_FIFO_AF),
				(void *)(base_addr + CDU_INT0_ENABLE_F0));
	__raw_writel_no_log(~IPC_INTR(IPC_INTR_FIFO_AF),
				(void *)(base_addr + CDU_INT0_MASK_F0));

	/* Enable passing all IPC interrupts. */
	__raw_writel_no_log(~0, krait_ipc_mux);
}


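/* Map the global IPC agent table from the AGENT_TABLE_RES resource. */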
static void remap_agent_table(struct danipc_priv *priv)
{
	agent_table = ioremap_nocache(priv->res_start[AGENT_TABLE_RES],
					priv->res_len[AGENT_TABLE_RES]);
	if (!agent_table) {
		pr_err("%s: cannot remap IPC global agent table\n", __func__);
		BUG();
	}
}

static void unmap_agent_table(void)
{
	if (agent_table)
		iounmap(agent_table);
}

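/* Map one node's IPC register block and reset the pending-skb counters
 * of both priority maps.
 */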
static void prepare_node(const int cpuid)
{
	struct ipc_to_virt_map	*map;

	ipc_regs[cpuid] = (uintptr_t)ioremap_nocache(ipc_regs_phys[cpuid],
							ipc_regs_len[cpuid]);
	map = &ipc_to_virt_map[cpuid][ipc_trns_prio_0];
	atomic_set(&map->pending_skbs, 0);

	map = &ipc_to_virt_map[cpuid][ipc_trns_prio_1];
	atomic_set(&map->pending_skbs, 0);
}

static void prepare_nodes(void)
{
	int		n;

	for (n = 0; n < PLATFORM_MAX_NUM_OF_NODES; n++)
		if (ipc_regs_phys[n])
			prepare_node(n);
}

static void unmap_nodes_memory(void)
{
	int		n;

	for (n = 0; n < PLATFORM_MAX_NUM_OF_NODES; n++)
		if (ipc_regs[n])
			iounmap((void __iomem *)ipc_regs[n]);
}

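/* Despite the name, this does not allocate: it ioremaps the fixed
 * IPC_BUFS_RES region and zeroes it.
 */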
static void *alloc_ipc_buffers(struct danipc_priv *priv)
{
	ipc_buffers = ioremap_nocache(priv->res_start[IPC_BUFS_RES],
					priv->res_len[IPC_BUFS_RES]);
	if (ipc_buffers)
		memset(ipc_buffers, 0, priv->res_len[IPC_BUFS_RES]);
	else {
		pr_err("%s: cannot allocate IPC buffers!\n", __func__);
		BUG();
	}
	return ipc_buffers;
}

static void free_ipc_buffers(void)
{
	if (ipc_buffers)
		iounmap(ipc_buffers);
}

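/* Map the Krait IPC interrupt mux register; danipc_init_irq() writes ~0
 * there to pass all IPC interrupts through.
 */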
static void remap_krait_ipc_mux(struct danipc_priv *priv)
{
	krait_ipc_mux = ioremap_nocache(priv->res_start[KRAIT_IPC_MUX_RES],
					priv->res_len[KRAIT_IPC_MUX_RES]);
	if (!krait_ipc_mux) {
		pr_err("%s: cannot remap Krait IPC mux\n", __func__);
		BUG();
	}
}


static void unmap_krait_ipc_mux(void)
{
	if (krait_ipc_mux)
		iounmap(krait_ipc_mux);
}


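/* One-time low-level initialization: map the IPC buffers, per-node
 * registers, the global agent table and the Krait interrupt mux, then
 * initialize the IPC core.
 */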
int danipc_ll_init(struct danipc_priv *priv)
{
	int		rc = -1;

	if (alloc_ipc_buffers(priv) != NULL) {
		prepare_nodes();
		remap_agent_table(priv);
		init_own_ipc_to_virt_map(priv);
		remap_krait_ipc_mux(priv);
		rc = ipc_init();
	}

	return rc;
}


void danipc_ll_cleanup(void)
{
	unmap_krait_ipc_mux();
	unmap_ipc_to_virt_map();
	unmap_agent_table();
	unmap_nodes_memory();
	free_ipc_buffers();
}


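/* Polling entry point: drain the low-priority FIFO. High-priority
 * traffic is handled by the interrupt/tasklet path above.
 */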
void danipc_poll(struct net_device *dev)
{
	(void)dev;
	ipc_recv(IPC_FIFO_BUF_NUM_LOW, ipc_trns_prio_0);
}