/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "kgsl_mmu.h"
#include <linux/kmemleak.h>
#include <linux/iommu.h>
#include "kgsl_log.h"
struct kgsl_device;
struct kgsl_process_private;
#define KGSL_CACHE_OP_INV 0x01
#define KGSL_CACHE_OP_FLUSH 0x02
#define KGSL_CACHE_OP_CLEAN 0x03
int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size);
int kgsl_cma_alloc_coherent(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size);
int kgsl_cma_alloc_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, size_t size);
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
uint32_t *dst,
unsigned int offsetbytes);
int kgsl_sharedmem_writel(struct kgsl_device *device,
const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes,
uint32_t src);
int kgsl_sharedmem_set(struct kgsl_device *device,
const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes, unsigned int value,
unsigned int sizebytes);
int kgsl_cache_range_op(struct kgsl_memdesc *memdesc,
size_t offset, size_t size,
unsigned int op);
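/*
 * Example (illustrative sketch, not part of this file's API): a CPU
 * producer fills a cached buffer, then flushes the written range so the
 * GPU observes the data. This assumes the memdesc exposes its kernel
 * mapping through ->hostptr, as the kgsl_sharedmem_* accessors do.
 *
 *	memcpy(memdesc->hostptr, data, len);
 *	ret = kgsl_cache_range_op(memdesc, 0, len, KGSL_CACHE_OP_FLUSH);
 *	if (ret)
 *		return ret;
 */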
int kgsl_process_init_sysfs(struct kgsl_device *device,
struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);
/*
* kgsl_memdesc_get_align - Get alignment flags from a memdesc
 * @memdesc: the memdesc
 *
 * Returns the alignment requested, as a power-of-2 exponent.
*/
static inline int
kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
{
return (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
}
/*
* kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
* @memdesc: the memdesc
*
* Returns a KGSL_CACHEMODE* value.
*/
static inline int
kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
{
return (memdesc->flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT;
}
/*
* kgsl_memdesc_set_align - Set alignment flags of a memdesc
 * @memdesc: the memdesc
 * @align: alignment requested, as a power-of-2 exponent
 *
 * Returns 0; requests larger than 2^32 bytes are clamped to 2^32.
 */
static inline int
kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
{
if (align > 32) {
KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
align = 32;
}
memdesc->flags &= ~KGSL_MEMALIGN_MASK;
memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
return 0;
}
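/*
 * Example (sketch): the alignment is an exponent, not a byte count, so
 * an 8KB-aligned buffer is requested with ilog2(SZ_8K) == 13 rather
 * than SZ_8K itself.
 *
 *	kgsl_memdesc_set_align(memdesc, ilog2(SZ_8K));
 */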
/*
 * kgsl_memdesc_usermem_type - return buffer type
 * @memdesc: the memdesc
 *
 * Returns a KGSL_MEM_ENTRY_* value for this buffer, which
 * identifies whether it was allocated by us or imported from
 * another allocator.
 */
static inline unsigned int
kgsl_memdesc_usermem_type(const struct kgsl_memdesc *memdesc)
{
return (memdesc->flags & KGSL_MEMFLAGS_USERMEM_MASK)
>> KGSL_MEMFLAGS_USERMEM_SHIFT;
}
static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
/*
* Try sg_dma_address first to support ion carveout
* regions which do not work with sg_phys().
*/
unsigned int pa = sg_dma_address(sg);
if (pa == 0)
pa = sg_phys(sg);
return pa;
}
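/*
 * memdesc_sg_phys - build a one-entry scatterlist for a physically
 * contiguous region
 * @memdesc: the memdesc
 * @physaddr: physical start address of the region
 * @size: size of the region in bytes
 *
 * Returns 0 on success or -ENOMEM if the scatterlist entry could not
 * be allocated.
 */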
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
phys_addr_t physaddr, size_t size)
{
memdesc->sg = kgsl_malloc(sizeof(struct scatterlist));
if (memdesc->sg == NULL)
return -ENOMEM;
if (!is_vmalloc_addr(memdesc->sg))
kmemleak_not_leak(memdesc->sg);
memdesc->sglen = 1;
sg_init_table(memdesc->sg, 1);
memdesc->sg[0].length = size;
memdesc->sg[0].offset = 0;
memdesc->sg[0].dma_address = physaddr;
return 0;
}
/*
* kgsl_memdesc_is_global - is this a globally mapped buffer?
* @memdesc: the memdesc
*
* Returns nonzero if this is a global mapping, 0 otherwise
*/
static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
{
return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
}
/*
* kgsl_memdesc_is_secured - is this a secure buffer?
* @memdesc: the memdesc
*
* Returns true if this is a secure mapping, false otherwise
*/
static inline bool kgsl_memdesc_is_secured(const struct kgsl_memdesc *memdesc)
{
return memdesc && (memdesc->priv & KGSL_MEMDESC_SECURE);
}
/*
* kgsl_memdesc_has_guard_page - is the last page a guard page?
 * @memdesc: the memdesc
*
* Returns nonzero if there is a guard page, 0 otherwise
*/
static inline int
kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
{
return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
}
/*
* kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
 * @memdesc: the memdesc
 *
 * Returns nonzero if the GPU address will mirror the CPU mapping,
 * 0 otherwise.
 */
static inline int
kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
{
return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
}
/*
* kgsl_memdesc_mmapsize - get the size of the mmap region
 * @memdesc: the memdesc
 *
 * The entire memdesc must be mapped. Additionally, if the
 * CPU mapping is going to be mirrored, there must be room
 * for the guard page to be mapped so that the address spaces
 * match up.
*/
static inline size_t
kgsl_memdesc_mmapsize(const struct kgsl_memdesc *memdesc)
{
size_t size = memdesc->size;
if (kgsl_memdesc_has_guard_page(memdesc))
size += SZ_4K;
return size;
}
static inline int
kgsl_allocate_user(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, unsigned int flags)
{
int ret;
if (size == 0)
return -EINVAL;
memdesc->flags = flags;
if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE) {
size = ALIGN(size, PAGE_SIZE);
ret = kgsl_cma_alloc_coherent(device, memdesc, pagetable, size);
} else if (flags & KGSL_MEMFLAGS_SECURE)
ret = kgsl_cma_alloc_secure(device, memdesc, size);
else
ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);
return ret;
}
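/*
 * Example (illustrative sketch): allocating a 64KB buffer on behalf of
 * a process. The kgsl_process_private "private" and the mem entry
 * holding the memdesc are illustrative assumptions, not names defined
 * in this file.
 *
 *	ret = kgsl_allocate_user(device, &entry->memdesc,
 *				 private->pagetable, SZ_64K,
 *				 KGSL_MEMFLAGS_USE_CPU_MAP);
 *	if (ret)
 *		return ret;
 */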
static inline int
kgsl_allocate_contiguous(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, size_t size)
{
int ret;
size = ALIGN(size, PAGE_SIZE);
ret = kgsl_cma_alloc_coherent(device, memdesc, NULL, size);
if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
memdesc->gpuaddr = memdesc->physaddr;
return ret;
}
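/*
 * Example (sketch): a page-sized device-internal allocation. When the
 * MMU is disabled, the helper above aliases gpuaddr to physaddr, so
 * the buffer is immediately usable by the GPU.
 *
 *	ret = kgsl_allocate_contiguous(device, &memdesc, PAGE_SIZE);
 */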
/*
* kgsl_allocate_global() - Allocate GPU accessible memory that will be global
* across all processes
* @device: The device pointer to which the memdesc belongs
* @memdesc: Pointer to a KGSL memory descriptor for the memory allocation
* @size: size of the allocation
* @flags: Allocation flags that control how the memory is mapped
 * @priv: Priv flags that control memory attributes
*
* Allocate contiguous memory for internal use and add the allocation to the
* list of global pagetable entries that will be mapped at the same address in
 * all pagetables. This is used for device-wide GPU allocations such as
 * ringbuffers.
*/
static inline int kgsl_allocate_global(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, size_t size, unsigned int flags,
unsigned int priv)
{
int ret;
memdesc->flags = flags;
memdesc->priv = priv;
ret = kgsl_allocate_contiguous(device, memdesc, size);
if (!ret) {
ret = kgsl_add_global_pt_entry(device, memdesc);
if (ret)
kgsl_sharedmem_free(memdesc);
}
return ret;
}
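/*
 * Example (sketch of a typical caller, e.g. a ringbuffer setup path;
 * "rb->buffer_desc" is a hypothetical field used for illustration):
 * the allocation ends up mapped at the same GPU address in every
 * pagetable.
 *
 *	ret = kgsl_allocate_global(device, &rb->buffer_desc, SZ_32K,
 *				   0, KGSL_MEMDESC_GLOBAL);
 *	if (ret)
 *		return ret;
 */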
/**
 * kgsl_free_global() - Free a device-wide GPU allocation and remove it
 * from the global pagetable entry list
*
* @memdesc: Pointer to the GPU memory descriptor to free
*
* Remove the specific memory descriptor from the global pagetable entry list
 * and free it.
*/
static inline void kgsl_free_global(struct kgsl_memdesc *memdesc)
{
kgsl_remove_global_pt_entry(memdesc);
kgsl_sharedmem_free(memdesc);
}
int kgsl_heap_init(void);
struct page *kgsl_heap_alloc(unsigned long size);
void kgsl_heap_free(struct page *page);
#endif /* __KGSL_SHAREDMEM_H */