1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
|
/**************************************************************************
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Satyanantha RamaGopal M <rama.gopal.m.satyanantha@intel.com>
*/
#include <linux/device.h>
#include "drmP.h"
#include "uapi/drm/drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_sync.h"
#ifdef CONFIG_DRM_I915_SYNC
/* Query whether a sync point has been signalled.
 *
 * Returns 1 if the point has completed, 0 if it is still pending, or a
 * negative error code (-ENODEV if the ring is gone, -ETIMEDOUT if the
 * point failed due to a TDR hang recovery).
 *
 * This callback is synchronous with the thread which calls
 * sync_timeline_signal. If this has been signalled from TDR due to an
 * error then the TDR will have set ring->tdr_seqno to the failing seqno
 * (otherwise it will be 0). Compare the sync point seqno with the
 * failing seqno to detect errors.
 */
static int i915_sync_pt_has_signaled(struct sync_pt *sync_pt)
{
	struct i915_sync_pt *pt = (struct i915_sync_pt *)sync_pt;
	struct i915_sync_timeline *obj =
		(struct i915_sync_timeline *)sync_pt->parent;

	if (!obj->pvt.ring)
		return -ENODEV;

	/* Check for an unassigned point BEFORE the TDR comparison:
	 * an unassigned point has value 0, and tdr_seqno is also 0 when no
	 * hang is pending, so testing tdr_seqno first would spuriously
	 * report -ETIMEDOUT for every pending, unassigned point. */
	if (pt->pvt.value == 0)
		/* It hasn't yet been assigned a sequence number which means
		 * it can't have finished. */
		return 0;

	if (pt->pvt.value == obj->pvt.ring->tdr_seqno)
		return -ETIMEDOUT;

	if (pt->pvt.cycle != obj->pvt.cycle)
		/* The seqno has wrapped so complete this point */
		return 1;

	/* This shouldn't require locking as it is synchronous with the
	 * timeline signal function which is the only updater of these
	 * fields. */
	return (obj->pvt.value >= pt->pvt.value) ? 1 : 0;
}
static int i915_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
{
struct i915_sync_pt *pt_a = (struct i915_sync_pt *)a;
struct i915_sync_pt *pt_b = (struct i915_sync_pt *)b;
if (pt_a->pvt.value == pt_b->pvt.value)
return 0;
else
return ((pt_a->pvt.value > pt_b->pvt.value) ? 1 : -1);
}
/* Copy the driver-private sync point state (seqno value and cycle) into
 * a caller-supplied buffer. Returns the number of bytes written, or
 * -ENOMEM if the buffer is too small. */
static int i915_sync_fill_driver_data(struct sync_pt *sync_pt,
			void *data, int size)
{
	struct i915_sync_pt *pt = (struct i915_sync_pt *)sync_pt;
	int len = sizeof(pt->pvt);

	if (size < len)
		return -ENOMEM;

	memcpy(data, &pt->pvt, len);

	return len;
}
/* Create a sync point on the given timeline with the given seqno value
 * and wrap cycle. Takes a user-interrupt reference on the ring (dropped
 * again in i915_sync_pt_free) so that the timeline gets signalled on
 * completion. Returns NULL on failure. */
static
struct sync_pt *i915_sync_pt_create(struct i915_sync_timeline *obj, u32 value, u32 cycle)
{
	struct i915_sync_pt *pt;
	struct intel_ring_buffer *ring;

	if (!obj)
		return NULL;

	ring = obj->pvt.ring;

	/* The ring can be NULL (i915_sync_pt_has_signaled guards against
	 * the same condition); bail out rather than dereference it. */
	if (!ring)
		return NULL;

	if (!ring->irq_get(ring))
		return NULL;

	pt = (struct i915_sync_pt *)
		sync_pt_create(&obj->obj, sizeof(struct i915_sync_pt));

	if (pt) {
		pt->pvt.value = value;
		pt->pvt.cycle = cycle;
	} else
		/* Allocation failed: drop the irq reference we took above. */
		ring->irq_put(ring);

	return (struct sync_pt *)pt;
}
/* Duplicate a sync point: create a fresh point on the same timeline
 * carrying the same seqno value and cycle. */
static struct sync_pt *i915_sync_pt_dup(struct sync_pt *sync_pt)
{
	struct i915_sync_pt *src = (struct i915_sync_pt *)sync_pt;
	struct i915_sync_timeline *timeline =
		(struct i915_sync_timeline *)sync_pt->parent;

	return i915_sync_pt_create(timeline, src->pvt.value, src->pvt.cycle);
}
/* Free callback for a sync point: drop the user-interrupt reference
 * taken when the point was created in i915_sync_pt_create. */
static void i915_sync_pt_free(struct sync_pt *sync_pt)
{
	struct i915_sync_timeline *timeline =
		(struct i915_sync_timeline *)sync_pt->parent;
	struct intel_ring_buffer *ring = timeline->pvt.ring;

	ring->irq_put(ring);
}
/* Callback table handed to the Android sync framework via
 * sync_timeline_create(); wires the i915-specific sync point
 * operations into the generic timeline machinery.
 * NOTE(review): non-static — presumably referenced from a header or
 * another translation unit; confirm before narrowing linkage. */
struct sync_timeline_ops i915_sync_timeline_ops = {
	.driver_name = "i915_sync",
	.dup = i915_sync_pt_dup,
	.has_signaled = i915_sync_pt_has_signaled,
	.compare = i915_sync_pt_compare,
	.fill_driver_data = i915_sync_fill_driver_data,
	.free_pt = i915_sync_pt_free,
};
int i915_sync_timeline_create(struct drm_device *dev,
const char *name,
struct intel_ring_buffer *ring)
{
struct i915_sync_timeline *obj = (struct i915_sync_timeline *)
sync_timeline_create(&i915_sync_timeline_ops,
sizeof(struct i915_sync_timeline),
name);
if (!obj)
return -EINVAL;
obj->pvt.dev = dev;
obj->pvt.ring = ring;
/* Start the timeline from seqno 0 as this is a special value
* that is never assigned to a batch buffer. */
obj->pvt.value = 0;
ring->timeline = obj;
return 0;
}
void i915_sync_timeline_destroy(struct intel_ring_buffer *ring)
{
if (ring->timeline) {
sync_timeline_destroy(&ring->timeline->obj);
ring->timeline = NULL;
}
}
/* Notify the timeline that the monotonic seqno counter has advanced to
 * 'value'. Tolerates a NULL timeline. Only walks the timeline's
 * waiters (sync_timeline_signal) when the caller owns the sync point. */
void i915_sync_timeline_signal(struct i915_sync_timeline *obj, u32 value,
			       int own_pt)
{
	if (!obj)
		return;

	obj->pvt.value = value;

	if (own_pt)
		sync_timeline_signal(&obj->obj);
}
/* Reset every ring's timeline back to seqno 0 and bump its wrap cycle
 * (pending points from the previous cycle then complete via the
 * cycle-mismatch check in i915_sync_pt_has_signaled). */
void i915_sync_reset_timelines(struct drm_i915_private *dev_priv)
{
	unsigned int i;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *sync_ring = &dev_priv->ring[i];

		/* sync_ring is the address of an array element and can
		 * never be NULL; only the timeline needs checking. The
		 * original code's missing braces left the signal call
		 * outside the guard — harmless only because the callee
		 * NULL-checks, but clearly not the intent. */
		if (sync_ring->timeline) {
			sync_ring->timeline->pvt.cycle++;
			i915_sync_timeline_signal(sync_ring->timeline, 0, 1);
		}
	}
}
/* Emit commands that store 'seqno' into the hardware status page slot
 * reserved for the active sequence number, so a hang can later be
 * attributed to the batch that was executing. Returns 0 or a negative
 * error code from intel_ring_begin. */
static int i915_write_active_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret = intel_ring_begin(ring, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_ACTIVE_SEQNO_INDEX <<
			      MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
/* Prepare per-request sync state before submitting a batch.
 *
 * Writes the request's seqno to the HWS page (for hang attribution) and,
 * if the caller asked for a fence via I915_EXEC_REQUEST_FENCE, creates a
 * sync point on the ring's timeline.
 *
 * Returns: the sync point handle (opaque void*), NULL, or ERR_PTR on
 * seqno-write failure. NOTE(review): NULL is returned BOTH when no fence
 * was requested and when sync point creation fails — callers cannot
 * distinguish the two; confirm this is acceptable at the call sites. */
void *i915_sync_prepare_request(struct drm_i915_gem_execbuffer2 *args,
struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
struct sync_pt *pt;
BUG_ON(!ring->timeline);
/* Write the current seqno to the HWS page so that
* we can identify the cause of any hangs. */
ret = i915_write_active_seqno(ring, seqno);
if (ret) {
DRM_DEBUG_DRIVER("Failed to store seqno for %d\n", ring->id);
return ERR_PTR(ret);
}
/* Fence was not requested, nothing more to do. */
if (!(args->flags & I915_EXEC_REQUEST_FENCE))
return NULL;
/* Caller has requested a sync fence.
* User interrupts will be enabled to make sure that
* the timeline is signalled on completion. */
pt = i915_sync_pt_create(ring->timeline, seqno, ring->timeline->pvt.cycle);
if (!pt)
/* Failure is only logged; NULL falls through to the caller. */
DRM_DEBUG_DRIVER("Failed to create sync point for %d/%u\n",
ring->id, seqno);
return (void *)pt;
}
/* Finish per-request sync state after batch submission.
 *
 * Clears the active seqno in the HWS page, then — if a sync point was
 * created by i915_sync_prepare_request — wraps it in a sync fence,
 * installs the fence on a new fd, and returns the fd to userspace via
 * args->rsvd2. On error, rsvd2 carries the negative error code instead.
 *
 * Returns 0 on success (including the no-fence case) or a negative
 * error code. */
int i915_sync_finish_request(void *handle,
struct drm_i915_gem_execbuffer2 *args,
struct intel_ring_buffer *ring)
{
struct sync_pt *pt = (struct sync_pt *)handle;
int err;
int fd = -1;
struct sync_fence *fence;
/* Clear the active seqno; failure here is best-effort, only logged. */
if (i915_write_active_seqno(ring, 0))
DRM_DEBUG_DRIVER("Failed to clear seqno for %d\n", ring->id);
/* Fence was not requested, nothing more to do. */
if (!pt)
return 0;
fd = get_unused_fd();
if (fd < 0) {
DRM_DEBUG_DRIVER("Unable to get file descriptor for fence\n");
/* fd already holds the negative error; it reaches rsvd2 below. */
err = fd;
goto err;
}
/* NOTE(review): if sync_fence_create fails, pt appears to leak —
 * presumably the caller is expected to free it via
 * i915_sync_cancel_request; confirm against the call sites. */
fence = sync_fence_create("I915", pt);
if (!fence) {
DRM_DEBUG_DRIVER("Fence creation failed\n");
err = -ENOMEM;
goto err_fd;
}
/* On success the fence owns pt and the fd owns the fence. */
sync_fence_install(fence, fd);
/* Return the fence through the rsvd2 field */
args->rsvd2 = (__u64)fd;
return 0;
err_fd:
put_unused_fd(fd);
/* Propagate the error code to userspace through rsvd2 as well. */
fd = err;
err:
args->rsvd2 = (__u64)fd;
return err;
}
/* Discard a sync point created by i915_sync_prepare_request when the
 * request is abandoned before a fence could be installed. Safe to call
 * with a NULL handle. */
void i915_sync_cancel_request(void *handle,
		struct drm_i915_gem_execbuffer2 *args,
		struct intel_ring_buffer *ring)
{
	struct sync_pt *pt = (struct sync_pt *)handle;

	if (!pt)
		return;

	sync_pt_free(pt);
}
/* Advance the ring's timeline to the hardware's current seqno,
 * completing any sync points that have now been reached. No-op if the
 * ring has no timeline. */
void i915_sync_timeline_advance(struct intel_ring_buffer *ring)
{
	if (!ring->timeline)
		return;

	i915_sync_timeline_signal(ring->timeline,
				  ring->get_seqno(ring, false), 1);
}
/* TDR hook: after a ring hang, fail any sync point whose seqno matches
 * the batch that was active at the time of the hang.
 * The order below matters: the HWS slot is cleared before signalling,
 * and tdr_seqno is cleared after, so the failing seqno is visible to
 * i915_sync_pt_has_signaled exactly once. */
void i915_sync_hung_ring(struct intel_ring_buffer *ring)
{
/* Sample the active seqno to see if this request failed during
* a batch buffer */
ring->tdr_seqno = intel_read_status_page(ring,
I915_GEM_ACTIVE_SEQNO_INDEX);
if (ring->tdr_seqno) {
/* Clear it in the HWS to avoid seeing it more than once */
intel_write_status_page(ring, I915_GEM_ACTIVE_SEQNO_INDEX, 0);
/* Signal the timeline. This will cause it to query the
* signaled state of any waiting sync points.
* If any match with ring->tdr_seqno then
* they will be marked with an error state. */
i915_sync_timeline_signal(ring->timeline, ring->tdr_seqno, 1);
/* Clear the tdr_seqno so it isn't seen twice */
ring->tdr_seqno = 0;
}
}
#endif
|