path: root/kernel/safestack.c
blob: b36708b3402c9517d656fb38197db8664a537d5c
/*
 * SafeStack unsafe stack management
 *
 * Copyright (C) 2017 Google, Inc.
 */

#include <linux/sched.h>
#include <linux/safestack.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/slab.h>

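/* Slab cache backing the fixed-size per-task unsafe stacks. */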
static struct kmem_cache *unsafe_stack_cache;

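/*
 * Create the slab cache that backs per-task unsafe stacks; a failed
 * cache creation is fatal (BUG_ON).
 */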
void init_unsafe_stack_cache(void)
{
	unsafe_stack_cache = kmem_cache_create("unsafe_stack",
					UNSAFE_STACK_SIZE, UNSAFE_STACK_ALIGN,
					0, NULL);
	BUG_ON(unsafe_stack_cache == NULL);
}

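/*
 * Allocate a zeroed unsafe stack for @tsk from the preferred NUMA @node,
 * account it as kernel stack memory, and point the task's unsafe stack
 * pointer at the top of the new allocation. Returns 0 on success or
 * -ENOMEM if the slab allocation fails.
 */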
int alloc_unsafe_stack(struct task_struct *tsk, int node)
{
	struct page *first;
	void *stack;

	stack = kmem_cache_alloc_node(unsafe_stack_cache,
				      THREADINFO_GFP | __GFP_ZERO, node);
	if (unlikely(!stack))
		return -ENOMEM;

	first = virt_to_page(stack);

	/* account as kernel stack */
	mod_zone_page_state(page_zone(first), NR_KERNEL_STACK_KB,
		UNSAFE_STACK_SIZE / 1024);
	memcg_kmem_update_page_stat(first, MEMCG_KERNEL_STACK_KB,
		UNSAFE_STACK_SIZE / 1024);

	tsk->unsafe_stack = stack;
	tsk->unsafe_stack_ptr = stack + UNSAFE_STACK_SIZE;
	tsk->unsafe_saved_ptr = NULL;

	return 0;
}

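/*
 * Undo the kernel-stack accounting and release @tsk's unsafe stack back
 * to the slab cache. Safe to call on a task that never had an unsafe
 * stack allocated.
 */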
void free_unsafe_stack(struct task_struct *tsk)
{
	struct page *first;

	if (unlikely(!tsk->unsafe_stack))
		return;

	first = virt_to_page(tsk->unsafe_stack);

	mod_zone_page_state(page_zone(first), NR_KERNEL_STACK_KB,
		-(long)UNSAFE_STACK_SIZE / 1024);
	memcg_kmem_update_page_stat(first, MEMCG_KERNEL_STACK_KB,
		-(long)UNSAFE_STACK_SIZE / 1024);

	kmem_cache_free(unsafe_stack_cache, tsk->unsafe_stack);

	tsk->unsafe_stack = NULL;
	tsk->unsafe_stack_ptr = NULL;
	tsk->unsafe_saved_ptr = NULL;
}
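
For context, below is a minimal, hypothetical sketch of how these helpers might be wired into task setup and teardown. The call-site names (example_safestack_init, example_task_setup, example_task_teardown) are illustrative assumptions, not part of this file or of the actual SafeStack series; in a real kernel the hooks would sit in the boot, fork, and task-free paths.

/* Illustrative only: hypothetical call sites for the helpers above. */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/safestack.h>

/* Boot-time setup: create the slab cache before any tasks need it. */
void __init example_safestack_init(void)
{
	init_unsafe_stack_cache();
}

/* Task creation: give the new task its own unsafe stack. */
int example_task_setup(struct task_struct *tsk, int node)
{
	int err = alloc_unsafe_stack(tsk, node);

	if (err)
		return err;	/* -ENOMEM: caller unwinds task creation */
	return 0;
}

/* Task teardown: release the unsafe stack and drop the accounting. */
void example_task_teardown(struct task_struct *tsk)
{
	/* free_unsafe_stack() is a no-op if no stack was ever allocated. */
	free_unsafe_stack(tsk);
}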