arch/x86/kernel/cpu/sgx/virt.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Device driver to expose SGX enclave memory to KVM guests.
 *
 * Copyright(c) 2021 Intel Corporation.
 */

#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <asm/sgx.h>
#include <uapi/asm/sgx.h>

#include "encls.h"
#include "sgx.h"

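/*
 * Per-instance virtual EPC state: @page_array maps each page offset within
 * the vEPC mapping to its backing EPC page; @lock serializes insertions
 * into @page_array by concurrent page faults.
 */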
struct sgx_vepc {
	struct xarray page_array;
	struct mutex lock;
};

/*
 * Temporary SECS pages that cannot be EREMOVE'd because they still have
 * children in other virtual EPC instances, and the lock protecting the list.
 */
static struct mutex zombie_secs_pages_lock;
static struct list_head zombie_secs_pages;

static int __sgx_vepc_fault(struct sgx_vepc *vepc,
			    struct vm_area_struct *vma, unsigned long addr)
{
	struct sgx_epc_page *epc_page;
	unsigned long index, pfn;
	int ret;

	WARN_ON(!mutex_is_locked(&vepc->lock));

	/* Calculate index of EPC page in virtual EPC's page_array */
	index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start);
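	/*
	 * Example: with vm_pgoff == 0, a fault at vma->vm_start + 2 * PAGE_SIZE
	 * yields index 2.
	 */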

	epc_page = xa_load(&vepc->page_array, index);
	if (epc_page)
		return 0;

	epc_page = sgx_alloc_epc_page(vepc, false);
	if (IS_ERR(epc_page))
		return PTR_ERR(epc_page);

	ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
	if (ret)
		goto err_free;

	pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page));

	ret = vmf_insert_pfn(vma, addr, pfn);
	if (ret != VM_FAULT_NOPAGE) {
		ret = -EFAULT;
		goto err_delete;
	}

	return 0;

err_delete:
	xa_erase(&vepc->page_array, index);
err_free:
	sgx_free_epc_page(epc_page);
	return ret;
}

static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_vepc *vepc = vma->vm_private_data;
	int ret;

	mutex_lock(&vepc->lock);
	ret = __sgx_vepc_fault(vepc, vma, vmf->address);
	mutex_unlock(&vepc->lock);

	if (!ret)
		return VM_FAULT_NOPAGE;

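	/*
	 * -EBUSY comes from sgx_alloc_epc_page(): no free EPC page was
	 * available and the fault path may not reclaim.  Drop mmap_lock and
	 * let the fault be retried instead of spinning under the lock.
	 */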
	if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
		mmap_read_unlock(vma->vm_mm);
		return VM_FAULT_RETRY;
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct sgx_vepc_vm_ops = {
	.fault = sgx_vepc_fault,
};

static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct sgx_vepc *vepc = file->private_data;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_ops = &sgx_vepc_vm_ops;
	/* Don't copy VMA in fork() */
	vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
	vma->vm_private_data = vepc;

	return 0;
}
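
/*
 * For illustration only, a minimal sketch of how userspace (e.g. a VMM)
 * might consume this device; the KVM memslot step is an assumption about
 * the caller, not something this driver enforces:
 *
 *	int fd = open("/dev/sgx_vepc", O_RDWR);
 *	void *epc = mmap(NULL, epc_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * The mapping is then handed to KVM (e.g. via KVM_SET_USER_MEMORY_REGION)
 * as the backing for the guest's EPC section; the EPC pages themselves are
 * allocated lazily by sgx_vepc_fault() above.
 */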

static int sgx_vepc_free_page(struct sgx_epc_page *epc_page)
{
	int ret;

	/*
	 * Take a previously guest-owned EPC page and return it to the
	 * general EPC page pool.
	 *
	 * Guests cannot be trusted to have left this page in a good
	 * state, so run EREMOVE on the page unconditionally.  In the
	 * case that a guest properly EREMOVE'd this page, a superfluous
	 * EREMOVE is harmless.
	 */
	ret = __eremove(sgx_get_epc_virt_addr(epc_page));
	if (ret) {
		/*
		 * Only SGX_CHILD_PRESENT is expected, which happens when
		 * EREMOVE'ing an SECS page that still has children.  That
		 * case is handled by EREMOVE'ing the SECS again after all
		 * pages in the virtual EPC have been EREMOVE'd.  See the
		 * comments below in sgx_vepc_release().
		 *
		 * The user of the virtual EPC (KVM) must guarantee that no
		 * logical processor is still running in the enclave in the
		 * guest; otherwise EREMOVE returns SGX_ENCLAVE_ACT, which
		 * cannot be handled here.
		 */
		WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE,
			  ret, ret);
		return ret;
	}

	sgx_free_epc_page(epc_page);

	return 0;
}

static int sgx_vepc_release(struct inode *inode, struct file *file)
{
	struct sgx_vepc *vepc = file->private_data;
	struct sgx_epc_page *epc_page, *tmp, *entry;
	unsigned long index;

	LIST_HEAD(secs_pages);

	xa_for_each(&vepc->page_array, index, entry) {
		/*
		 * Remove all normal, child pages.  sgx_vepc_free_page()
		 * will fail if EREMOVE fails, but this is OK and expected on
		 * SECS pages.  Those can only be EREMOVE'd *after* all of
		 * their child pages have been removed.  The retries below
		 * will clean them up.
		 */
		if (sgx_vepc_free_page(entry))
			continue;

		xa_erase(&vepc->page_array, index);
	}

	/*
	 * Retry EREMOVE'ing pages.  This will clean up any SECS pages that
	 * only had children in this 'epc' area.
	 */
	xa_for_each(&vepc->page_array, index, entry) {
		epc_page = entry;
		/*
		 * An EREMOVE failure here means that the SECS page still
		 * has children.  But, since all children in this 'sgx_vepc'
		 * have been removed, the SECS page must have a child on
		 * another instance.
		 */
		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);

		xa_erase(&vepc->page_array, index);
	}

	/*
	 * SECS pages are "pinned" by child pages, and "unpinned" once all
	 * children have been EREMOVE'd.  A child page in this instance
	 * may have pinned an SECS page encountered in an earlier release(),
	 * creating a zombie.  Since some children were EREMOVE'd above,
	 * try to EREMOVE all zombies in the hopes that one was unpinned.
	 */
	mutex_lock(&zombie_secs_pages_lock);
	list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) {
		/*
		 * Speculatively remove the page from the list of zombies,
		 * if the page is successfully EREMOVE'd it will be added to
		 * the list of free pages.  If EREMOVE fails, throw the page
		 * on the local list, which will be spliced on at the end.
		 */
		list_del(&epc_page->list);

		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);
	}

	if (!list_empty(&secs_pages))
		list_splice_tail(&secs_pages, &zombie_secs_pages);
	mutex_unlock(&zombie_secs_pages_lock);

	xa_destroy(&vepc->page_array);
	kfree(vepc);

	return 0;
}

static int sgx_vepc_open(struct inode *inode, struct file *file)
{
	struct sgx_vepc *vepc;

	vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL);
	if (!vepc)
		return -ENOMEM;
	mutex_init(&vepc->lock);
	xa_init(&vepc->page_array);

	file->private_data = vepc;

	return 0;
}

static const struct file_operations sgx_vepc_fops = {
	.owner		= THIS_MODULE,
	.open		= sgx_vepc_open,
	.release	= sgx_vepc_release,
	.mmap		= sgx_vepc_mmap,
};

static struct miscdevice sgx_vepc_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "sgx_vepc",
	.nodename	= "sgx_vepc",
	.fops		= &sgx_vepc_fops,
};

int __init sgx_vepc_init(void)
{
	/* SGX virtualization requires KVM, and KVM requires VMX */
	if (!cpu_feature_enabled(X86_FEATURE_VMX))
		return -ENODEV;

	INIT_LIST_HEAD(&zombie_secs_pages);
	mutex_init(&zombie_secs_pages_lock);

	return misc_register(&sgx_vepc_dev);
}

/**
 * sgx_virt_ecreate() - Run ECREATE on behalf of guest
 * @pageinfo:	Pointer to PAGEINFO structure
 * @secs:	Userspace pointer to SECS page
 * @trapnr:	trap number injected to guest in case of ECREATE error
 *
 * Run ECREATE on behalf of the guest after KVM traps ECREATE so that the
 * host can enforce policies on the guest's enclaves, and report the trap
 * number to be injected into the guest if ECREATE faults.
 *
 * Return:
 * -  0:	ECREATE was successful.
 * - <0:	on error.
 */
int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
		     int *trapnr)
{
	int ret;

	/*
	 * @secs is an untrusted, userspace-provided address.  It comes from
	 * KVM and is assumed to be a valid pointer which points somewhere in
	 * userspace.  The access can fault and invoke SGX or other fault
	 * handlers if the userspace mapping for @secs doesn't exist.
	 *
	 * Add a WARN() to make sure @secs is already a valid userspace
	 * pointer from the caller (KVM), which should already have handled
	 * the invalid-pointer case (for instance, one crafted by a malicious
	 * guest).  All other checks, such as the alignment of @secs, are
	 * deferred to ENCLS itself.
	 */
	if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE)))
		return -EINVAL;

	__uaccess_begin();
	ret = __ecreate(pageinfo, (void *)secs);
	__uaccess_end();

	if (encls_faulted(ret)) {
		*trapnr = ENCLS_TRAPNR(ret);
		return -EFAULT;
	}

	/* ECREATE doesn't return an error code, it faults or succeeds. */
	WARN_ON_ONCE(ret);
	return 0;
}
EXPORT_SYMBOL_GPL(sgx_virt_ecreate);
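
/*
 * Illustrative only: roughly how a caller such as KVM might use
 * sgx_virt_ecreate() when emulating a trapped ECREATE.  handle_ecreate()
 * and inject_fault() are hypothetical names, not KVM's actual code:
 *
 *	static int handle_ecreate(struct kvm_vcpu *vcpu,
 *				  struct sgx_pageinfo *pageinfo,
 *				  void __user *secs)
 *	{
 *		int trapnr, ret;
 *
 *		ret = sgx_virt_ecreate(pageinfo, secs, &trapnr);
 *		if (ret == -EFAULT)
 *			return inject_fault(vcpu, trapnr);
 *		return ret;
 *	}
 */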

static int __sgx_virt_einit(void __user *sigstruct, void __user *token,
			    void __user *secs)
{
	int ret;

	/*
	 * Make sure all userspace pointers from the caller (KVM) are valid.
	 * All other checks deferred to ENCLS itself.  Also see comment
	 * for @secs in sgx_virt_ecreate().
	 */
#define SGX_EINITTOKEN_SIZE	304
	if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) ||
			 !access_ok(token, SGX_EINITTOKEN_SIZE) ||
			 !access_ok(secs, PAGE_SIZE)))
		return -EINVAL;

	__uaccess_begin();
	ret = __einit((void *)sigstruct, (void *)token, (void *)secs);
	__uaccess_end();

	return ret;
}

/**
 * sgx_virt_einit() - Run EINIT on behalf of guest
 * @sigstruct:		Userspace pointer to SIGSTRUCT structure
 * @token:		Userspace pointer to EINITTOKEN structure
 * @secs:		Userspace pointer to SECS page
 * @lepubkeyhash:	Pointer to guest's *virtual* SGX_LEPUBKEYHASH MSR values
 * @trapnr:		trap number injected to guest in case of EINIT error
 *
 * Run EINIT on behalf of the guest after KVM traps EINIT.  If SGX_LC is
 * available in the host, the SGX driver may rewrite the hardware
 * SGX_LEPUBKEYHASH MSRs at will, so KVM must load the guest's virtual MSR
 * values into the hardware MSRs to ensure EINIT executes with the values
 * the guest expects.
 *
 * Return:
 * -  0:	EINIT was successful.
 * - <0:	on error.
 */
int sgx_virt_einit(void __user *sigstruct, void __user *token,
		   void __user *secs, u64 *lepubkeyhash, int *trapnr)
{
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
		ret = __sgx_virt_einit(sigstruct, token, secs);
	} else {
		preempt_disable();

		sgx_update_lepubkeyhash(lepubkeyhash);

		ret = __sgx_virt_einit(sigstruct, token, secs);
		preempt_enable();
	}

	/* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */
	if (ret == -EINVAL)
		return ret;

	if (encls_faulted(ret)) {
		*trapnr = ENCLS_TRAPNR(ret);
		return -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sgx_virt_einit);
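
/*
 * Illustrative only: a hypothetical EINIT emulation path.  The caller
 * supplies the guest's *virtual* IA32_SGXLEPUBKEYHASH{0..3} MSR values so
 * they can be loaded into hardware before EINIT runs; vcpu_lepubkeyhash()
 * and inject_fault() are made-up helpers:
 *
 *	u64 hash[4];
 *	int trapnr, ret;
 *
 *	memcpy(hash, vcpu_lepubkeyhash(vcpu), sizeof(hash));
 *	ret = sgx_virt_einit(sigstruct, token, secs, hash, &trapnr);
 *	if (ret == -EFAULT)
 *		inject_fault(vcpu, trapnr);
 */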