
[14/14] arm64: kexec_file: add vmlinux format support

Message ID 20170824081811.19299-15-takahiro.akashi@linaro.org
State New
Series arm64: kexec: add kexec_file_load support

Commit Message

AKASHI Takahiro Aug. 24, 2017, 8:18 a.m. UTC
The first PT_LOAD segment in vmlinux, which is assumed to contain the
kernel "text", will be loaded at the offset of TEXT_OFFSET from the
beginning of system memory. The other PT_LOAD segments are placed
relative to the first one.
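
For example (addresses purely illustrative): on a system whose memory
starts at 0x40000000, with the default TEXT_OFFSET of 0x80000, the
first segment would be loaded at physical address 0x40080000, and the
remaining PT_LOAD segments would follow it at the same relative offsets
that they have in vmlinux.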

Regarding kernel verification, since there is no standard way to embed
a signature within an ELF binary, we follow PowerPC's (not yet
upstreamed) approach: a signature is appended right after the kernel
binary itself, as is done for module signing.
This way, the signature can easily be retrieved and verified with
verify_pkcs7_signature().

We can sign the kernel with the sign-file command.
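For reference, the invocation is the same as for module signing, e.g.
"scripts/sign-file sha256 certs/signing_key.pem certs/signing_key.x509
vmlinux" (hash algorithm, key and certificate paths are illustrative).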

Unlike PowerPC, we don't support IMA-based kexec for now, since arm64
doesn't have any secure solution for system appraisal at the moment.

Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/Kconfig                     |   8 ++
 arch/arm64/include/asm/kexec_file.h    |   1 +
 arch/arm64/kernel/Makefile             |   1 +
 arch/arm64/kernel/kexec_elf.c          | 216 +++++++++++++++++++++++++++++++++
 arch/arm64/kernel/machine_kexec_file.c |   3 +
 5 files changed, 229 insertions(+)
 create mode 100644 arch/arm64/kernel/kexec_elf.c

-- 
2.14.1

Comments

Mark Rutland Aug. 24, 2017, 5:30 p.m. UTC | #1
On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:
> The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

> will be loaded at the offset of TEXT_OFFSET from the begining of system

> memory. The other PT_LOAD segments are placed relative to the first one.


I really don't like assuming things about the vmlinux ELF file.

> Regarding kernel verification, since there is no standard way to contain

> a signature within elf binary, we follow PowerPC's (not yet upstreamed)

> approach, that is, appending a signature right after the kernel binary

> itself like module signing.


I also *really* don't like this. It's a bizarre in-band mechanism,
without explicit information. It's not a nice ABI.

If we can load an Image, why do we need to be able to load a vmlinux?

[...]

> diff --git a/arch/arm64/kernel/kexec_elf.c b/arch/arm64/kernel/kexec_elf.c

> new file mode 100644

> index 000000000000..7bd3c1e1f65a

> --- /dev/null

> +++ b/arch/arm64/kernel/kexec_elf.c

> @@ -0,0 +1,216 @@

> +/*

> + * Kexec vmlinux loader

> +

> + * Copyright (C) 2017 Linaro Limited

> + * Authors: AKASHI Takahiro <takahiro.akashi@linaro.org>

> + *

> + * This program is free software; you can redistribute it and/or modify

> + * it under the terms of the GNU General Public License version 2 as

> + * published by the Free Software Foundation.

> + */

> +

> +#define pr_fmt(fmt)	"kexec_file(elf): " fmt

> +

> +#include <linux/elf.h>

> +#include <linux/err.h>

> +#include <linux/errno.h>

> +#include <linux/kernel.h>

> +#include <linux/kexec.h>

> +#include <linux/module_signature.h>

> +#include <linux/types.h>

> +#include <linux/verification.h>

> +#include <asm/byteorder.h>

> +#include <asm/kexec_file.h>

> +#include <asm/memory.h>

> +

> +static int elf64_probe(const char *buf, unsigned long len)

> +{

> +	struct elfhdr ehdr;

> +

> +	/* Check for magic and architecture */

> +	memcpy(&ehdr, buf, sizeof(ehdr));

> +	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) ||

> +		(elf16_to_cpu(&ehdr, ehdr.e_machine) != EM_AARCH64))

> +		return -ENOEXEC;

> +

> +	return 0;

> +}

> +

> +static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,

> +			 struct elf_info *elf_info,

> +			 unsigned long *kernel_load_addr)

> +{

> +	struct kexec_buf kbuf;

> +	const struct elf_phdr *phdr;

> +	const struct arm64_image_header *h;

> +	unsigned long text_offset, rand_offset;

> +	unsigned long page_offset, phys_offset;

> +	int first_segment, i, ret = -ENOEXEC;

> +

> +	kbuf.image = image;

> +	if (image->type == KEXEC_TYPE_CRASH) {

> +		kbuf.buf_min = crashk_res.start;

> +		kbuf.buf_max = crashk_res.end + 1;

> +	} else {

> +		kbuf.buf_min = 0;

> +		kbuf.buf_max = ULONG_MAX;

> +	}

> +	kbuf.top_down = 0;

> +

> +	/* Load PT_LOAD segments. */

> +	for (i = 0, first_segment = 1; i < ehdr->e_phnum; i++) {

> +		phdr = &elf_info->proghdrs[i];

> +		if (phdr->p_type != PT_LOAD)

> +			continue;

> +

> +		kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;

> +		kbuf.bufsz = min(phdr->p_filesz, phdr->p_memsz);

> +		kbuf.memsz = phdr->p_memsz;

> +		kbuf.buf_align = phdr->p_align;

> +

> +		if (first_segment) {

> +			/*

> +			 * Identify TEXT_OFFSET:

> +			 * When CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET=y the image

> +			 * header could be offset in the elf segment. The linker

> +			 * script sets ehdr->e_entry to the start of text.


Please, let's not have to go delving into the vmlinux, knowing intimate
details about how it's put together.

> +			 *

> +			 * NOTE: In v3.16 or older, h->text_offset is 0,

> +			 * so use the default, 0x80000

> +			 */

> +			rand_offset = ehdr->e_entry - phdr->p_vaddr;

> +			h = (struct arm64_image_header *)

> +					(elf_info->buffer + phdr->p_offset +

> +					rand_offset);

> +

> +			if (!arm64_header_check_magic(h))

> +				goto out;

> +

> +			if (h->image_size)

> +				text_offset = le64_to_cpu(h->text_offset);

> +			else

> +				text_offset = 0x80000;


Surely we can share the Image header parsing with the Image parser?

The Image code had practically the exact same logic operating on the
header struct.

Thanks,
Mark.
AKASHI Takahiro Aug. 25, 2017, 2:03 a.m. UTC | #2
On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:
> On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

> > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

> > will be loaded at the offset of TEXT_OFFSET from the begining of system

> > memory. The other PT_LOAD segments are placed relative to the first one.

> 

> I really don't like assuming things about the vmlinux ELF file.


If so, vmlinux is not an appropriate format for loading.

> > Regarding kernel verification, since there is no standard way to contain

> > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

> > approach, that is, appending a signature right after the kernel binary

> > itself like module signing.

> 

> I also *really* don't like this. It's a bizarre in-band mechanism,

> without explcit information. It's not a nice ABI.

> 

> If we can load an Image, why do we need to be able to load a vmlinux?


Well, kexec-tools does. I don't know why Geoff wanted to support vmlinux.
I'm just trying to support what kexec-tools does support.

> [...]

> 

> > diff --git a/arch/arm64/kernel/kexec_elf.c b/arch/arm64/kernel/kexec_elf.c

> > new file mode 100644

> > index 000000000000..7bd3c1e1f65a

> > --- /dev/null

> > +++ b/arch/arm64/kernel/kexec_elf.c

> > @@ -0,0 +1,216 @@

> > +/*

> > + * Kexec vmlinux loader

> > +

> > + * Copyright (C) 2017 Linaro Limited

> > + * Authors: AKASHI Takahiro <takahiro.akashi@linaro.org>

> > + *

> > + * This program is free software; you can redistribute it and/or modify

> > + * it under the terms of the GNU General Public License version 2 as

> > + * published by the Free Software Foundation.

> > + */

> > +

> > +#define pr_fmt(fmt)	"kexec_file(elf): " fmt

> > +

> > +#include <linux/elf.h>

> > +#include <linux/err.h>

> > +#include <linux/errno.h>

> > +#include <linux/kernel.h>

> > +#include <linux/kexec.h>

> > +#include <linux/module_signature.h>

> > +#include <linux/types.h>

> > +#include <linux/verification.h>

> > +#include <asm/byteorder.h>

> > +#include <asm/kexec_file.h>

> > +#include <asm/memory.h>

> > +

> > +static int elf64_probe(const char *buf, unsigned long len)

> > +{

> > +	struct elfhdr ehdr;

> > +

> > +	/* Check for magic and architecture */

> > +	memcpy(&ehdr, buf, sizeof(ehdr));

> > +	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) ||

> > +		(elf16_to_cpu(&ehdr, ehdr.e_machine) != EM_AARCH64))

> > +		return -ENOEXEC;

> > +

> > +	return 0;

> > +}

> > +

> > +static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,

> > +			 struct elf_info *elf_info,

> > +			 unsigned long *kernel_load_addr)

> > +{

> > +	struct kexec_buf kbuf;

> > +	const struct elf_phdr *phdr;

> > +	const struct arm64_image_header *h;

> > +	unsigned long text_offset, rand_offset;

> > +	unsigned long page_offset, phys_offset;

> > +	int first_segment, i, ret = -ENOEXEC;

> > +

> > +	kbuf.image = image;

> > +	if (image->type == KEXEC_TYPE_CRASH) {

> > +		kbuf.buf_min = crashk_res.start;

> > +		kbuf.buf_max = crashk_res.end + 1;

> > +	} else {

> > +		kbuf.buf_min = 0;

> > +		kbuf.buf_max = ULONG_MAX;

> > +	}

> > +	kbuf.top_down = 0;

> > +

> > +	/* Load PT_LOAD segments. */

> > +	for (i = 0, first_segment = 1; i < ehdr->e_phnum; i++) {

> > +		phdr = &elf_info->proghdrs[i];

> > +		if (phdr->p_type != PT_LOAD)

> > +			continue;

> > +

> > +		kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;

> > +		kbuf.bufsz = min(phdr->p_filesz, phdr->p_memsz);

> > +		kbuf.memsz = phdr->p_memsz;

> > +		kbuf.buf_align = phdr->p_align;

> > +

> > +		if (first_segment) {

> > +			/*

> > +			 * Identify TEXT_OFFSET:

> > +			 * When CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET=y the image

> > +			 * header could be offset in the elf segment. The linker

> > +			 * script sets ehdr->e_entry to the start of text.

> 

> Please, let's not have to go delving into the vmlinux, knowing intimate

> details about how it's put together.


If we didn't need to take care of RANDOMIZE_TEXT_OFFSET, the code would
be much simpler and look similar to the Image code.

> 

> > +			 *

> > +			 * NOTE: In v3.16 or older, h->text_offset is 0,

> > +			 * so use the default, 0x80000

> > +			 */

> > +			rand_offset = ehdr->e_entry - phdr->p_vaddr;

> > +			h = (struct arm64_image_header *)

> > +					(elf_info->buffer + phdr->p_offset +

> > +					rand_offset);

> > +

> > +			if (!arm64_header_check_magic(h))

> > +				goto out;

> > +

> > +			if (h->image_size)

> > +				text_offset = le64_to_cpu(h->text_offset);

> > +			else

> > +				text_offset = 0x80000;

> 

> Surely we can share the Image header parsing with the Image parser?

> 

> The Image code had practically the exact same logic operating on the

> header struct.


Thanks,
-Takahiro AKASHI

> Thanks,

> Mark.
Dave Young Aug. 25, 2017, 6:13 a.m. UTC | #3
On 08/25/17 at 11:03am, AKASHI Takahiro wrote:
> On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:

> > On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

> > > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

> > > will be loaded at the offset of TEXT_OFFSET from the begining of system

> > > memory. The other PT_LOAD segments are placed relative to the first one.

> > 

> > I really don't like assuming things about the vmlinux ELF file.

> 

> If so, vmlinux is not an appropriate format for loading.

> 

> > > Regarding kernel verification, since there is no standard way to contain

> > > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

> > > approach, that is, appending a signature right after the kernel binary

> > > itself like module signing.

> > 

> > I also *really* don't like this. It's a bizarre in-band mechanism,

> > without explcit information. It's not a nice ABI.

> > 

> > If we can load an Image, why do we need to be able to load a vmlinux?

> 

> Well, kexec-tools does. I don't know why Geoff wanted to support vmlinux.

> I'm just trying to support what kexec-tools does support.


We should only add things when they are really necessary; some
kexec-tools functionality exists for historic reasons.

If the only reason is to match what kexec-tools has done, I would say
just not do it.

Thanks
Dave
Mark Rutland Aug. 29, 2017, 10:01 a.m. UTC | #4
On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:
> On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

> > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

> > will be loaded at the offset of TEXT_OFFSET from the begining of system

> > memory. The other PT_LOAD segments are placed relative to the first one.

> 

> I really don't like assuming things about the vmlinux ELF file.

> 

> > Regarding kernel verification, since there is no standard way to contain

> > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

> > approach, that is, appending a signature right after the kernel binary

> > itself like module signing.

> 

> I also *really* don't like this. It's a bizarre in-band mechanism,

> without explcit information. It's not a nice ABI.

> 

> If we can load an Image, why do we need to be able to load a vmlinux?


So IIUC, the whole point of this is to be able to kexec_file_load() a
vmlinux + signature bundle, for !CONFIG_EFI kernels.

For that, I think that we actually need a new kexec_file_load${N}
syscall, where we can pass the signature for the kernel as a separate
file. Ideally also with a flags argument and perhaps the ability to sign
the initrd too.

That way we don't have to come up with a magic vmlinux+signature format,
as we can just pass a regular image and a signature for that image
separately. That should work for PPC and others, too.
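
Purely as an illustration (the name and argument layout here are
assumptions, nothing agreed), such an extended call might look like:

	long kexec_file_load2(int kernel_fd, int kernel_sig_fd,
			      int initrd_fd, int initrd_sig_fd,
			      unsigned long cmdline_len,
			      const char *cmdline, unsigned long flags);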

Thanks,
Mark.
Thiago Jung Bauermann Aug. 29, 2017, 4:15 p.m. UTC | #5
Mark Rutland <mark.rutland@arm.com> writes:

> On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:

>> On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

>> > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

>> > will be loaded at the offset of TEXT_OFFSET from the begining of system

>> > memory. The other PT_LOAD segments are placed relative to the first one.

>> 

>> I really don't like assuming things about the vmlinux ELF file.

>> 

>> > Regarding kernel verification, since there is no standard way to contain

>> > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

>> > approach, that is, appending a signature right after the kernel binary

>> > itself like module signing.

>> 

>> I also *really* don't like this. It's a bizarre in-band mechanism,

>> without explcit information. It's not a nice ABI.

>> 

>> If we can load an Image, why do we need to be able to load a vmlinux?

>

> So IIUC, the whole point of this is to be able to kexec_file_load() a

> vmlinux + signature bundle, for !CONFIG_EFI kernels.

>

> For that, I think that we actually need a new kexec_file_load${N}

> syscall, where we can pass the signature for the kernel as a separate

> file. Ideally also with a flags argument and perhaps the ability to sign

> the initrd too.

>

> That way we don't ahve to come up with a magic vmlinux+signature format,

> as we can just pass a regular image and a signature for that image

> separately. That should work for PPC and others, too.


powerpc uses the same format that is used for signed kernel modules,
which is a signature appended at the end of the file. It doesn't need to
be passed separately since it's embedded in the file itself.

The kernel already has a mechanism to verify signatures that aren't
embedded in the file: it's possible to use IMA via the LSM hook in
kernel_read_file_from_fd (which is called in
kimage_file_prepare_segments) to verify a signature stored in an
extended attribute by using an IMA policy rule such as:

appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig

Of course, that only works if the kernel image is stored in a filesystem
which supports extended attributes. But that is the case for most
filesystems nowadays, with the notable exception of FAT-based
filesystems.

evmctl, the IMA userspace tool, also supports signatures stored in a
separate file ("sidecar" signatures), but the kernel can only verify
them if they are copied into an xattr (which I believe the userspace
tool can do).
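
For example (key path illustrative), a kernel image could be labelled
for appraisal with "evmctl ima_sign --key /etc/keys/privkey_ima.pem
/boot/vmlinux", after which the KEXEC_KERNEL_CHECK rule above would
verify the resulting security.ima xattr at kexec_file_load() time.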

-- 
Thiago Jung Bauermann
IBM Linux Technology Center
Michael Ellerman Aug. 30, 2017, 8:40 a.m. UTC | #6
Mark Rutland <mark.rutland@arm.com> writes:

> On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:

>> On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

>> > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

>> > will be loaded at the offset of TEXT_OFFSET from the begining of system

>> > memory. The other PT_LOAD segments are placed relative to the first one.

>> 

>> I really don't like assuming things about the vmlinux ELF file.

>> 

>> > Regarding kernel verification, since there is no standard way to contain

>> > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

>> > approach, that is, appending a signature right after the kernel binary

>> > itself like module signing.

>> 

>> I also *really* don't like this. It's a bizarre in-band mechanism,

>> without explcit information. It's not a nice ABI.

>> 

>> If we can load an Image, why do we need to be able to load a vmlinux?

>

> So IIUC, the whole point of this is to be able to kexec_file_load() a

> vmlinux + signature bundle, for !CONFIG_EFI kernels.

>

> For that, I think that we actually need a new kexec_file_load${N}

> syscall, where we can pass the signature for the kernel as a separate

> file. Ideally also with a flags argument and perhaps the ability to sign

> the initrd too.

>

> That way we don't ahve to come up with a magic vmlinux+signature format,


You don't have to come up with one; it already exists. We've been using
it for signed modules for ~5 years.

It also has the advantages of being a signature over the entire ELF
(no silly games about which sections are included) and of being attached
to the vmlinux, so you don't have to remember to copy it around. And the
code to produce it and verify it already exists.
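
For reference, the appended trailer is the module-signing one: the
PKCS#7 signature data, followed by a struct module_signature describing
it, followed by the "~Module signature appended~\n" marker. Roughly
(field comments abbreviated):

	struct module_signature {
		u8	algo;		/* public-key crypto algorithm [0] */
		u8	hash;		/* digest algorithm [0] */
		u8	id_type;	/* key identifier type [PKEY_ID_PKCS7] */
		u8	signer_len;	/* length of signer's name [0] */
		u8	key_id_len;	/* length of key identifier [0] */
		u8	__pad[3];
		__be32	sig_len;	/* length of signature data, big endian */
	};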

cheers
AKASHI Takahiro Sept. 8, 2017, 2:54 a.m. UTC | #7
On Fri, Aug 25, 2017 at 02:13:53PM +0800, Dave Young wrote:
> On 08/25/17 at 11:03am, AKASHI Takahiro wrote:

> > On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:

> > > On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

> > > > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

> > > > will be loaded at the offset of TEXT_OFFSET from the begining of system

> > > > memory. The other PT_LOAD segments are placed relative to the first one.

> > > 

> > > I really don't like assuming things about the vmlinux ELF file.

> > 

> > If so, vmlinux is not an appropriate format for loading.

> > 

> > > > Regarding kernel verification, since there is no standard way to contain

> > > > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

> > > > approach, that is, appending a signature right after the kernel binary

> > > > itself like module signing.

> > > 

> > > I also *really* don't like this. It's a bizarre in-band mechanism,

> > > without explcit information. It's not a nice ABI.

> > > 

> > > If we can load an Image, why do we need to be able to load a vmlinux?

> > 

> > Well, kexec-tools does. I don't know why Geoff wanted to support vmlinux.

> > I'm just trying to support what kexec-tools does support.

> 

> We only add things when it is really necessary, kexec-tools

> functionalities should have some historic reasons.


Geoff had been working on kexec since old kernels (3.14 or 15?).

> If only for doing kexec-tools has done I would say just not to do it.


Sure

-Takahiro AKASHI

> Thanks

> Dave
AKASHI Takahiro Sept. 8, 2017, 3:07 a.m. UTC | #8
On Tue, Aug 29, 2017 at 11:01:12AM +0100, Mark Rutland wrote:
> On Thu, Aug 24, 2017 at 06:30:50PM +0100, Mark Rutland wrote:

> > On Thu, Aug 24, 2017 at 05:18:11PM +0900, AKASHI Takahiro wrote:

> > > The first PT_LOAD segment, which is assumed to be "text" code, in vmlinux

> > > will be loaded at the offset of TEXT_OFFSET from the begining of system

> > > memory. The other PT_LOAD segments are placed relative to the first one.

> > 

> > I really don't like assuming things about the vmlinux ELF file.

> > 

> > > Regarding kernel verification, since there is no standard way to contain

> > > a signature within elf binary, we follow PowerPC's (not yet upstreamed)

> > > approach, that is, appending a signature right after the kernel binary

> > > itself like module signing.

> > 

> > I also *really* don't like this. It's a bizarre in-band mechanism,

> > without explcit information. It's not a nice ABI.

> > 

> > If we can load an Image, why do we need to be able to load a vmlinux?

> 

> So IIUC, the whole point of this is to be able to kexec_file_load() a

> vmlinux + signature bundle, for !CONFIG_EFI kernels.

> 

> For that, I think that we actually need a new kexec_file_load${N}

> syscall, where we can pass the signature for the kernel as a separate

> file. Ideally also with a flags argument and perhaps the ability to sign

> the initrd too.


Verifying the root file system would be another topic in general.

> That way we don't ahve to come up with a magic vmlinux+signature format,

> as we can just pass a regular image and a signature for that image

> separately. That should work for PPC and others, too.


Since some discussions are to be expected around vmlinux signing,
I will drop vmlinux support in v2.

(This means, as you mentioned, that we have no way to sign
a !CONFIG_EFI kernel for now. A possible future solution would be to
utilize file extended attributes, as proposed by the powerpc folks?)

Thanks,
-Takahiro AKASHI

> Thanks,

> Mark.

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c8f603700bdd..94021e66b826 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -772,11 +772,19 @@  config KEXEC_FILE_IMAGE_FMT
 	---help---
 	  Select this option to enable 'Image' kernel loading.
 
+config KEXEC_FILE_ELF_FMT
+	bool "Enable vmlinux/elf support"
+	depends on KEXEC_FILE
+	select KEXEC_FILE_ELF
+	---help---
+	  Select this option to enable 'vmlinux' kernel loading.
+
 config KEXEC_VERIFY_SIG
 	bool "Verify kernel signature during kexec_file_load() syscall"
 	depends on KEXEC_FILE
 	select SYSTEM_DATA_VERIFICATION
 	select SIGNED_PE_FILE_VERIFICATION if KEXEC_FILE_IMAGE_FMT
+	select MODULE_SIG_FORMAT if KEXEC_FILE_ELF_FMT
 	---help---
 	  This option makes kernel signature verification mandatory for
 	  the kexec_file_load() syscall.
diff --git a/arch/arm64/include/asm/kexec_file.h b/arch/arm64/include/asm/kexec_file.h
index 5df899aa0d2e..eaf2adc1121c 100644
--- a/arch/arm64/include/asm/kexec_file.h
+++ b/arch/arm64/include/asm/kexec_file.h
@@ -2,6 +2,7 @@ 
 #define _ASM_KEXEC_FILE_H
 
 extern struct kexec_file_ops kexec_image_ops;
+extern struct kexec_file_ops kexec_elf64_ops;
 
 /**
  * struct arm64_image_header - arm64 kernel image header.
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index a1161bab6810..1463337160ea 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -52,6 +52,7 @@  arm64-obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
 					   cpu-reset.o
 arm64-obj-$(CONFIG_KEXEC_FILE)		+= machine_kexec_file.o
 arm64-obj-$(CONFIG_KEXEC_FILE_IMAGE_FMT)	+= kexec_image.o
+arm64-obj-$(CONFIG_KEXEC_FILE_ELF_FMT)	+= kexec_elf.o
 arm64-obj-$(CONFIG_ARM64_RELOC_TEST)	+= arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
diff --git a/arch/arm64/kernel/kexec_elf.c b/arch/arm64/kernel/kexec_elf.c
new file mode 100644
index 000000000000..7bd3c1e1f65a
--- /dev/null
+++ b/arch/arm64/kernel/kexec_elf.c
@@ -0,0 +1,216 @@ 
+/*
+ * Kexec vmlinux loader
+
+ * Copyright (C) 2017 Linaro Limited
+ * Authors: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	"kexec_file(elf): " fmt
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/module_signature.h>
+#include <linux/types.h>
+#include <linux/verification.h>
+#include <asm/byteorder.h>
+#include <asm/kexec_file.h>
+#include <asm/memory.h>
+
+static int elf64_probe(const char *buf, unsigned long len)
+{
+	struct elfhdr ehdr;
+
+	/* Check for magic and architecture */
+	memcpy(&ehdr, buf, sizeof(ehdr));
+	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) ||
+		(elf16_to_cpu(&ehdr, ehdr.e_machine) != EM_AARCH64))
+		return -ENOEXEC;
+
+	return 0;
+}
+
+static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,
+			 struct elf_info *elf_info,
+			 unsigned long *kernel_load_addr)
+{
+	struct kexec_buf kbuf;
+	const struct elf_phdr *phdr;
+	const struct arm64_image_header *h;
+	unsigned long text_offset, rand_offset;
+	unsigned long page_offset, phys_offset;
+	int first_segment, i, ret = -ENOEXEC;
+
+	kbuf.image = image;
+	if (image->type == KEXEC_TYPE_CRASH) {
+		kbuf.buf_min = crashk_res.start;
+		kbuf.buf_max = crashk_res.end + 1;
+	} else {
+		kbuf.buf_min = 0;
+		kbuf.buf_max = ULONG_MAX;
+	}
+	kbuf.top_down = 0;
+
+	/* Load PT_LOAD segments. */
+	for (i = 0, first_segment = 1; i < ehdr->e_phnum; i++) {
+		phdr = &elf_info->proghdrs[i];
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;
+		kbuf.bufsz = min(phdr->p_filesz, phdr->p_memsz);
+		kbuf.memsz = phdr->p_memsz;
+		kbuf.buf_align = phdr->p_align;
+
+		if (first_segment) {
+			/*
+			 * Identify TEXT_OFFSET:
+			 * When CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET=y the image
+			 * header could be offset in the elf segment. The linker
+			 * script sets ehdr->e_entry to the start of text.
+			 *
+			 * NOTE: In v3.16 or older, h->text_offset is 0,
+			 * so use the default, 0x80000
+			 */
+			rand_offset = ehdr->e_entry - phdr->p_vaddr;
+			h = (struct arm64_image_header *)
+					(elf_info->buffer + phdr->p_offset +
+					rand_offset);
+
+			if (!arm64_header_check_magic(h))
+				goto out;
+
+			if (h->image_size)
+				text_offset = le64_to_cpu(h->text_offset);
+			else
+				text_offset = 0x80000;
+
+			/* Adjust kernel segment with TEXT_OFFSET */
+			kbuf.memsz += text_offset - rand_offset;
+
+			ret = kexec_add_buffer(&kbuf);
+			if (ret)
+				goto out;
+
+			image->segment[image->nr_segments - 1].mem
+					+= text_offset - rand_offset;
+			image->segment[image->nr_segments - 1].memsz
+					-= text_offset - rand_offset;
+
+			*kernel_load_addr = kbuf.mem + text_offset;
+
+			/* for succeeding segments */
+			page_offset = ALIGN_DOWN(phdr->p_vaddr, SZ_2M);
+			phys_offset = kbuf.mem;
+
+			first_segment = 0;
+		} else {
+			/* Calculate physical address */
+			kbuf.mem = phdr->p_vaddr - page_offset + phys_offset;
+
+			ret = kexec_add_segment(&kbuf);
+			if (ret)
+				goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static void *elf64_load(struct kimage *image, char *kernel_buf,
+			unsigned long kernel_len, char *initrd,
+			unsigned long initrd_len, char *cmdline,
+			unsigned long cmdline_len)
+{
+	struct elfhdr ehdr;
+	struct elf_info elf_info;
+	unsigned long kernel_load_addr;
+	int ret;
+
+	/* Create elf core header segment */
+	ret = load_crashdump_segments(image);
+	if (ret)
+		goto out;
+
+	/* Load the kernel */
+	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
+	if (ret)
+		goto out;
+
+	ret = elf_exec_load(image, &ehdr, &elf_info, &kernel_load_addr);
+	if (ret)
+		goto out;
+	pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
+
+	/* Load additional data */
+	ret = load_other_segments(image, kernel_load_addr,
+				initrd, initrd_len, cmdline, cmdline_len);
+
+out:
+	elf_free_info(&elf_info);
+
+	return ERR_PTR(ret);
+}
+
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+/*
+ * The file format is the exact same as module signing:
+ *   <kernel> := <Image> + <signature part> + <marker>
+ *   <signature part> := <signature data> + <struct module_signature>
+ */
+static int elf64_verify_sig(const char *kernel, unsigned long kernel_len)
+{
+	const size_t marker_len = sizeof(MODULE_SIG_STRING) - 1;
+	const struct module_signature *sig;
+	size_t file_len = kernel_len;
+	size_t sig_len;
+	const void *p;
+	int rc;
+
+	if (kernel_len <= marker_len + sizeof(*sig))
+		return -ENOENT;
+
+	/* Check for marker */
+	p = kernel + kernel_len - marker_len;
+	if (memcmp(p, MODULE_SIG_STRING, marker_len)) {
+		pr_err("probably the kernel is not signed.\n");
+		return -ENOENT;
+	}
+
+	/* Validate signature */
+	sig = (const struct module_signature *) (p - sizeof(*sig));
+	file_len -= marker_len;
+
+	rc = validate_module_sig(sig, kernel_len - marker_len);
+	if (rc) {
+		pr_err("signature is not valid\n");
+		return rc;
+	}
+
+	/* Verify kernel with signature */
+	sig_len = be32_to_cpu(sig->sig_len);
+	p -= sig_len + sizeof(*sig);
+	file_len -= sig_len + sizeof(*sig);
+
+	rc = verify_pkcs7_signature(kernel, p - (void *)kernel, p, sig_len,
+					NULL, VERIFYING_MODULE_SIGNATURE,
+					NULL, NULL);
+
+	return rc;
+}
+#endif
+
+struct kexec_file_ops kexec_elf64_ops = {
+	.probe = elf64_probe,
+	.load = elf64_load,
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+	.verify_sig = elf64_verify_sig,
+#endif
+};
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index ab3b19d51727..cb1f24d98f87 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -31,6 +31,9 @@  static struct kexec_file_ops *kexec_file_loaders[] = {
 #ifdef CONFIG_KEXEC_FILE_IMAGE_FMT
 	&kexec_image_ops,
 #endif
+#ifdef CONFIG_KEXEC_FILE_ELF_FMT
+	&kexec_elf64_ops,
+#endif
 };
 
 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,