From patchwork Mon Nov 4 21:45:22 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Greg Kroah-Hartman
X-Patchwork-Id: 178471
Delivered-To: patch@linaro.org
From: Greg Kroah-Hartman
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman, stable@vger.kernel.org, Will Deacon, Catalin Marinas
Subject: [PATCH 4.19 129/149] arm64: Ensure VM_WRITE|VM_SHARED ptes are clean by default
Date: Mon, 4 Nov 2019 22:45:22 +0100
Message-Id: <20191104212145.579321102@linuxfoundation.org>
X-Mailer: git-send-email 2.23.0
In-Reply-To: <20191104212126.090054740@linuxfoundation.org>
References: <20191104212126.090054740@linuxfoundation.org>
User-Agent: quilt/0.66
MIME-Version: 1.0
Sender: stable-owner@vger.kernel.org
Precedence: bulk
List-ID:
X-Mailing-List: stable@vger.kernel.org

From: Catalin Marinas

commit aa57157be69fb599bd4c38a4b75c5aad74a60ec0 upstream.

Shared and writable mappings (__S.1.) should be clean (!dirty) initially
and made dirty on a subsequent write either through the hardware DBM
(dirty bit management) mechanism or through a write page fault. A clean
pte for the arm64 kernel is one that has PTE_RDONLY set and PTE_DIRTY
clear.

The PAGE_SHARED{,_EXEC} attributes have PTE_WRITE set (PTE_DBM) and
PTE_DIRTY clear. Prior to commit 73e86cb03cf2 ("arm64: Move PTE_RDONLY
bit handling out of set_pte_at()"), it was the responsibility of
set_pte_at() to set the PTE_RDONLY bit and mark the pte clean if the
software PTE_DIRTY bit was not set. However, the above commit removed
the pte_sw_dirty() check and the subsequent setting of PTE_RDONLY in
set_pte_at() while leaving the PAGE_SHARED{,_EXEC} definitions
unchanged. The result is that shared+writable mappings are now dirty by
default.

Fix the above by explicitly setting PTE_RDONLY in PAGE_SHARED{,_EXEC}.
In addition, remove the superfluous PTE_DIRTY bit from the kernel
PROT_* attributes.
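To make the clean-vs-dirty encoding above concrete, here is a small standalone C
model (a sketch, not the kernel's code): the bit positions and helper names are
taken from the arm64 headers as described in this message (PTE_RDONLY,
PTE_WRITE/PTE_DBM, PTE_DIRTY, pte_hw_dirty(), pte_sw_dirty()), and
old_set_pte_at_fixup() only approximates the PTE_RDONLY handling that
set_pte_at() performed before commit 73e86cb03cf2. It illustrates why a
PAGE_SHARED pte without PTE_RDONLY reads back as dirty.

/*
 * Simplified, standalone model of the arm64 clean/dirty pte encoding
 * discussed above. Bit positions mirror the arm64 definitions but the
 * code is illustrative only, not the kernel implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define PTE_RDONLY	((pteval_t)1 << 7)	/* AP[2]: clear => hardware-writable */
#define PTE_WRITE	((pteval_t)1 << 51)	/* PTE_DBM: hardware may mark the page dirty */
#define PTE_DIRTY	((pteval_t)1 << 55)	/* software dirty bit */

static bool pte_sw_dirty(pteval_t pte)
{
	return pte & PTE_DIRTY;
}

/* Hardware-dirty: DBM-writable and the read-only bit already cleared. */
static bool pte_hw_dirty(pteval_t pte)
{
	return (pte & PTE_WRITE) && !(pte & PTE_RDONLY);
}

static bool pte_dirty(pteval_t pte)
{
	return pte_sw_dirty(pte) || pte_hw_dirty(pte);
}

/* Approximation of the PTE_RDONLY fixup set_pte_at() did before 73e86cb03cf2. */
static pteval_t old_set_pte_at_fixup(pteval_t pte)
{
	if (pte_sw_dirty(pte) && (pte & PTE_WRITE))
		return pte & ~PTE_RDONLY;	/* dirty and writable: let hardware write */
	return pte | PTE_RDONLY;		/* otherwise keep the pte clean */
}

int main(void)
{
	pteval_t shared_old = PTE_WRITE;		/* PAGE_SHARED before this patch */
	pteval_t shared_new = PTE_WRITE | PTE_RDONLY;	/* PAGE_SHARED with this patch */

	printf("old PAGE_SHARED dirty at map time: %d\n", pte_dirty(shared_old));	/* 1 */
	printf("new PAGE_SHARED dirty at map time: %d\n", pte_dirty(shared_new));	/* 0 */
	printf("old PAGE_SHARED after old set_pte_at(): %d\n",
	       pte_dirty(old_set_pte_at_fixup(shared_old)));				/* 0 */
	return 0;
}

The last case shows how the old set_pte_at() used to paper over the missing
PTE_RDONLY; with that fixup gone, the PAGE_SHARED{,_EXEC} definitions themselves
have to carry PTE_RDONLY, which is what the hunk below does.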
Fixes: 73e86cb03cf2 ("arm64: Move PTE_RDONLY bit handling out of set_pte_at()")
Cc: # 4.14.x-
Cc: Will Deacon
Signed-off-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm64/include/asm/pgtable-prot.h | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -43,11 +43,11 @@
 #define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
 
-#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -91,8 +91,9 @@
 #define PAGE_S2_DEVICE		__pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN)
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
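For anyone who wants to poke at the behaviour from userspace, the rough probe
below is one way to look for the symptom: it maps a scratch file MAP_SHARED with
PROT_READ|PROT_WRITE, faults a page in with a read only, and dumps the mapping's
/proc/self/smaps entry. Whether the never-written page is reported under
Shared_Dirty/Private_Dirty depends on the filesystem and on kernel accounting
details, so treat this as a hedged diagnostic sketch (the scratch-file path and
the smaps-based check are assumptions), not a definitive reproducer.

/*
 * Rough diagnostic sketch, not part of the patch: map a scratch file
 * MAP_SHARED, touch it with a read only, and inspect this mapping's
 * /proc/self/smaps entry.  On an affected arm64 kernel the page may be
 * accounted as dirty even though it was never written; with the fix it
 * should stay clean.  The scratch-file location and the smaps-based
 * check are assumptions for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	char path[] = "/tmp/shared-clean-XXXXXX";	/* hypothetical scratch file */
	int fd = mkstemp(path);
	if (fd < 0 || ftruncate(fd, page) < 0) {
		perror("scratch file");
		return 1;
	}

	char *map = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	volatile char c = map[0];	/* read-only access: the pte should stay clean */
	(void)c;

	/* Show the smaps entry for this mapping; check Shared_Dirty/Private_Dirty. */
	char cmd[128];
	snprintf(cmd, sizeof(cmd), "grep -A15 '^%lx' /proc/self/smaps", (unsigned long)map);
	system(cmd);

	munmap(map, page);
	close(fd);
	unlink(path);
	return 0;
}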