diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index eec506c44d97..3791bdf5f78a 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -867,3 +867,18 @@ Description: This threshold is used to control triggering garbage collection whi reserved section before preallocating on pinned file. By default, the value is ovp_sections, especially, for zoned ufs, the value is 1. + +What: /sys/fs/f2fs//effective_lookup_mode +Date: August 2025 +Contact: "Daniel Lee" +Description: + This is a read-only entry to show the effective directory lookup mode + F2FS is currently using for casefolded directories. + This considers both the "lookup_mode" mount option and the on-disk + encoding flag, SB_ENC_NO_COMPAT_FALLBACK_FL. + + Possible values are: + - "perf": Hash-only lookup. + - "compat": Hash-based lookup with a linear search fallback enabled + - "auto:perf": lookup_mode is auto and fallback is disabled on-disk + - "auto:compat": lookup_mode is auto and fallback is enabled on-disk diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index b26b5274eaaf..cc9c98204db0 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1246,17 +1246,10 @@ PAGE_SIZE multiple when read back. This is a simple interface to trigger memory reclaim in the target cgroup. - This file accepts a single key, the number of bytes to reclaim. - No nested keys are currently supported. - Example:: echo "1G" > memory.reclaim - The interface can be later extended with nested keys to - configure the reclaim behavior. For example, specify the - type of memory to reclaim from (anon, file, ..). - Please note that the kernel can over or under reclaim from the target cgroup. If less bytes are reclaimed than the specified amount, -EAGAIN is returned. @@ -1268,6 +1261,17 @@ PAGE_SIZE multiple when read back. 
This means that the networking layer will not adapt based on reclaim induced by memory.reclaim. +The following nested keys are defined. + + ========== ================================ + swappiness Swappiness value to reclaim with + ========== ================================ + + Specifying a swappiness value instructs the kernel to perform + the reclaim with that swappiness value. Note that this has the + same semantics as vm.swappiness applied to memcg reclaim with + all the existing limitations and potential future extensions. + memory.peak A read-only single value file which exists on non-root cgroups. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 61b8b14c2c59..94b0bca87431 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2655,6 +2655,11 @@ (enabled). Disable by KVM if hardware lacks support for NPT. + kvm-arm.hyp_lm_size_mb= + [KVM,ARM,EARLY] Maximum amount of contiguous memory mappable in + the pKVM hypervisor linear map, in MB. Any attempt to map more + memory than this into pKVM stage-1 at run-time may be fatal. + kvm-arm.mode= [KVM,ARM] Select one of KVM/arm64's modes of operation. diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst index fb7d2ee022bc..4e1b4202f8fd 100644 --- a/Documentation/filesystems/f2fs.rst +++ b/Documentation/filesystems/f2fs.rst @@ -365,6 +365,25 @@ errors=%s Specify f2fs behavior on critical errors. This supports modes: pending node write drop keep N/A pending meta write keep keep N/A ====================== =============== =============== ======== +lookup_mode=%s Control the directory lookup behavior for casefolded + directories. This option has no effect on directories + that do not have the casefold feature enabled. 
+ + ================== ======================================== + Value Description + ================== ======================================== + perf (Default) Enforces a hash-only lookup. + The linear search fallback is always + disabled, ignoring the on-disk flag. + compat Enables the linear search fallback for + compatibility with directory entries + created by older kernel that used a + different case-folding algorithm. + This mode ignores the on-disk flag. + auto F2FS determines the mode based on the + on-disk `SB_ENC_NO_COMPAT_FALLBACK_FL` + flag. + ================== ======================================== ======================== ============================================================ Debugfs Entries diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index a66054d0763a..66d657d53b6d 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -2304,6 +2304,20 @@ accept_ra_pinfo - BOOLEAN - enabled if accept_ra is enabled. - disabled if accept_ra is disabled. +ra_honor_pio_pflag - BOOLEAN + The Prefix Information Option P-flag indicates the network can + allocate a unique IPv6 prefix per client using DHCPv6-PD. + This sysctl can be enabled when a userspace DHCPv6-PD client + is running to cause the P-flag to take effect: i.e. the + P-flag suppresses any effects of the A-flag within the same + PIO. For a given PIO, P=1 and A=1 is treated as A=0. + + - If disabled, the P-flag is ignored. + - If enabled, the P-flag will disable SLAAC autoconfiguration + for the given Prefix Information Option. + + Default: 0 (disabled) + accept_ra_rt_info_min_plen - INTEGER Minimum prefix length of Route Information in RA. 
diff --git a/Makefile b/Makefile index 76402e1d934d..ba23e51fa00e 100644 --- a/Makefile +++ b/Makefile @@ -970,16 +970,9 @@ endif ifdef CONFIG_LTO_CLANG ifdef CONFIG_LTO_CLANG_THIN -CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit +CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit -fvisibility=default else -CC_FLAGS_LTO := -flto -endif - -ifeq ($(SRCARCH),x86) -# Workaround for compiler / linker bug -CC_FLAGS_LTO += -fvisibility=hidden -else -CC_FLAGS_LTO += -fvisibility=default +CC_FLAGS_LTO := -flto -fvisibility=hidden endif # Limit inlining across translation units to reduce binary size diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 78f10f7d9bec..b4ec7dc6a60d 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -1933,6 +1933,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x30ae0fb9 } +pointer_reference { + id: 0x06cbb7fd + kind: POINTER + pointee_type_id: 0x316e396a +} pointer_reference { id: 0x06dcdc5a kind: POINTER @@ -2393,6 +2398,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x0b7b35bf } +pointer_reference { + id: 0x0850608e + kind: POINTER + pointee_type_id: 0x0b0164a7 +} pointer_reference { id: 0x0858434c kind: POINTER @@ -4858,6 +4868,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x1e840f82 } +pointer_reference { + id: 0x0d3144be + kind: POINTER + pointee_type_id: 0x1e85f467 +} pointer_reference { id: 0x0d32da14 kind: POINTER @@ -8918,6 +8933,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x61859821 } +pointer_reference { + id: 0x12f234f7 + kind: POINTER + pointee_type_id: 0x61883543 +} pointer_reference { id: 0x13012dce kind: POINTER @@ -22728,6 +22748,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x977bcf07 } +pointer_reference { + id: 0x2f4f676a + kind: POINTER + pointee_type_id: 0x977d7b37 +} pointer_reference { id: 0x2f4f9f95 kind: POINTER @@ -24473,6 +24498,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xeff3c532 } +pointer_reference { + id: 
0x316e396a + kind: POINTER + pointee_type_id: 0xeff80337 +} pointer_reference { id: 0x3176a085 kind: POINTER @@ -29908,6 +29938,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xdb5e7b14 } +pointer_reference { + id: 0x3c497d0a + kind: POINTER + pointee_type_id: 0xdb6512b4 +} pointer_reference { id: 0x3c4ed50c kind: POINTER @@ -36123,6 +36158,11 @@ qualified { qualifier: CONST qualified_type_id: 0x1d3e5adb } +qualified { + id: 0xdb6512b4 + qualifier: CONST + qualified_type_id: 0x1dd3fc58 +} qualified { id: 0xdb91d338 qualifier: CONST @@ -42180,6 +42220,10 @@ member { id: 0x22e86cdc type_id: 0x048fc360 } +member { + id: 0x22fde5dd + type_id: 0x04d9e766 +} member { id: 0x230f36dc type_id: 0x0312ab60 @@ -42188,10 +42232,6 @@ member { id: 0x233f54d1 type_id: 0x03d32356 } -member { - id: 0x23531e28 - type_id: 0x026208b2 -} member { id: 0x2380a48f type_id: 0x012ce22f @@ -42467,6 +42507,10 @@ member { id: 0x2a99a59c type_id: 0x2548e662 } +member { + id: 0x2aac4c8c + type_id: 0x259f4220 +} member { id: 0x2ad28b61 type_id: 0x24645d96 @@ -43296,8 +43340,8 @@ member { offset: 96 } member { - id: 0x34aaae1a - type_id: 0x5d84d1ff + id: 0x34aab4f5 + type_id: 0x5d84ba41 offset: 24 } member { @@ -43349,6 +43393,11 @@ member { type_id: 0x5c5fa7e4 offset: 96 } +member { + id: 0x34dd58d6 + type_id: 0x5c5b3484 + offset: 2240 +} member { id: 0x34df0c4f type_id: 0x5c52412d @@ -43425,6 +43474,11 @@ member { type_id: 0x5a41364f offset: 768 } +member { + id: 0x355c1a4d + type_id: 0x5a5e007a + offset: 28608 +} member { id: 0x355c619d type_id: 0x5a5fd068 @@ -43511,6 +43565,11 @@ member { type_id: 0x56037e9c offset: 1408 } +member { + id: 0x366394f0 + type_id: 0x56a024aa + offset: 1920 +} member { id: 0x36707abc type_id: 0x56efb9c6 @@ -43778,6 +43837,11 @@ member { id: 0x3a3e9211 type_id: 0x67d43857 } +member { + id: 0x3a6f0eb6 + type_id: 0x66926050 + offset: 896 +} member { id: 0x3a7ed482 type_id: 0x66d53bfd @@ -50417,12 +50481,6 @@ member { type_id: 0x92233392 offset: 1280 } -member 
{ - id: 0x2d081365 - name: "android_kabi_reserved1" - type_id: 0x92233392 - offset: 28608 -} member { id: 0x2d081368 name: "android_kabi_reserved1" @@ -83222,12 +83280,29 @@ member { type_id: 0x00c83ba6 offset: 768 } +member { + id: 0x082d878d + name: "dmabuf_count" + type_id: 0x4585663f + offset: 320 +} +member { + id: 0x32b44094 + name: "dmabuf_info" + type_id: 0x12f234f7 +} member { id: 0x810e204c name: "dmabufs" type_id: 0xeb923a9b offset: 384 } +member { + id: 0x8136732c + name: "dmabufs" + type_id: 0xd3c80119 + offset: 192 +} member { id: 0x065bb89a name: "dmac" @@ -116128,6 +116203,11 @@ member { type_id: 0x0c55d62d offset: 512 } +member { + id: 0xc185c03f + name: "iotlb_sync_map" + type_id: 0x2f4f676a +} member { id: 0xc1a6eb7e name: "iotlb_sync_map" @@ -144563,6 +144643,11 @@ member { type_id: 0x6720d32f offset: 1056 } +member { + id: 0x014cd3d7 + name: "nr_task_refs" + type_id: 0x1f4573ef +} member { id: 0xe321d910 name: "nr_tasks" @@ -152200,6 +152285,12 @@ member { name: "padding2" type_id: 0x23cbe491 } +member { + id: 0x16657abd + name: "padding4" + type_id: 0x44377683 + offset: 8 +} member { id: 0xe861beb9 name: "paddr" @@ -160457,6 +160548,13 @@ member { type_id: 0xe276adef offset: 64 } +member { + id: 0x52bd4ccc + name: "preferpd" + type_id: 0xb3e7bac9 + offset: 4 + bitsize: 1 +} member { id: 0x9f058ff3 name: "preferred_bpp" @@ -166119,6 +166217,11 @@ member { type_id: 0xe62ebf07 offset: 672 } +member { + id: 0xf7da7e97 + name: "ra_honor_pio_pflag" + type_id: 0xb3e7bac9 +} member { id: 0x66aa8ae5 name: "ra_mtu" @@ -169592,6 +169695,12 @@ member { name: "refcnt" type_id: 0x1f4573ef } +member { + id: 0xb7dcf16e + name: "refcnt" + type_id: 0xa722c13e + offset: 128 +} member { id: 0xb7dcf1d9 name: "refcnt" @@ -172965,10 +173074,10 @@ member { bitsize: 1 } member { - id: 0x688b9626 + id: 0x72849e28 name: "reserved" type_id: 0xb3e7bac9 - bitsize: 6 + bitsize: 4 } member { id: 0xc5c2fe1d @@ -176097,6 +176206,13 @@ member { type_id: 0x5d8155a5 offset: 104 } 
+member { + id: 0xad209f54 + name: "routeraddr" + type_id: 0xb3e7bac9 + offset: 5 + bitsize: 1 +} member { id: 0x74fcbeff name: "routes" @@ -176565,6 +176681,11 @@ member { type_id: 0x914dbfdc offset: 160 } +member { + id: 0x10789eb1 + name: "rss" + type_id: 0x33756485 +} member { id: 0x10ad0965 name: "rss" @@ -176581,6 +176702,12 @@ member { name: "rss_context" type_id: 0xe62ebf07 } +member { + id: 0x027688eb + name: "rss_hwm" + type_id: 0x33756485 + offset: 64 +} member { id: 0x9625f4bd name: "rss_query_en" @@ -219481,16 +219608,6 @@ struct_union { member_id: 0x2d8a4e32 } } -struct_union { - id: 0x026208b2 - kind: STRUCT - definition { - bytesize: 1 - member_id: 0x688b9626 - member_id: 0xb2dd5b41 - member_id: 0x7adb5caf - } -} struct_union { id: 0x02c70092 kind: STRUCT @@ -219653,6 +219770,18 @@ struct_union { member_id: 0x72d76384 } } +struct_union { + id: 0x04d9e766 + kind: STRUCT + definition { + bytesize: 1 + member_id: 0x72849e28 + member_id: 0x52bd4ccc + member_id: 0xad209f54 + member_id: 0xb2dd5b41 + member_id: 0x7adb5caf + } +} struct_union { id: 0x04ecbf8c kind: STRUCT @@ -222633,6 +222762,15 @@ struct_union { member_id: 0x0193fe4b } } +struct_union { + id: 0x259f4220 + kind: STRUCT + definition { + bytesize: 8 + member_id: 0xf7da7e97 + member_id: 0x16657abd + } +} struct_union { id: 0x260445a5 kind: STRUCT @@ -225858,6 +225996,16 @@ struct_union { member_id: 0x0f4b9ba4 } } +struct_union { + id: 0x56a024aa + kind: UNION + definition { + bytesize: 8 + member_id: 0x014cd3d7 + member_id: 0x27000c61 + member_id: 0x36752b74 + } +} struct_union { id: 0x56efb9c6 kind: UNION @@ -225983,6 +226131,16 @@ struct_union { member_id: 0x36752b74 } } +struct_union { + id: 0x5a5e007a + kind: UNION + definition { + bytesize: 8 + member_id: 0x32b44094 + member_id: 0x27000c61 + member_id: 0x36752b74 + } +} struct_union { id: 0x5a5fd068 kind: UNION @@ -226139,6 +226297,16 @@ struct_union { member_id: 0x75f74db3 } } +struct_union { + id: 0x5c5b3484 + kind: UNION + definition { 
+ bytesize: 8 + member_id: 0x2aac4c8c + member_id: 0x2bdfee76 + member_id: 0x36752b74 + } +} struct_union { id: 0x5c5fa7e4 kind: UNION @@ -226269,12 +226437,12 @@ struct_union { } } struct_union { - id: 0x5d84d1ff + id: 0x5d84ba41 kind: UNION definition { bytesize: 1 member_id: 0x2ddb63e4 - member_id: 0x23531e28 + member_id: 0x22fde5dd } } struct_union { @@ -227152,6 +227320,16 @@ struct_union { member_id: 0x082b1975 } } +struct_union { + id: 0x66926050 + kind: UNION + definition { + bytesize: 8 + member_id: 0xc185c03f + member_id: 0x27000c61 + member_id: 0x36752b74 + } +} struct_union { id: 0x66d53bfd kind: UNION @@ -237174,7 +237352,7 @@ struct_union { member_id: 0xb265164c member_id: 0xaf33fcbc member_id: 0x5f2a8271 - member_id: 0x2d0814ec + member_id: 0x366394f0 member_id: 0x63760b26 } } @@ -251403,7 +251581,7 @@ struct_union { member_id: 0x2d0812b0 member_id: 0x637607e0 member_id: 0xac894cc9 - member_id: 0xe0f63db8 + member_id: 0x34dd58d6 } } struct_union { @@ -253656,7 +253834,7 @@ struct_union { member_id: 0xa60fc2c2 member_id: 0xd12a0a5e member_id: 0x80f5a8ec - member_id: 0x2d081f94 + member_id: 0x3a6f0eb6 member_id: 0x63760151 member_id: 0xac894e49 member_id: 0xe0f6393d @@ -262893,7 +263071,7 @@ struct_union { member_id: 0x5ce532c4 member_id: 0xb5de8e04 member_id: 0x2165da89 - member_id: 0x34aaae1a + member_id: 0x34aab4f5 member_id: 0xe91a1d71 member_id: 0xbada6e7d member_id: 0x08cabd3c @@ -273188,6 +273366,20 @@ struct_union { member_id: 0x528c4588 } } +struct_union { + id: 0x61883543 + kind: STRUCT + name: "task_dma_buf_info" + definition { + bytesize: 48 + member_id: 0x10789eb1 + member_id: 0x027688eb + member_id: 0xb7dcf16e + member_id: 0x2d1fe165 + member_id: 0x8136732c + member_id: 0x082d878d + } +} struct_union { id: 0x84d533ac kind: STRUCT @@ -273447,7 +273639,7 @@ struct_union { member_id: 0xedf50eb4 member_id: 0xd666c042 member_id: 0x9a3ad2fc - member_id: 0x2d081365 + member_id: 0x355c1a4d member_id: 0x63760904 member_id: 0xac894e01 member_id: 
0xe0f6361e @@ -305362,6 +305554,12 @@ function { parameter_id: 0x21d7b2e4 parameter_id: 0x0c3d2d88 } +function { + id: 0x09df0d20 + return_type_id: 0x48b5725f + parameter_id: 0x6720d32f + parameter_id: 0x23da1e1b +} function { id: 0x09fe2e07 return_type_id: 0x33f8b54b @@ -305615,6 +305813,11 @@ function { parameter_id: 0x397d00ab parameter_id: 0x6720d32f } +function { + id: 0x0ef7ccf3 + return_type_id: 0x48b5725f + parameter_id: 0x79be7582 +} function { id: 0x0f2f7206 return_type_id: 0x17fa285b @@ -306743,6 +306946,12 @@ function { parameter_id: 0x6720d32f parameter_id: 0x6d7f5ff6 } +function { + id: 0x10ff6b9a + return_type_id: 0x48b5725f + parameter_id: 0x0258f96e + parameter_id: 0x3c4114bd +} function { id: 0x10ffd8df return_type_id: 0x48b5725f @@ -309753,6 +309962,13 @@ function { parameter_id: 0x3e10b518 parameter_id: 0x1b44744f } +function { + id: 0x150da89f + return_type_id: 0x48b5725f + parameter_id: 0x1042c9d1 + parameter_id: 0x6720d32f + parameter_id: 0x6720d32f +} function { id: 0x150f2137 return_type_id: 0x48b5725f @@ -310829,6 +311045,11 @@ function { parameter_id: 0x3e10b518 parameter_id: 0xf435685e } +function { + id: 0x169466ca + return_type_id: 0x48b5725f + parameter_id: 0x1830dd64 +} function { id: 0x169718bc return_type_id: 0x48b5725f @@ -314043,6 +314264,12 @@ function { return_type_id: 0x48b5725f parameter_id: 0x2c518f27 } +function { + id: 0x1b8fc015 + return_type_id: 0x48b5725f + parameter_id: 0x2e94a1e1 + parameter_id: 0x2cae7fa9 +} function { id: 0x1b90a8d8 return_type_id: 0x48b5725f @@ -314676,6 +314903,12 @@ function { parameter_id: 0x274194ac parameter_id: 0x2e18f543 } +function { + id: 0x1c56d411 + return_type_id: 0x48b5725f + parameter_id: 0x33756485 + parameter_id: 0x04f728c1 +} function { id: 0x1c5815c3 return_type_id: 0x48b5725f @@ -317728,6 +317961,12 @@ function { parameter_id: 0x4585663f parameter_id: 0x4585663f } +function { + id: 0x1fadab22 + return_type_id: 0x48b5725f + parameter_id: 0x3f84ee3c + parameter_id: 0x35304fb1 +} 
function { id: 0x1fae5edf return_type_id: 0x48b5725f @@ -317758,6 +317997,12 @@ function { return_type_id: 0x48b5725f parameter_id: 0x3ca2533c } +function { + id: 0x1fb4bf8e + return_type_id: 0x48b5725f + parameter_id: 0x3f84ee3c + parameter_id: 0x33756485 +} function { id: 0x1fb5d3a2 return_type_id: 0x48b5725f @@ -318963,6 +319208,14 @@ function { return_type_id: 0x33756485 parameter_id: 0x15a30023 } +function { + id: 0x34f12b6b + return_type_id: 0xe62ebf07 + parameter_id: 0x3c497d0a + parameter_id: 0x00c72527 + parameter_id: 0x4585663f + parameter_id: 0x4585663f +} function { id: 0x34f176e3 return_type_id: 0x33756485 @@ -320200,6 +320453,14 @@ function { return_type_id: 0x18bd6530 parameter_id: 0x4585663f } +function { + id: 0x4812e6a9 + return_type_id: 0x35304fb1 + parameter_id: 0x3f84ee3c + parameter_id: 0x33756485 + parameter_id: 0x33756485 + parameter_id: 0x6d7f5ff6 +} function { id: 0x48135354 return_type_id: 0x35304fb1 @@ -320458,6 +320719,11 @@ function { parameter_id: 0x18bd6530 parameter_id: 0x6720d32f } +function { + id: 0x4f10276c + return_type_id: 0x213700a8 + parameter_id: 0x0258f96e +} function { id: 0x4f33ba92 return_type_id: 0x21003da7 @@ -322290,6 +322556,11 @@ function { return_type_id: 0x2cae7fa9 parameter_id: 0x0258f96e } +function { + id: 0x636e6c75 + return_type_id: 0x38b136d3 + parameter_id: 0x2309ad3e +} function { id: 0x637004ab return_type_id: 0x1582ab06 @@ -322856,6 +323127,13 @@ function { parameter_id: 0x32caaf24 parameter_id: 0x1bf16028 } +function { + id: 0x6f9910f4 + return_type_id: 0x1d44326e + parameter_id: 0x0ca27481 + parameter_id: 0x3e6396e0 + parameter_id: 0x3399c453 +} function { id: 0x6f9cf068 return_type_id: 0x1d44326e @@ -327376,6 +327654,12 @@ function { parameter_id: 0x4585663f parameter_id: 0xf1a6dfed } +function { + id: 0x91bef2c3 + return_type_id: 0x6720d32f + parameter_id: 0x33756485 + parameter_id: 0x38d23361 +} function { id: 0x91bf7954 return_type_id: 0x6720d32f @@ -333716,6 +334000,13 @@ function { parameter_id: 
0x2bc93f35 parameter_id: 0x0258f96e } +function { + id: 0x977d7b37 + return_type_id: 0x6720d32f + parameter_id: 0x28350343 + parameter_id: 0x33756485 + parameter_id: 0xf435685e +} function { id: 0x977e98cb return_type_id: 0x6720d32f @@ -334707,6 +334998,17 @@ function { parameter_id: 0x27a7c613 parameter_id: 0x27a7c613 } +function { + id: 0x98787515 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0xf1a6dfed + parameter_id: 0x4585663f + parameter_id: 0x0d3144be + parameter_id: 0x06cbb7fd + parameter_id: 0x0850608e + parameter_id: 0x1bf16028 +} function { id: 0x98788d90 return_type_id: 0x6720d32f @@ -336915,6 +337217,15 @@ function { parameter_id: 0x1d19a9d5 parameter_id: 0x13580d6c } +function { + id: 0x9a2ab257 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x6720d32f + parameter_id: 0x6720d32f + parameter_id: 0x11cfee5a + parameter_id: 0x11cfee5a +} function { id: 0x9a2ab624 return_type_id: 0x6720d32f @@ -337655,6 +337966,15 @@ function { parameter_id: 0x4585663f parameter_id: 0x33756485 } +function { + id: 0x9aa1ff3f + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x4585663f + parameter_id: 0x6720d32f + parameter_id: 0x6720d32f + parameter_id: 0x11cfee5a +} function { id: 0x9aa2d024 return_type_id: 0x6720d32f @@ -339151,6 +339471,15 @@ function { parameter_id: 0x00c72527 parameter_id: 0x13580d6c } +function { + id: 0x9b430741 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x39182992 + parameter_id: 0x2170d06d + parameter_id: 0x2170d06d + parameter_id: 0x11cfee5a +} function { id: 0x9b43a9b0 return_type_id: 0x6720d32f @@ -339277,6 +339606,12 @@ function { parameter_id: 0x38fa32ef parameter_id: 0x13580d6c } +function { + id: 0x9b48ef76 + return_type_id: 0x6720d32f + parameter_id: 0x1830dd64 + parameter_id: 0x310ec01d +} function { id: 0x9b49a977 return_type_id: 0x6720d32f @@ -340167,6 +340502,19 @@ function { parameter_id: 0x3e10b518 parameter_id: 0xf435685e } 
+function { + id: 0x9b6c13bd + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x33756485 + parameter_id: 0x39182992 + parameter_id: 0x0d164218 + parameter_id: 0x3e6239e1 + parameter_id: 0x6720d32f + parameter_id: 0x6d7f5ff6 + parameter_id: 0x064d6086 + parameter_id: 0x064d6086 +} function { id: 0x9b6cf0a4 return_type_id: 0x6720d32f @@ -340625,6 +340973,21 @@ function { parameter_id: 0x3caf1899 parameter_id: 0x0a2e9ae5 } +function { + id: 0x9b82ade6 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0a63398f + parameter_id: 0x13580d6c + parameter_id: 0x13580d6c +} +function { + id: 0x9b82e086 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0a63398f + parameter_id: 0x13580d6c +} function { id: 0x9b83293d return_type_id: 0x6720d32f @@ -340821,6 +341184,12 @@ function { parameter_id: 0x36311c57 parameter_id: 0x18bd6530 } +function { + id: 0x9b8cc3ad + return_type_id: 0x6720d32f + parameter_id: 0x18150d9f + parameter_id: 0x0258f96e +} function { id: 0x9b8e1adc return_type_id: 0x6720d32f @@ -340945,6 +341314,13 @@ function { parameter_id: 0x92233392 parameter_id: 0x92233392 } +function { + id: 0x9b93638b + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0dd653d2 + parameter_id: 0x2e029f76 +} function { id: 0x9b938987 return_type_id: 0x6720d32f @@ -340997,6 +341373,15 @@ function { parameter_id: 0x0ed82db2 parameter_id: 0x082be49e } +function { + id: 0x9b973ccf + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0a63398f + parameter_id: 0x4585663f + parameter_id: 0x1bf16028 + parameter_id: 0x13580d6c +} function { id: 0x9b974729 return_type_id: 0x6720d32f @@ -341906,6 +342291,17 @@ function { parameter_id: 0x11cffa09 parameter_id: 0x92233392 } +function { + id: 0x9bbbc132 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0a63398f + parameter_id: 0xf1a6dfed + parameter_id: 0x6720d32f + parameter_id: 0x0b0164a7 + parameter_id: 
0x11cfee5a + parameter_id: 0x11cfee5a +} function { id: 0x9bbc66c0 return_type_id: 0x6720d32f @@ -353445,6 +353841,23 @@ function { parameter_id: 0x18a2fb63 parameter_id: 0x4585663f } +function { + id: 0xe10f8963 + return_type_id: 0x2eab5b8a + parameter_id: 0x0de4c6b1 + parameter_id: 0xfa5f37bb + parameter_id: 0x4585663f + parameter_id: 0x4585663f +} +function { + id: 0xe10f9802 + return_type_id: 0x2eab5b8a + parameter_id: 0x0de4c6b1 + parameter_id: 0xfa5f37bb + parameter_id: 0x4585663f + parameter_id: 0x4585663f + parameter_id: 0x4585663f +} function { id: 0xe12a6d4e return_type_id: 0x07450c2a @@ -354176,6 +354589,11 @@ function { parameter_id: 0x3fa672fd parameter_id: 0x3e10b518 } +function { + id: 0xf1b60446 + return_type_id: 0x6d7f5ff6 + parameter_id: 0x3c497d0a +} function { id: 0xf1c60201 return_type_id: 0x6d7f5ff6 @@ -354612,6 +355030,11 @@ function { parameter_id: 0x3399c453 parameter_id: 0x6720d32f } +function { + id: 0xf3ed77aa + return_type_id: 0x0756289d + parameter_id: 0x12e6ffae +} function { id: 0xf3ef3eef return_type_id: 0x0b7f62fc @@ -357277,6 +357700,15 @@ elf_symbol { type_id: 0xf74d9260 full_name: "__folio_start_writeback" } +elf_symbol { + id: 0xcd2cca92 + name: "__free_iova" + is_defined: true + symbol_type: FUNCTION + crc: 0xb3a0080d + type_id: 0x1fadab22 + full_name: "__free_iova" +} elf_symbol { id: 0x5b1ea047 name: "__free_pages" @@ -361835,6 +362267,15 @@ elf_symbol { type_id: 0x9bcd4ff7 full_name: "__traceiter_android_vh_check_uninterruptible_tasks_dn" } +elf_symbol { + id: 0x253229ea + name: "__traceiter_android_vh_chk_task" + is_defined: true + symbol_type: FUNCTION + crc: 0xa543dc37 + type_id: 0x9b93638b + full_name: "__traceiter_android_vh_chk_task" +} elf_symbol { id: 0xef7737f8 name: "__traceiter_android_vh_cleanup_old_buffers_bypass" @@ -363491,6 +363932,78 @@ elf_symbol { type_id: 0x9b883042 full_name: "__traceiter_android_vh_mm_compaction_end" } +elf_symbol { + id: 0x28fd3694 + name: "__traceiter_android_vh_mm_customize_ac" + 
is_defined: true + symbol_type: FUNCTION + crc: 0xc217cd07 + type_id: 0x98787515 + full_name: "__traceiter_android_vh_mm_customize_ac" +} +elf_symbol { + id: 0x7a520c7e + name: "__traceiter_android_vh_mm_customize_file_is_tiny" + is_defined: true + symbol_type: FUNCTION + crc: 0x2a03eb45 + type_id: 0x9aa1ff3f + full_name: "__traceiter_android_vh_mm_customize_file_is_tiny" +} +elf_symbol { + id: 0x4f807371 + name: "__traceiter_android_vh_mm_customize_lru_add_dst" + is_defined: true + symbol_type: FUNCTION + crc: 0x973264a8 + type_id: 0x9b430741 + full_name: "__traceiter_android_vh_mm_customize_lru_add_dst" +} +elf_symbol { + id: 0x7637ee8f + name: "__traceiter_android_vh_mm_customize_pgdat_balanced" + is_defined: true + symbol_type: FUNCTION + crc: 0x86a9060f + type_id: 0x9a2ab257 + full_name: "__traceiter_android_vh_mm_customize_pgdat_balanced" +} +elf_symbol { + id: 0x46918874 + name: "__traceiter_android_vh_mm_customize_rmqueue" + is_defined: true + symbol_type: FUNCTION + crc: 0x18e2077a + type_id: 0x9b973ccf + full_name: "__traceiter_android_vh_mm_customize_rmqueue" +} +elf_symbol { + id: 0x0b6a1d80 + name: "__traceiter_android_vh_mm_customize_suitable_zone" + is_defined: true + symbol_type: FUNCTION + crc: 0xc42fa581 + type_id: 0x9bbbc132 + full_name: "__traceiter_android_vh_mm_customize_suitable_zone" +} +elf_symbol { + id: 0x2be8352c + name: "__traceiter_android_vh_mm_customize_zone_max_order" + is_defined: true + symbol_type: FUNCTION + crc: 0xf1dc287d + type_id: 0x9b82e086 + full_name: "__traceiter_android_vh_mm_customize_zone_max_order" +} +elf_symbol { + id: 0xa792d107 + name: "__traceiter_android_vh_mm_customize_zone_pageset" + is_defined: true + symbol_type: FUNCTION + crc: 0xbdbf230e + type_id: 0x9b82ade6 + full_name: "__traceiter_android_vh_mm_customize_zone_pageset" +} elf_symbol { id: 0x9f58159a name: "__traceiter_android_vh_mm_direct_reclaim_enter" @@ -363518,6 +364031,15 @@ elf_symbol { type_id: 0x9bb5b719 full_name: 
"__traceiter_android_vh_mm_free_page" } +elf_symbol { + id: 0x945dd126 + name: "__traceiter_android_vh_mm_isolate_priv_lru" + is_defined: true + symbol_type: FUNCTION + crc: 0x8449665c + type_id: 0x9b6c13bd + full_name: "__traceiter_android_vh_mm_isolate_priv_lru" +} elf_symbol { id: 0x6f5c8275 name: "__traceiter_android_vh_mm_kcompactd_cpu_online" @@ -364148,6 +364670,15 @@ elf_symbol { type_id: 0x9b49a977 full_name: "__traceiter_android_vh_ptype_head" } +elf_symbol { + id: 0x1adac14e + name: "__traceiter_android_vh_put_task" + is_defined: true + symbol_type: FUNCTION + crc: 0x591f8208 + type_id: 0x9bdbdcc4 + full_name: "__traceiter_android_vh_put_task" +} elf_symbol { id: 0x2c963d28 name: "__traceiter_android_vh_queue_request_and_unlock" @@ -368954,6 +369485,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_check_uninterruptible_tasks_dn" } +elf_symbol { + id: 0xc974433c + name: "__tracepoint_android_vh_chk_task" + is_defined: true + symbol_type: OBJECT + crc: 0x32ced20a + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_chk_task" +} elf_symbol { id: 0xca10f06e name: "__tracepoint_android_vh_cleanup_old_buffers_bypass" @@ -370610,6 +371150,78 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_mm_compaction_end" } +elf_symbol { + id: 0xd947b2e6 + name: "__tracepoint_android_vh_mm_customize_ac" + is_defined: true + symbol_type: OBJECT + crc: 0x3f179df5 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_ac" +} +elf_symbol { + id: 0x2e4d22dc + name: "__tracepoint_android_vh_mm_customize_file_is_tiny" + is_defined: true + symbol_type: OBJECT + crc: 0xe3958935 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_file_is_tiny" +} +elf_symbol { + id: 0x95745257 + name: "__tracepoint_android_vh_mm_customize_lru_add_dst" + is_defined: true + symbol_type: OBJECT + crc: 0x3aecb3cb + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_lru_add_dst" +} +elf_symbol 
{ + id: 0x9ba36d51 + name: "__tracepoint_android_vh_mm_customize_pgdat_balanced" + is_defined: true + symbol_type: OBJECT + crc: 0x9ff913d9 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_pgdat_balanced" +} +elf_symbol { + id: 0x19f24652 + name: "__tracepoint_android_vh_mm_customize_rmqueue" + is_defined: true + symbol_type: OBJECT + crc: 0xa845e3ff + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_rmqueue" +} +elf_symbol { + id: 0x94940652 + name: "__tracepoint_android_vh_mm_customize_suitable_zone" + is_defined: true + symbol_type: OBJECT + crc: 0x2252b177 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_suitable_zone" +} +elf_symbol { + id: 0x4c4316c6 + name: "__tracepoint_android_vh_mm_customize_zone_max_order" + is_defined: true + symbol_type: OBJECT + crc: 0xfb3906ea + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_zone_max_order" +} +elf_symbol { + id: 0xc25513c9 + name: "__tracepoint_android_vh_mm_customize_zone_pageset" + is_defined: true + symbol_type: OBJECT + crc: 0x94a19f6a + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_customize_zone_pageset" +} elf_symbol { id: 0xd333a65c name: "__tracepoint_android_vh_mm_direct_reclaim_enter" @@ -370637,6 +371249,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_mm_free_page" } +elf_symbol { + id: 0xba9e92f8 + name: "__tracepoint_android_vh_mm_isolate_priv_lru" + is_defined: true + symbol_type: OBJECT + crc: 0x3e8c5eb2 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_mm_isolate_priv_lru" +} elf_symbol { id: 0x0f593caf name: "__tracepoint_android_vh_mm_kcompactd_cpu_online" @@ -371267,6 +371888,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_ptype_head" } +elf_symbol { + id: 0x74120954 + name: "__tracepoint_android_vh_put_task" + is_defined: true + symbol_type: OBJECT + crc: 0xd1ab6463 + type_id: 0x18ccbd2c + full_name: 
"__tracepoint_android_vh_put_task" +} elf_symbol { id: 0xf04b1c62 name: "__tracepoint_android_vh_queue_request_and_unlock" @@ -375011,6 +375641,15 @@ elf_symbol { type_id: 0xca4b711f full_name: "alloc_io_pgtable_ops" } +elf_symbol { + id: 0x82a5f160 + name: "alloc_iova" + is_defined: true + symbol_type: FUNCTION + crc: 0xfd3617ee + type_id: 0x4812e6a9 + full_name: "alloc_iova" +} elf_symbol { id: 0x9cff6ebf name: "alloc_iova_fast" @@ -375220,6 +375859,15 @@ elf_symbol { type_id: 0x33756485 full_name: "arch_freq_scale" } +elf_symbol { + id: 0x97ee5cc4 + name: "arch_invalidate_pmem" + is_defined: true + symbol_type: FUNCTION + crc: 0x2c635527 + type_id: 0x1567dd7e + full_name: "arch_invalidate_pmem" +} elf_symbol { id: 0x0c805860 name: "arch_stack_walk" @@ -375361,6 +376009,15 @@ elf_symbol { type_id: 0xd473b4c2 full_name: "async_schedule_node_domain" } +elf_symbol { + id: 0xa09651b4 + name: "async_synchronize_cookie" + is_defined: true + symbol_type: FUNCTION + crc: 0x7a81541b + type_id: 0x0ef7ccf3 + full_name: "async_synchronize_cookie" +} elf_symbol { id: 0x67de90c8 name: "async_synchronize_full_domain" @@ -380302,6 +380959,15 @@ elf_symbol { type_id: 0x10985193 full_name: "cpuidle_pause_and_lock" } +elf_symbol { + id: 0x3a3b1479 + name: "cpuidle_register" + is_defined: true + symbol_type: FUNCTION + crc: 0xd93d8695 + type_id: 0x9b48ef76 + full_name: "cpuidle_register" +} elf_symbol { id: 0x9b056e94 name: "cpuidle_register_governor" @@ -380320,6 +380986,15 @@ elf_symbol { type_id: 0x10985193 full_name: "cpuidle_resume_and_unlock" } +elf_symbol { + id: 0x92dc6c49 + name: "cpuidle_unregister" + is_defined: true + symbol_type: FUNCTION + crc: 0x39e6940e + type_id: 0x169466ca + full_name: "cpuidle_unregister" +} elf_symbol { id: 0xd878ab56 name: "cpumask_any_and_distribute" @@ -382433,6 +383108,15 @@ elf_symbol { type_id: 0x1c76bf87 full_name: "dev_pm_domain_detach_list" } +elf_symbol { + id: 0x5647272b + name: "dev_pm_domain_set" + is_defined: true + symbol_type: 
FUNCTION + crc: 0x8e3b0cf7 + type_id: 0x10ff6b9a + full_name: "dev_pm_domain_set" +} elf_symbol { id: 0x6ef38ba9 name: "dev_pm_genpd_add_notifier" @@ -389116,6 +389800,15 @@ elf_symbol { type_id: 0xf06a3ce3 full_name: "drm_edid_is_valid" } +elf_symbol { + id: 0xd1f29cd4 + name: "drm_edid_override_connector_update" + is_defined: true + symbol_type: FUNCTION + crc: 0x3e20bf8a + type_id: 0x91ed8ab4 + full_name: "drm_edid_override_connector_update" +} elf_symbol { id: 0x2396487e name: "drm_edid_raw" @@ -389188,6 +389881,15 @@ elf_symbol { type_id: 0x930bf787 full_name: "drm_event_reserve_init_locked" } +elf_symbol { + id: 0x2027d999 + name: "drm_file_get_master" + is_defined: true + symbol_type: FUNCTION + crc: 0x2765be64 + type_id: 0xf3ed77aa + full_name: "drm_file_get_master" +} elf_symbol { id: 0x9d83d0f8 name: "drm_flip_work_cleanup" @@ -390214,6 +390916,15 @@ elf_symbol { type_id: 0x92c9aa18 full_name: "drm_mode_create_dp_colorspace_property" } +elf_symbol { + id: 0x465f41c2 + name: "drm_mode_create_dvi_i_properties" + is_defined: true + symbol_type: FUNCTION + crc: 0x57e7bcfa + type_id: 0x9341cc84 + full_name: "drm_mode_create_dvi_i_properties" +} elf_symbol { id: 0xbe3f1757 name: "drm_mode_create_hdmi_colorspace_property" @@ -393504,6 +394215,15 @@ elf_symbol { type_id: 0x165a4073 full_name: "free_io_pgtable_ops" } +elf_symbol { + id: 0x25649749 + name: "free_iova" + is_defined: true + symbol_type: FUNCTION + crc: 0x42e921d3 + type_id: 0x1fb4bf8e + full_name: "free_iova" +} elf_symbol { id: 0x998ad938 name: "free_iova_fast" @@ -396900,6 +397620,15 @@ elf_symbol { type_id: 0x9d398c85 full_name: "hid_driver_suspend" } +elf_symbol { + id: 0x74041f0e + name: "hid_field_extract" + is_defined: true + symbol_type: FUNCTION + crc: 0x59849d89 + type_id: 0x34f12b6b + full_name: "hid_field_extract" +} elf_symbol { id: 0x8717f26f name: "hid_hw_close" @@ -396981,6 +397710,15 @@ elf_symbol { type_id: 0x9d13a27c full_name: "hid_input_report" } +elf_symbol { + id: 0xbfab1179 + 
name: "hid_is_usb" + is_defined: true + symbol_type: FUNCTION + crc: 0xe0a3d0c1 + type_id: 0xf1b60446 + full_name: "hid_is_usb" +} elf_symbol { id: 0x0eb11e95 name: "hid_open_report" @@ -396999,6 +397737,24 @@ elf_symbol { type_id: 0x9e711486 full_name: "hid_parse_report" } +elf_symbol { + id: 0x88241dc1 + name: "hid_register_report" + is_defined: true + symbol_type: FUNCTION + crc: 0x665f4981 + type_id: 0xe10f8963 + full_name: "hid_register_report" +} +elf_symbol { + id: 0x741d5af7 + name: "hid_report_raw_event" + is_defined: true + symbol_type: FUNCTION + crc: 0xee30c096 + type_id: 0x9d13a27c + full_name: "hid_report_raw_event" +} elf_symbol { id: 0x6e37f09d name: "hid_unregister_driver" @@ -397008,6 +397764,15 @@ elf_symbol { type_id: 0x104da524 full_name: "hid_unregister_driver" } +elf_symbol { + id: 0x29d6d842 + name: "hid_validate_values" + is_defined: true + symbol_type: FUNCTION + crc: 0xc1730de5 + type_id: 0xe10f9802 + full_name: "hid_validate_values" +} elf_symbol { id: 0x334ff68f name: "high_memory" @@ -397017,6 +397782,15 @@ elf_symbol { type_id: 0x18bd6530 full_name: "high_memory" } +elf_symbol { + id: 0xd12f6730 + name: "host1x_context_device_bus_type" + is_defined: true + symbol_type: OBJECT + crc: 0x19002790 + type_id: 0x257935aa + full_name: "host1x_context_device_bus_type" +} elf_symbol { id: 0xd7ecf501 name: "housekeeping_cpumask" @@ -399192,6 +399966,15 @@ elf_symbol { type_id: 0xfab4e970 full_name: "input_device_enabled" } +elf_symbol { + id: 0x594eb27d + name: "input_enable_softrepeat" + is_defined: true + symbol_type: FUNCTION + crc: 0x85a146c6 + type_id: 0x150da89f + full_name: "input_enable_softrepeat" +} elf_symbol { id: 0x1cfb5d03 name: "input_event" @@ -399696,6 +400479,15 @@ elf_symbol { type_id: 0x1b338a63 full_name: "iommu_detach_device_pasid" } +elf_symbol { + id: 0xc578c7af + name: "iommu_detach_group" + is_defined: true + symbol_type: FUNCTION + crc: 0xf1ed8a55 + type_id: 0x1b8fc015 + full_name: "iommu_detach_group" +} elf_symbol { 
id: 0x4c05b91e name: "iommu_dev_disable_feature" @@ -400155,6 +400947,24 @@ elf_symbol { type_id: 0xaa8f5c2d full_name: "iov_iter_zero" } +elf_symbol { + id: 0x20af6f53 + name: "iova_cache_get" + is_defined: true + symbol_type: FUNCTION + crc: 0x438d8df2 + type_id: 0x9d80e32f + full_name: "iova_cache_get" +} +elf_symbol { + id: 0x3f2fa978 + name: "iova_cache_put" + is_defined: true + symbol_type: FUNCTION + crc: 0xc7061ef3 + type_id: 0x10985193 + full_name: "iova_cache_put" +} elf_symbol { id: 0x6c6beb07 name: "iova_domain_init_rcaches" @@ -401266,6 +402076,15 @@ elf_symbol { type_id: 0xc20627da full_name: "jiffies_to_msecs" } +elf_symbol { + id: 0xaccdc826 + name: "jiffies_to_timespec64" + is_defined: true + symbol_type: FUNCTION + crc: 0x188ea314 + type_id: 0x1c56d411 + full_name: "jiffies_to_timespec64" +} elf_symbol { id: 0x81dadb36 name: "jiffies_to_usecs" @@ -407019,6 +407838,24 @@ elf_symbol { type_id: 0x9e768b87 full_name: "nf_register_net_hooks" } +elf_symbol { + id: 0x7d136425 + name: "nf_sk_lookup_slow_v4" + is_defined: true + symbol_type: FUNCTION + crc: 0xd62cd0ac + type_id: 0x6f9910f4 + full_name: "nf_sk_lookup_slow_v4" +} +elf_symbol { + id: 0xfe90bb40 + name: "nf_sk_lookup_slow_v6" + is_defined: true + symbol_type: FUNCTION + crc: 0x514b71c5 + type_id: 0x6f9910f4 + full_name: "nf_sk_lookup_slow_v6" +} elf_symbol { id: 0x840dad92 name: "nf_unregister_net_hook" @@ -407317,6 +408154,15 @@ elf_symbol { type_id: 0x152401b7 full_name: "nsecs_to_jiffies" } +elf_symbol { + id: 0x03adeb35 + name: "nsecs_to_jiffies64" + is_defined: true + symbol_type: FUNCTION + crc: 0x75d0deb9 + type_id: 0x73531faf + full_name: "nsecs_to_jiffies64" +} elf_symbol { id: 0x77722cfe name: "nvdimm_bus_register" @@ -409922,6 +410768,15 @@ elf_symbol { type_id: 0x04ce0b6f full_name: "pci_find_ext_capability" } +elf_symbol { + id: 0x780d1110 + name: "pci_find_host_bridge" + is_defined: true + symbol_type: FUNCTION + crc: 0x2e22e561 + type_id: 0x636e6c75 + full_name: 
"pci_find_host_bridge" +} elf_symbol { id: 0xbbb674cc name: "pci_find_next_bus" @@ -410030,6 +410885,15 @@ elf_symbol { type_id: 0x93acae9b full_name: "pci_host_probe" } +elf_symbol { + id: 0x64fb0865 + name: "pci_ignore_hotplug" + is_defined: true + symbol_type: FUNCTION + crc: 0x8723eb68 + type_id: 0x14e1f000 + full_name: "pci_ignore_hotplug" +} elf_symbol { id: 0x9c6c58ea name: "pci_iomap" @@ -410642,6 +411506,15 @@ elf_symbol { type_id: 0x7eefaca3 full_name: "pcie_link_speed" } +elf_symbol { + id: 0x912e005a + name: "pcie_reset_flr" + is_defined: true + symbol_type: FUNCTION + crc: 0x3f585be2 + type_id: 0x984cbfc3 + full_name: "pcie_reset_flr" +} elf_symbol { id: 0xffa3ecd1 name: "pcie_set_mps" @@ -412865,6 +413738,33 @@ elf_symbol { type_id: 0x9d16dd74 full_name: "pm_clk_suspend" } +elf_symbol { + id: 0xfa3ae6ff + name: "pm_generic_freeze" + is_defined: true + symbol_type: FUNCTION + crc: 0xaa59b3a8 + type_id: 0x9d16dd74 + full_name: "pm_generic_freeze" +} +elf_symbol { + id: 0xd43b4f66 + name: "pm_generic_poweroff" + is_defined: true + symbol_type: FUNCTION + crc: 0x6e5c45fc + type_id: 0x9d16dd74 + full_name: "pm_generic_poweroff" +} +elf_symbol { + id: 0xfb61e52a + name: "pm_generic_restore" + is_defined: true + symbol_type: FUNCTION + crc: 0x758be1ec + type_id: 0x9d16dd74 + full_name: "pm_generic_restore" +} elf_symbol { id: 0xe243fac9 name: "pm_generic_resume" @@ -412901,6 +413801,15 @@ elf_symbol { type_id: 0x9d16dd74 full_name: "pm_generic_suspend" } +elf_symbol { + id: 0xcae30dd3 + name: "pm_generic_thaw" + is_defined: true + symbol_type: FUNCTION + crc: 0xc6da423b + type_id: 0x9d16dd74 + full_name: "pm_generic_thaw" +} elf_symbol { id: 0x92ae3cd2 name: "pm_genpd_add_device" @@ -413387,6 +414296,15 @@ elf_symbol { type_id: 0x9d80e32f full_name: "power_supply_is_system_supplied" } +elf_symbol { + id: 0x778ba508 + name: "power_supply_powers" + is_defined: true + symbol_type: FUNCTION + crc: 0x974bc8a3 + type_id: 0x9b8cc3ad + full_name: 
"power_supply_powers" +} elf_symbol { id: 0x26c5a1b9 name: "power_supply_put" @@ -416520,6 +417438,15 @@ elf_symbol { type_id: 0x9285caa7 full_name: "reset_control_assert" } +elf_symbol { + id: 0x3fb5c5d4 + name: "reset_control_bulk_acquire" + is_defined: true + symbol_type: FUNCTION + crc: 0x0207a6c6 + type_id: 0x84c7bf9c + full_name: "reset_control_bulk_acquire" +} elf_symbol { id: 0xace4bcdf name: "reset_control_bulk_assert" @@ -416538,6 +417465,15 @@ elf_symbol { type_id: 0x84c7bf9c full_name: "reset_control_bulk_deassert" } +elf_symbol { + id: 0x1a32d2b3 + name: "reset_control_bulk_release" + is_defined: true + symbol_type: FUNCTION + crc: 0xf4cd9f8f + type_id: 0x09df0d20 + full_name: "reset_control_bulk_release" +} elf_symbol { id: 0xd76b82b2 name: "reset_control_deassert" @@ -426178,6 +427114,15 @@ elf_symbol { type_id: 0x19c5ab78 full_name: "tegra_bpmp_free_mrq" } +elf_symbol { + id: 0x4a1dceb2 + name: "tegra_bpmp_get" + is_defined: true + symbol_type: FUNCTION + crc: 0x066facf2 + type_id: 0x4f10276c + full_name: "tegra_bpmp_get" +} elf_symbol { id: 0xa2565005 name: "tegra_bpmp_mrq_is_supported" @@ -426223,6 +427168,15 @@ elf_symbol { type_id: 0x9548acb9 full_name: "tegra_bpmp_transfer_atomic" } +elf_symbol { + id: 0x3c42a066 + name: "tegra_fuse_readl" + is_defined: true + symbol_type: FUNCTION + crc: 0xa9ed62d2 + type_id: 0x91bef2c3 + full_name: "tegra_fuse_readl" +} elf_symbol { id: 0x07f159e7 name: "tegra_ivc_init" @@ -436372,6 +437326,15 @@ elf_symbol { type_id: 0x80d705f2 full_name: "zlib_inflate" } +elf_symbol { + id: 0x84c3c252 + name: "zlib_inflateEnd" + is_defined: true + symbol_type: FUNCTION + crc: 0x107e5878 + type_id: 0x814b86be + full_name: "zlib_inflateEnd" +} elf_symbol { id: 0xf6eda681 name: "zlib_inflateIncomp" @@ -436749,6 +437712,7 @@ interface { symbol_id: 0x69ff7fd9 symbol_id: 0xebf4b11f symbol_id: 0xb8e29ef0 + symbol_id: 0xcd2cca92 symbol_id: 0x5b1ea047 symbol_id: 0x07f88ce8 symbol_id: 0xe458ae39 @@ -437255,6 +438219,7 @@ interface { 
symbol_id: 0xeebf3d23 symbol_id: 0xa2224fa6 symbol_id: 0xd37cc550 + symbol_id: 0x253229ea symbol_id: 0xef7737f8 symbol_id: 0xe50d4e50 symbol_id: 0xaed0a325 @@ -437439,9 +438404,18 @@ interface { symbol_id: 0xf8413699 symbol_id: 0x22de652b symbol_id: 0xf928bf8a + symbol_id: 0x28fd3694 + symbol_id: 0x7a520c7e + symbol_id: 0x4f807371 + symbol_id: 0x7637ee8f + symbol_id: 0x46918874 + symbol_id: 0x0b6a1d80 + symbol_id: 0x2be8352c + symbol_id: 0xa792d107 symbol_id: 0x9f58159a symbol_id: 0x29c67d40 symbol_id: 0x3fe16974 + symbol_id: 0x945dd126 symbol_id: 0x6f5c8275 symbol_id: 0xf182fb15 symbol_id: 0xe44dacb1 @@ -437512,6 +438486,7 @@ interface { symbol_id: 0xf2c39651 symbol_id: 0x3c69cece symbol_id: 0x93303c51 + symbol_id: 0x1adac14e symbol_id: 0x2c963d28 symbol_id: 0x3a545b61 symbol_id: 0x96662dde @@ -438046,6 +439021,7 @@ interface { symbol_id: 0x22653fb1 symbol_id: 0x2cf5984c symbol_id: 0x62b6878e + symbol_id: 0xc974433c symbol_id: 0xca10f06e symbol_id: 0xe8cdcd02 symbol_id: 0xbbfbc9db @@ -438230,9 +439206,18 @@ interface { symbol_id: 0xb32b3b17 symbol_id: 0xa3d5f70d symbol_id: 0x72c79d80 + symbol_id: 0xd947b2e6 + symbol_id: 0x2e4d22dc + symbol_id: 0x95745257 + symbol_id: 0x9ba36d51 + symbol_id: 0x19f24652 + symbol_id: 0x94940652 + symbol_id: 0x4c4316c6 + symbol_id: 0xc25513c9 symbol_id: 0xd333a65c symbol_id: 0xddcff44a symbol_id: 0x533ca98e + symbol_id: 0xba9e92f8 symbol_id: 0x0f593caf symbol_id: 0x47bcd15f symbol_id: 0xb6da564f @@ -438303,6 +439288,7 @@ interface { symbol_id: 0x0e92ee53 symbol_id: 0xe7b3cb34 symbol_id: 0xb0c197a3 + symbol_id: 0x74120954 symbol_id: 0xf04b1c62 symbol_id: 0x811d5fab symbol_id: 0x2d9a331c @@ -438719,6 +439705,7 @@ interface { symbol_id: 0x962c959a symbol_id: 0xdeecf7a4 symbol_id: 0xeba70df6 + symbol_id: 0x82a5f160 symbol_id: 0x9cff6ebf symbol_id: 0xcf6df527 symbol_id: 0x00374dfe @@ -438742,6 +439729,7 @@ interface { symbol_id: 0x520c1997 symbol_id: 0x2d3715da symbol_id: 0x7ff45ec2 + symbol_id: 0x97ee5cc4 symbol_id: 0x0c805860 symbol_id: 
0xc447be59 symbol_id: 0x1198e2f6 @@ -438758,6 +439746,7 @@ interface { symbol_id: 0xac417394 symbol_id: 0xd9184490 symbol_id: 0xe6ba30b7 + symbol_id: 0xa09651b4 symbol_id: 0x67de90c8 symbol_id: 0x5f6a1554 symbol_id: 0x3beebbde @@ -439307,8 +440296,10 @@ interface { symbol_id: 0x28b04407 symbol_id: 0x474c5b56 symbol_id: 0xb1a9cc86 + symbol_id: 0x3a3b1479 symbol_id: 0x9b056e94 symbol_id: 0x2061f0ca + symbol_id: 0x92dc6c49 symbol_id: 0xd878ab56 symbol_id: 0x4c5ac0a5 symbol_id: 0xd53a5822 @@ -439543,6 +440534,7 @@ interface { symbol_id: 0xdfeb23ad symbol_id: 0xe5fe5ba3 symbol_id: 0x924ff6bb + symbol_id: 0x5647272b symbol_id: 0x6ef38ba9 symbol_id: 0xbd3b590c symbol_id: 0xa7986ac9 @@ -440283,6 +441275,7 @@ interface { symbol_id: 0x3d89aa92 symbol_id: 0xd5cd8e9b symbol_id: 0x903baf1e + symbol_id: 0xd1f29cd4 symbol_id: 0x2396487e symbol_id: 0xf7be00e0 symbol_id: 0x9ff39d6a @@ -440291,6 +441284,7 @@ interface { symbol_id: 0x99beb7c8 symbol_id: 0x0ba86fe5 symbol_id: 0x90dbe313 + symbol_id: 0x2027d999 symbol_id: 0x9d83d0f8 symbol_id: 0x11744540 symbol_id: 0x4b6be7d2 @@ -440405,6 +441399,7 @@ interface { symbol_id: 0xd4a7d3d3 symbol_id: 0x51184dc0 symbol_id: 0xf5b01d8e + symbol_id: 0x465f41c2 symbol_id: 0xbe3f1757 symbol_id: 0xb2afee20 symbol_id: 0x58b810bd @@ -440770,6 +441765,7 @@ interface { symbol_id: 0xb511ce50 symbol_id: 0xef2d8e08 symbol_id: 0x9b815cdc + symbol_id: 0x25649749 symbol_id: 0x998ad938 symbol_id: 0x27e49ce0 symbol_id: 0xa0e729a5 @@ -441146,6 +442142,7 @@ interface { symbol_id: 0x2ffc7c7e symbol_id: 0x1706be22 symbol_id: 0x4c3911f0 + symbol_id: 0x74041f0e symbol_id: 0x8717f26f symbol_id: 0x361004c8 symbol_id: 0xcf5ea9a2 @@ -441155,10 +442152,15 @@ interface { symbol_id: 0x52d444b1 symbol_id: 0x7d0e44ca symbol_id: 0x10de460f + symbol_id: 0xbfab1179 symbol_id: 0x0eb11e95 symbol_id: 0x6ca4d0ed + symbol_id: 0x88241dc1 + symbol_id: 0x741d5af7 symbol_id: 0x6e37f09d + symbol_id: 0x29d6d842 symbol_id: 0x334ff68f + symbol_id: 0xd12f6730 symbol_id: 0xd7ecf501 
symbol_id: 0xa5f8942d symbol_id: 0x552bcd06 @@ -441399,6 +442401,7 @@ interface { symbol_id: 0x2336c16f symbol_id: 0x5b69d05f symbol_id: 0x5a86c89e + symbol_id: 0x594eb27d symbol_id: 0x1cfb5d03 symbol_id: 0x60efc3a4 symbol_id: 0xc802de31 @@ -441455,6 +442458,7 @@ interface { symbol_id: 0xe80863b1 symbol_id: 0xd81a7d03 symbol_id: 0x0566bca1 + symbol_id: 0xc578c7af symbol_id: 0x4c05b91e symbol_id: 0xa5459730 symbol_id: 0xe77a1ae2 @@ -441506,6 +442510,8 @@ interface { symbol_id: 0x1c2c7614 symbol_id: 0xc559647d symbol_id: 0x38ac7923 + symbol_id: 0x20af6f53 + symbol_id: 0x3f2fa978 symbol_id: 0x6c6beb07 symbol_id: 0x1be0f35f symbol_id: 0xd9fe9b1d @@ -441629,6 +442635,7 @@ interface { symbol_id: 0x6bcfa61d symbol_id: 0x01438401 symbol_id: 0x9175ce1c + symbol_id: 0xaccdc826 symbol_id: 0x81dadb36 symbol_id: 0x9bfc3a5e symbol_id: 0xc750a072 @@ -442267,6 +443274,8 @@ interface { symbol_id: 0x82e37620 symbol_id: 0x8b1dfb41 symbol_id: 0x43078d96 + symbol_id: 0x7d136425 + symbol_id: 0xfe90bb40 symbol_id: 0x840dad92 symbol_id: 0x619db28f symbol_id: 0x6aeef72a @@ -442300,6 +443309,7 @@ interface { symbol_id: 0xfab02ca8 symbol_id: 0xd7668767 symbol_id: 0x50d8c489 + symbol_id: 0x03adeb35 symbol_id: 0x77722cfe symbol_id: 0x58765106 symbol_id: 0x14f0cfc8 @@ -442589,6 +443599,7 @@ interface { symbol_id: 0x3852868c symbol_id: 0x27f20808 symbol_id: 0x63876663 + symbol_id: 0x780d1110 symbol_id: 0xbbb674cc symbol_id: 0xdea420f5 symbol_id: 0x08190210 @@ -442601,6 +443612,7 @@ interface { symbol_id: 0x9ac8ef20 symbol_id: 0x35c96922 symbol_id: 0xbe6406c3 + symbol_id: 0x64fb0865 symbol_id: 0x9c6c58ea symbol_id: 0x2fefe933 symbol_id: 0x1c994923 @@ -442669,6 +443681,7 @@ interface { symbol_id: 0x8897c24a symbol_id: 0xb69b7010 symbol_id: 0xc0468e7f + symbol_id: 0x912e005a symbol_id: 0xffa3ecd1 symbol_id: 0x42595f98 symbol_id: 0xd085753f @@ -442916,10 +443929,14 @@ interface { symbol_id: 0xa2a45fe2 symbol_id: 0xe8ce7aa9 symbol_id: 0xe1b4680c + symbol_id: 0xfa3ae6ff + symbol_id: 0xd43b4f66 + 
symbol_id: 0xfb61e52a symbol_id: 0xe243fac9 symbol_id: 0xf140b7ff symbol_id: 0x2e17e257 symbol_id: 0xb89cc5d0 + symbol_id: 0xcae30dd3 symbol_id: 0x92ae3cd2 symbol_id: 0x8dbf40f1 symbol_id: 0x80c5af01 @@ -442974,6 +443991,7 @@ interface { symbol_id: 0x4804f611 symbol_id: 0x842fc505 symbol_id: 0xd28bb85a + symbol_id: 0x778ba508 symbol_id: 0x26c5a1b9 symbol_id: 0xdb34fc88 symbol_id: 0xb0eab20a @@ -443322,8 +444340,10 @@ interface { symbol_id: 0xb15014ad symbol_id: 0x089f5fe6 symbol_id: 0x0c73acc8 + symbol_id: 0x3fb5c5d4 symbol_id: 0xace4bcdf symbol_id: 0x57ee69c1 + symbol_id: 0x1a32d2b3 symbol_id: 0xd76b82b2 symbol_id: 0x30c7b7f4 symbol_id: 0x9c7a2d6c @@ -444395,11 +445415,13 @@ interface { symbol_id: 0x591431f1 symbol_id: 0x6869c83d symbol_id: 0xe54ea1f3 + symbol_id: 0x4a1dceb2 symbol_id: 0xa2565005 symbol_id: 0xfc2dbec4 symbol_id: 0x08907db0 symbol_id: 0x81a07067 symbol_id: 0xb47ee8bd + symbol_id: 0x3c42a066 symbol_id: 0x07f159e7 symbol_id: 0x94af2b02 symbol_id: 0xa7d44351 @@ -445526,6 +446548,7 @@ interface { symbol_id: 0xf158bd25 symbol_id: 0xee403ebb symbol_id: 0x52ed8956 + symbol_id: 0x84c3c252 symbol_id: 0xf6eda681 symbol_id: 0xd7745fba symbol_id: 0xd9789392 diff --git a/android/abi_gki_aarch64.stg.allowed_breaks b/android/abi_gki_aarch64.stg.allowed_breaks index 8f5f52dbe65a..11988f8a9b28 100644 --- a/android/abi_gki_aarch64.stg.allowed_breaks +++ b/android/abi_gki_aarch64.stg.allowed_breaks @@ -213,3 +213,29 @@ type 'struct scm_stat' changed type 'struct scm_fp_list' changed member 'bool dead' was added +type 'struct kvm_iommu_ops' changed + member 'u64 android_kabi_reserved1' was removed + member 'union { int(* iotlb_sync_map)(struct kvm_hyp_iommu_domain*, unsigned long, size_t); struct { u64 android_kabi_reserved1; }; union { }; }' was added + +type 'struct prefix_info' changed + member changed from 'union { __u8 flags; struct { __u8 reserved:6; __u8 autoconf:1; __u8 onlink:1; }; }' to 'union { __u8 flags; struct { __u8 reserved:4; __u8 preferpd:1; __u8 
routeraddr:1; __u8 autoconf:1; __u8 onlink:1; }; }' + type changed from 'union { __u8 flags; struct { __u8 reserved:6; __u8 autoconf:1; __u8 onlink:1; }; }' to 'union { __u8 flags; struct { __u8 reserved:4; __u8 preferpd:1; __u8 routeraddr:1; __u8 autoconf:1; __u8 onlink:1; }; }' + member changed from 'struct { __u8 reserved:6; __u8 autoconf:1; __u8 onlink:1; }' to 'struct { __u8 reserved:4; __u8 preferpd:1; __u8 routeraddr:1; __u8 autoconf:1; __u8 onlink:1; }' + type changed from 'struct { __u8 reserved:6; __u8 autoconf:1; __u8 onlink:1; }' to 'struct { __u8 reserved:4; __u8 preferpd:1; __u8 routeraddr:1; __u8 autoconf:1; __u8 onlink:1; }' + member changed from '__u8 reserved:6' to '__u8 reserved:4' + bit-field size changed from 6 to 4 + member '__u8 preferpd:1' was added + member '__u8 routeraddr:1' was added + +type 'struct ipv6_devconf' changed + member 'u64 android_kabi_reserved4' was removed + member 'union { struct { __u8 ra_honor_pio_pflag; __u8 padding4[7]; }; struct { u64 android_kabi_reserved4; }; union { }; }' was added + +type 'struct task_struct' changed + member 'u64 android_kabi_reserved1' was removed + member 'union { struct task_dma_buf_info* dmabuf_info; struct { u64 android_kabi_reserved1; }; union { }; }' was added + +type 'struct dma_buf' changed + member 'u64 android_kabi_reserved1' was removed + member 'union { atomic64_t nr_task_refs; struct { u64 android_kabi_reserved1; }; union { }; }' was added + diff --git a/android/abi_gki_aarch64_amlogic b/android/abi_gki_aarch64_amlogic index 2a3c0510146a..4ad39bd7effc 100644 --- a/android/abi_gki_aarch64_amlogic +++ b/android/abi_gki_aarch64_amlogic @@ -2372,7 +2372,6 @@ uart_resume_port uart_set_options uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_db845c b/android/abi_gki_aarch64_db845c index 8a1438070389..5fca5b7e823a 100644 --- a/android/abi_gki_aarch64_db845c +++ b/android/abi_gki_aarch64_db845c @@ 
-1628,7 +1628,6 @@ uart_remove_one_port uart_resume_port uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_exynos b/android/abi_gki_aarch64_exynos index 5e1c89bd0db3..c260589847e7 100644 --- a/android/abi_gki_aarch64_exynos +++ b/android/abi_gki_aarch64_exynos @@ -1689,7 +1689,6 @@ uart_resume_port uart_set_options uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_exynosauto b/android/abi_gki_aarch64_exynosauto index 1c0af2cb9ddd..1fb6b29d45f4 100644 --- a/android/abi_gki_aarch64_exynosauto +++ b/android/abi_gki_aarch64_exynosauto @@ -988,7 +988,6 @@ uart_resume_port uart_set_options uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_imx b/android/abi_gki_aarch64_imx index d2e8e2b9b6bf..8de6e482fa81 100644 --- a/android/abi_gki_aarch64_imx +++ b/android/abi_gki_aarch64_imx @@ -2550,7 +2550,6 @@ uart_resume_port uart_set_options uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_mtk b/android/abi_gki_aarch64_mtk index ebecd7f8c5dd..2a33194968c8 100644 --- a/android/abi_gki_aarch64_mtk +++ b/android/abi_gki_aarch64_mtk @@ -1196,9 +1196,14 @@ hid_add_device hid_allocate_device hid_destroy_device + hid_field_extract hid_ignore hid_input_report + hid_is_usb hid_parse_report + hid_register_report + hid_report_raw_event + hid_validate_values housekeeping_cpumask housekeeping_overridden housekeeping_test_cpu @@ -1322,6 +1327,7 @@ input_alloc_absinfo input_allocate_device input_close_device + input_enable_softrepeat input_event input_free_device input_mt_destroy_slots @@ -1778,6 +1784,8 @@ net_selftest_get_strings nf_conntrack_destroy nf_register_net_hooks + nf_sk_lookup_slow_v4 + nf_sk_lookup_slow_v6 
nf_unregister_net_hooks nla_find nla_memcpy @@ -2215,6 +2223,7 @@ power_supply_get_drvdata power_supply_get_property power_supply_is_system_supplied + power_supply_powers power_supply_put power_supply_register power_supply_reg_notifier @@ -3801,6 +3810,7 @@ zlib_deflateReset zlib_deflate_workspacesize zlib_inflate + zlib_inflateEnd zlib_inflateIncomp zlib_inflateInit2 zlib_inflateReset diff --git a/android/abi_gki_aarch64_nvidia b/android/abi_gki_aarch64_nvidia index 2497126b8372..ef2587aba9fa 100644 --- a/android/abi_gki_aarch64_nvidia +++ b/android/abi_gki_aarch64_nvidia @@ -1,175 +1,416 @@ [abi_symbol_list] # commonly used symbols alloc_chrdev_region + __alloc_pages + __alloc_skb + alloc_workqueue alt_cb_patch_nops + anon_inode_getfile __arch_copy_from_user __arch_copy_to_user + arm64_use_ng_mappings + __bitmap_clear + bitmap_find_next_zero_area_off + __bitmap_set + bpf_trace_run1 + bpf_trace_run2 + bpf_trace_run3 + bpf_trace_run4 + bpf_trace_run5 + bpf_trace_run6 + bpf_trace_run8 + cancel_delayed_work + cancel_delayed_work_sync + capable cdev_add cdev_del cdev_init __check_object_size class_create class_destroy + clk_disable + clk_enable + clk_get_rate + clk_prepare + clk_set_rate + clk_unprepare complete + __const_udelay + cpu_number + __cpu_online_mask + __cpu_possible_mask + debugfs_create_bool + debugfs_create_dir + debugfs_create_file + debugfs_create_u32 + debugfs_create_u64 + debugfs_remove + delayed_work_timer_fn + destroy_workqueue dev_driver_string _dev_err + dev_get_by_name device_create + device_create_file + device_del device_destroy + device_unregister _dev_info + devm_clk_get + devm_clk_put + devm_free_irq + devm_gpio_request_one + devm_ioremap + devm_ioremap_resource devm_kfree devm_kmalloc devm_memremap + devm_of_platform_populate devm_request_threaded_irq + __devm_reset_control_get + dev_set_name _dev_warn + disable_irq + disable_irq_nosync + dma_alloc_attrs + dma_buf_attach + dma_buf_detach + dma_buf_export + dma_buf_get + 
dma_buf_map_attachment + dma_buf_put + dma_buf_unmap_attachment + dma_fence_add_callback + dma_fence_array_ops + dma_fence_context_alloc + dma_fence_init + dma_fence_release + dma_fence_remove_callback + dma_fence_signal + dma_fence_signal_timestamp_locked + dma_fence_wait_timeout + dma_free_attrs + dma_map_page_attrs + dma_map_resource + dma_map_sg_attrs + dma_set_coherent_mask + dma_set_mask + dma_sync_single_for_cpu + dma_unmap_resource + dma_unmap_sg_attrs + down_read + down_write + down_write_trylock + enable_irq + __fdget + fd_install + fget + _find_first_zero_bit + _find_next_bit + finish_wait + flush_delayed_work fortify_panic + fput free_irq + __free_pages + generic_file_llseek + __get_free_pages + __get_task_comm + get_unused_fd_flags + gpiod_get_raw_value + gpiod_set_raw_value + gpiod_to_irq + gpio_to_desc + hrtimer_cancel + hrtimer_init + hrtimer_start_range_ns + init_net + __init_rwsem __init_swait_queue_head init_timer_key + init_wait_entry __init_waitqueue_head + iommu_get_domain_for_dev + ioremap_prot + iounmap + is_vmalloc_addr + jiffies + jiffies_to_timespec64 jiffies_to_usecs + kasan_flag_enabled + kasprintf kfree + kimage_voffset __kmalloc kmalloc_caches + kmalloc_large kmalloc_trace + kmem_cache_alloc + kmem_cache_create + kmem_cache_destroy + kmem_cache_free + krealloc + kstrdup kstrtouint + kthread_create_on_node + kthread_should_stop + kthread_stop + ktime_get + ktime_get_mono_fast_ns + ktime_get_real_ts64 + ktime_get_with_offset + kvfree_call_rcu + __list_add_valid_or_report + __list_del_entry_valid_or_report log_post_read_mmio + log_post_write_mmio log_read_mmio + log_write_mmio + memcmp memcpy __memcpy_fromio memset + memstart_addr + mod_timer module_layout + __msecs_to_jiffies + msleep __mutex_init mutex_lock + mutex_trylock mutex_unlock + netlink_unicast + noop_llseek + __num_online_cpus + of_device_get_match_data + of_device_is_available + of_device_is_compatible + of_dma_configure_id + of_find_compatible_node + of_find_device_by_node 
+ of_find_matching_node_and_match + of_find_node_by_name + of_find_node_opts_by_path of_find_property + of_get_named_gpio + of_get_next_child + of_get_property + of_machine_compatible_match + of_match_device + __of_parse_phandle_with_args + of_property_count_elems_of_size + of_property_match_string + of_property_read_string + of_property_read_string_helper of_property_read_u32_index of_property_read_variable_u32_array + of_property_read_variable_u8_array panic + param_ops_bool + perf_trace_buf_alloc + perf_trace_run_bpf_submit + pfn_is_map_memory pid_task + platform_device_unregister __platform_driver_register platform_driver_unregister + platform_get_irq + platform_get_irq_byname + platform_get_resource + platform_get_resource_byname + __platform_register_drivers + platform_unregister_drivers + __pm_runtime_disable + pm_runtime_enable + pm_runtime_force_suspend + __pm_runtime_idle + __pm_runtime_resume + pm_runtime_set_autosuspend_delay + __pm_runtime_suspend + __pm_runtime_use_autosuspend + preempt_schedule + preempt_schedule_notrace + prepare_to_wait_event _printk + put_device __put_task_struct + put_unused_fd + queue_delayed_work_on + queue_work_on + ___ratelimit _raw_spin_lock + _raw_spin_lock_irqsave _raw_spin_unlock + _raw_spin_unlock_irqrestore + refcount_dec_and_mutex_lock + refcount_warn_saturate + register_chrdev_region + register_pm_notifier + release_firmware + remap_pfn_range request_threaded_irq + reset_control_assert + reset_control_reset + schedule schedule_timeout + scnprintf + seq_lseek + seq_printf + seq_puts + seq_read + seq_write + sg_alloc_table + sg_alloc_table_from_pages_segment + sg_free_table + sg_init_table + sg_next + simple_attr_open + simple_attr_read + simple_attr_release + simple_attr_write + single_open + single_release snprintf + soc_device_match + split_page + sprintf + sscanf __stack_chk_fail + strchr + strcmp + strcpy strlen strncmp + strncpy strnlen strscpy + __sw_hweight64 + sync_file_create + sync_file_get_fence 
sysfs_create_group sysfs_remove_group system_cpucaps system_wq + tegra_bpmp_transfer tegra_ivc_notified tegra_ivc_read_advance tegra_ivc_read_get_next_frame tegra_ivc_reset tegra_ivc_write_advance tegra_ivc_write_get_next_frame + tegra_sku_info + trace_event_buffer_commit + trace_event_buffer_reserve + trace_event_printf + trace_event_raw_init + trace_event_reg + trace_handle_return __traceiter_rwmmio_post_read + __traceiter_rwmmio_post_write __traceiter_rwmmio_read + __traceiter_rwmmio_write __tracepoint_rwmmio_post_read + __tracepoint_rwmmio_post_write __tracepoint_rwmmio_read + __tracepoint_rwmmio_write + trace_print_hex_seq + trace_raw_output_prep + __trace_trigger_soft_disabled + __udelay unregister_chrdev_region + up + up_read + up_write + usleep_range_state + vfree + vmalloc + vmap + vsnprintf + vunmap + vzalloc + wait_for_completion + wait_for_completion_interruptible + wait_for_completion_timeout __wake_up + wake_up_process __warn_printk + xa_destroy + xa_erase + xa_load -# required by ivc-cdev.ko - device_del - devm_free_irq - noop_llseek - remap_pfn_range +# required by cpuidle-tegra-auto.ko + cpuidle_register + cpuidle_unregister + +# required by host1x-emu.ko + nsecs_to_jiffies64 + +# required by host1x.ko + alloc_iova + __free_iova + free_iova + host1x_context_device_bus_type + iommu_detach_group + iova_cache_get + iova_cache_put + pm_generic_freeze + pm_generic_poweroff + pm_generic_restore + pm_generic_thaw + reset_control_bulk_acquire + reset_control_bulk_release # required by ivc_ext.ko - dma_sync_single_for_cpu __memcpy_toio +# required by nvgpu.ko + async_synchronize_cookie + dev_pm_domain_set + pcie_reset_flr + pci_find_host_bridge + pci_ignore_hotplug + tegra_bpmp_get + tegra_fuse_readl + +# required by nvmap.ko + arch_invalidate_pmem + # required by nvsciipc.ko _dev_notice - __fdget find_get_pid - fput platform_device_register_full - platform_device_unregister - sprintf # required by tegra_bpmp.ko clk_hw_determine_rate_no_reparent 
clk_hw_get_name clk_hw_unregister - debugfs_create_dir - debugfs_create_file - debugfs_remove dentry_path_raw devm_clk_hw_register devm_reset_controller_register - dma_alloc_attrs - dma_free_attrs - _find_next_bit - kmalloc_large - kstrdup - ktime_get of_clk_add_hw_provider - of_device_get_match_data of_genpd_add_provider_onecell - __of_parse_phandle_with_args of_platform_default_populate pm_genpd_init pm_genpd_remove - seq_lseek - seq_read - seq_write single_open_size - single_release - strncpy tegra_bpmp_free_mrq tegra_bpmp_mrq_is_supported tegra_bpmp_mrq_return tegra_bpmp_request_mrq - tegra_bpmp_transfer tegra_bpmp_transfer_atomic - tegra_sku_info # required by tegra_hv.ko - arm64_use_ng_mappings class_create_file_ns - ioremap_prot - iounmap irq_get_irq_data - memstart_addr of_add_property of_chosen - of_find_compatible_node of_irq_get - pfn_is_map_memory tegra_ivc_init # required by tegra_hv_pm_ctl.ko - __alloc_skb find_vpid - finish_wait - init_net - init_wait_entry - msleep __netlink_kernel_create - netlink_unicast __nlmsg_put - prepare_to_wait_event - register_pm_notifier - schedule - strcmp - wait_for_completion_timeout - -# required by tegra_hv_vblk_oops.ko - delayed_work_timer_fn - dma_map_page_attrs - __get_free_pages - is_vmalloc_addr - queue_delayed_work_on # required by tegra_vblk.ko blk_execute_rq @@ -192,41 +433,20 @@ blk_queue_physical_block_size blk_queue_write_cache __blk_rq_map_sg - capable - __cpu_possible_mask del_gendisk device_add_disk - device_create_file - disable_irq disk_check_media_change - dma_map_sg_attrs - dma_unmap_sg_attrs - enable_irq - _find_first_zero_bit - jiffies - kasan_flag_enabled kthread_create_on_cpu - kthread_create_on_node - __list_add_valid_or_report - __list_del_entry_valid_or_report - mod_timer - __num_online_cpus - of_find_node_by_name put_disk - queue_work_on - _raw_spin_lock_irqsave - _raw_spin_unlock_irqrestore __register_blkdev sched_setattr_nocheck set_capacity set_disk_ro - sg_init_table sg_nents - 
__sw_hweight64 timer_delete unregister_blkdev - vfree - vzalloc - wait_for_completion - wait_for_completion_interruptible - wake_up_process + +# required by tegradisp-drm.ko + drm_edid_override_connector_update + drm_file_get_master + drm_mode_create_dvi_i_properties diff --git a/android/abi_gki_aarch64_oplus b/android/abi_gki_aarch64_oplus index 219baa63c2ff..bcb36ec25b61 100644 --- a/android/abi_gki_aarch64_oplus +++ b/android/abi_gki_aarch64_oplus @@ -543,7 +543,6 @@ __udp6_lib_lookup udp_table unlock_rename - unregister_memory_notifier unregister_net_sysctl_table unregister_sysctl_table unregister_tcf_proto_ops diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index d17b443f7c9e..09021c9014e8 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -434,6 +434,7 @@ devm_clk_bulk_get_optional devm_clk_get devm_clk_get_optional + devm_clk_get_optional_enabled devm_clk_hw_register devm_clk_put devm_devfreq_add_device @@ -581,6 +582,7 @@ dev_set_name dev_vprintk_emit _dev_warn + dget_parent disable_irq disable_irq_nosync disable_percpu_irq @@ -1806,6 +1808,7 @@ param_set_int param_set_uint param_set_uint_minmax + path_put pci_alloc_irq_vectors pci_alloc_irq_vectors_affinity pci_assign_resource @@ -2039,6 +2042,7 @@ radix_tree_lookup radix_tree_next_chunk raise_softirq + random_kmalloc_seed ___ratelimit raw_notifier_call_chain raw_notifier_chain_register @@ -2587,6 +2591,7 @@ tcpm_sourcing_vbus tcpm_tcpc_reset tcpm_unregister_port + tcpm_update_sink_capabilities tcpm_vbus_change teo_cpu_get_util_threshold teo_cpu_set_util_threshold @@ -2691,6 +2696,7 @@ __traceiter_android_vh_binder_set_priority __traceiter_android_vh_calculate_totalreserve_pages __traceiter_android_vh_check_new_page + __traceiter_android_vh_chk_task __traceiter_android_vh_cpu_idle_enter __traceiter_android_vh_cpu_idle_exit __traceiter_android_vh_dump_throttled_rt_tasks @@ -2708,6 +2714,7 @@ __traceiter_android_vh_post_alloc_hook 
__traceiter_android_vh_prio_inheritance __traceiter_android_vh_prio_restore + __traceiter_android_vh_put_task __traceiter_android_vh_resume_end __traceiter_android_vh_rmqueue __traceiter_android_vh_scheduler_tick @@ -2767,6 +2774,7 @@ __traceiter_softirq_entry __traceiter_softirq_exit __traceiter_suspend_resume + __traceiter_task_newtask __traceiter_workqueue_execute_end __traceiter_workqueue_execute_start trace_output_call @@ -2831,6 +2839,7 @@ __tracepoint_android_vh_binder_set_priority __tracepoint_android_vh_calculate_totalreserve_pages __tracepoint_android_vh_check_new_page + __tracepoint_android_vh_chk_task __tracepoint_android_vh_cpu_idle_enter __tracepoint_android_vh_cpu_idle_exit __tracepoint_android_vh_dump_throttled_rt_tasks @@ -2848,6 +2857,7 @@ __tracepoint_android_vh_post_alloc_hook __tracepoint_android_vh_prio_inheritance __tracepoint_android_vh_prio_restore + __tracepoint_android_vh_put_task __tracepoint_android_vh_resume_end __tracepoint_android_vh_rmqueue __tracepoint_android_vh_scheduler_tick @@ -2909,6 +2919,7 @@ __tracepoint_softirq_entry __tracepoint_softirq_exit __tracepoint_suspend_resume + __tracepoint_task_newtask __tracepoint_workqueue_execute_end __tracepoint_workqueue_execute_start trace_print_array_seq @@ -2953,7 +2964,6 @@ uart_resume_port uart_set_options uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_pixel_watch b/android/abi_gki_aarch64_pixel_watch index a5621a839612..bb17e383c331 100644 --- a/android/abi_gki_aarch64_pixel_watch +++ b/android/abi_gki_aarch64_pixel_watch @@ -407,6 +407,7 @@ devm_power_supply_register devm_pwm_get devm_qcom_smem_state_get + devm_register_sys_off_handler devm_regmap_add_irq_chip devm_regmap_del_irq_chip devm_regmap_field_alloc @@ -487,6 +488,7 @@ divider_ro_round_rate_parent divider_round_rate_parent dma_alloc_attrs + dma_alloc_noncontiguous dma_alloc_pages dma_async_device_register dma_async_device_unregister 
@@ -522,6 +524,7 @@ dma_fence_signal_timestamp_locked dma_fence_wait_timeout dma_free_attrs + dma_free_noncontiguous dma_free_pages dma_get_sgtable_attrs dma_get_slave_channel @@ -553,6 +556,8 @@ dma_sync_single_for_device dma_unmap_page_attrs dma_unmap_sg_attrs + dma_vmap_noncontiguous + dma_vunmap_noncontiguous do_trace_netlink_extack double_rq_lock do_wait_intr @@ -632,8 +637,10 @@ drm_crtc_init_with_planes drm_crtc_send_vblank_event drm_crtc_set_max_vblank_count + drm_crtc_vblank_get drm_crtc_vblank_off drm_crtc_vblank_on + drm_crtc_vblank_put drm_crtc_vblank_reset drm_crtc_wait_one_vblank ___drm_dbg @@ -2286,7 +2293,6 @@ uart_remove_one_port uart_resume_port uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup @@ -2440,6 +2446,7 @@ __xa_alloc __xa_alloc_cyclic xa_destroy + __xa_erase xa_erase xa_find xa_find_after diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 9eff65a1b51a..34da707ff877 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -3,7 +3,6 @@ activate_task add_cpu add_device_randomness - add_memory add_timer add_uevent_var add_wait_queue @@ -1153,7 +1152,6 @@ __memcpy_toio memdup_user memmove - memory_block_size_bytes memory_read_from_buffer memparse mempool_alloc @@ -1616,7 +1614,6 @@ register_inet6addr_notifier register_inetaddr_notifier register_kretprobe - register_memory_notifier register_module_notifier register_netdevice register_netdevice_notifier @@ -2274,7 +2271,6 @@ uart_remove_one_port uart_resume_port uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup diff --git a/android/abi_gki_aarch64_virtual_device b/android/abi_gki_aarch64_virtual_device index 2231cd6ffb61..2fc9e671f888 100644 --- a/android/abi_gki_aarch64_virtual_device +++ b/android/abi_gki_aarch64_virtual_device @@ -344,6 +344,7 @@ _raw_spin_unlock_irqrestore __rcu_read_lock __rcu_read_unlock + random_kmalloc_seed 
refcount_warn_saturate register_netdevice register_netdevice_notifier diff --git a/android/abi_gki_aarch64_vivo b/android/abi_gki_aarch64_vivo index 093e3588d263..12f493f1e6f0 100644 --- a/android/abi_gki_aarch64_vivo +++ b/android/abi_gki_aarch64_vivo @@ -41,7 +41,6 @@ noop_qdisc of_css pfifo_qdisc_ops - pfn_to_online_page proc_mkdir_mode profile_event_register profile_event_unregister @@ -157,7 +156,16 @@ __traceiter_android_vh_lruvec_add_folio __traceiter_android_vh_lruvec_del_folio __traceiter_android_vh_mempool_alloc_skip_wait + __traceiter_android_vh_mm_customize_ac + __traceiter_android_vh_mm_customize_file_is_tiny + __traceiter_android_vh_mm_customize_lru_add_dst + __traceiter_android_vh_mm_customize_pgdat_balanced + __traceiter_android_vh_mm_customize_rmqueue + __traceiter_android_vh_mm_customize_suitable_zone + __traceiter_android_vh_mm_customize_zone_pageset + __traceiter_android_vh_mm_customize_zone_max_order __traceiter_android_vh_mm_free_page + __traceiter_android_vh_mm_isolate_priv_lru __traceiter_android_vh_mmap_region __traceiter_android_vh_mutex_init __traceiter_android_vh_mutex_unlock_slowpath @@ -291,7 +299,16 @@ __tracepoint_android_vh_lruvec_add_folio __tracepoint_android_vh_lruvec_del_folio __tracepoint_android_vh_mempool_alloc_skip_wait + __tracepoint_android_vh_mm_customize_ac + __tracepoint_android_vh_mm_customize_file_is_tiny + __tracepoint_android_vh_mm_customize_lru_add_dst + __tracepoint_android_vh_mm_customize_pgdat_balanced + __tracepoint_android_vh_mm_customize_rmqueue + __tracepoint_android_vh_mm_customize_suitable_zone + __tracepoint_android_vh_mm_customize_zone_pageset + __tracepoint_android_vh_mm_customize_zone_max_order __tracepoint_android_vh_mm_free_page + __tracepoint_android_vh_mm_isolate_priv_lru __tracepoint_android_vh_mmap_region __tracepoint_android_vh_mutex_init __tracepoint_android_vh_mutex_unlock_slowpath diff --git a/android/abi_gki_aarch64_xiaomi_xring b/android/abi_gki_aarch64_xiaomi_xring index 
bbaae6d45165..e4fa93fafd65 100644 --- a/android/abi_gki_aarch64_xiaomi_xring +++ b/android/abi_gki_aarch64_xiaomi_xring @@ -1725,7 +1725,6 @@ register_inet6addr_notifier register_inetaddr_notifier register_kprobe - register_memory_notifier register_netdev register_netdevice register_netdevice_notifier @@ -2640,7 +2639,6 @@ uart_resume_port uart_set_options uart_suspend_port - uart_try_toggle_sysrq uart_unregister_driver uart_update_timeout uart_write_wakeup @@ -2658,7 +2656,6 @@ unregister_inet6addr_notifier unregister_inetaddr_notifier unregister_kprobe - unregister_memory_notifier unregister_netdev unregister_netdevice_notifier unregister_netdevice_queue diff --git a/android/abi_gki_protected_exports_aarch64 b/android/abi_gki_protected_exports_aarch64 index 9e4b937ac902..89ab2a4d008d 100644 --- a/android/abi_gki_protected_exports_aarch64 +++ b/android/abi_gki_protected_exports_aarch64 @@ -383,10 +383,6 @@ slhc_init slhc_remember slhc_toss slhc_uncompress -tipc_dump_done -tipc_dump_start -tipc_nl_sk_walk -tipc_sk_fill_sock_diag unregister_candev unregister_pppox_proto usb_serial_claim_interface diff --git a/android/gki/aarch64/afdo/README.md b/android/gki/aarch64/afdo/README.md index 8c7781fd62ec..aa5cffe89bc2 100644 --- a/android/gki/aarch64/afdo/README.md +++ b/android/gki/aarch64/afdo/README.md @@ -5,19 +5,22 @@ optimize kernel builds for specific architectures and kernel versions. ## kernel.afdo -kernel.afdo is an AArch64 kernel profile collected on kernel version 6.6.82 ( -SHA b62ea68f41a901d5f07f48bd6f1d3a117d801411, build server ID 13287877) using Pixel 6. +kernel.afdo is an AArch64 kernel profile collected on kernel version 6.6.92 ( +SHA fe630a04152399fa0646fa16cabae8dee2901a20, build server ID P100391429) using Pixel 6. 
### Performance improvements -| Benchmark | Improvement | | --------------------- | ----------- | | Boot time | 2.2% | | Cold App launch time | 2.7% | | Binder-rpc | 4.4% | | Binder-addints | 14.1% | | Hwbinder | 17.0% | | Bionic (syscall_mmap) | 1.6% | +| Benchmark | Improvement | | --------------------- | ------------------------------------------------------------------------ | | Boot time | 1.5% | | Cold App launch time | 3.3% (Only for two apps; most app launch tests are broken, see b/432087996) | | Binder-rpc | 4.4% | | Binder-addints | 15.4% | | Hwbinder | 15.2% | | Bionic (syscall_mmap) | 5.6% | | Bionic (pthread) | 1.9% | | Bionic (stdio) | 5.4% | | Bionic (all) | 2.9% | Benchmark results were tested on Pixel 6. diff --git a/android/gki/aarch64/afdo/kernel.afdo b/android/gki/aarch64/afdo/kernel.afdo index 3ce73ba9446c..a3c8e7b33e46 100644 Binary files a/android/gki/aarch64/afdo/kernel.afdo and b/android/gki/aarch64/afdo/kernel.afdo differ diff --git a/arch/Kconfig b/arch/Kconfig index 536e0446cfed..d4f60aae9860 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1507,6 +1507,10 @@ config ARCH_HAS_NONLEAF_PMD_YOUNG address translations. Page table walkers that clear the accessed bit may use this capability to reduce their search space.
+config MICRODROID + bool "Enables support for Microdroid VM" + default n + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a42e4cd11db2..eec711cdd408 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -208,6 +208,8 @@ void __init bootmem_init(void) find_limits(&min_low_pfn, &max_low_pfn, &max_pfn); + early_memzero((phys_addr_t)min_low_pfn << PAGE_SHIFT, + (phys_addr_t)max_low_pfn << PAGE_SHIFT); early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT, (phys_addr_t)max_low_pfn << PAGE_SHIFT); diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 7b89c07f23b5..464248d14684 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -48,11 +48,13 @@ CONFIG_EXPERT=y # CONFIG_FHANDLE is not set CONFIG_KALLSYMS_ALL=y # CONFIG_RSEQ is not set +# CONFIG_CACHESTAT_SYSCALL is not set CONFIG_PROFILING=y CONFIG_ARCH_SUNXI=y CONFIG_ARCH_HISI=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_TEGRA=y +CONFIG_ARM64_VA_BITS_48=y CONFIG_SCHED_MC=y CONFIG_NR_CPUS=32 CONFIG_PARAVIRT_TIME_ACCOUNTING=y @@ -63,14 +65,11 @@ CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y CONFIG_SETEND_EMULATION=y CONFIG_ARM64_PMEM=y -# CONFIG_ARM64_BTI_KERNEL is not set CONFIG_RANDOMIZE_BASE=y # CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set -CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure kasan.stacktrace=off kvm-arm.mode=protected bootconfig ioremap_guard" CONFIG_CMDLINE_EXTEND=y # CONFIG_DMI is not set -CONFIG_HIBERNATION=y CONFIG_PM_USERSPACE_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 @@ -96,9 +95,11 @@ CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_SHADOW_CALL_STACK=y CONFIG_CFI_CLANG=y +CONFIG_ARCH_MMAP_RND_BITS=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y CONFIG_MODULE_SCMVERSION=y 
CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_PROTECT=y @@ -113,14 +114,12 @@ CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_GKI_HACKS_TO_FIX=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=y # CONFIG_SLAB_MERGE_DEFAULT is not set CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_RANDOM_KMALLOC_CACHES=y CONFIG_SHUFFLE_PAGE_ALLOCATOR=y # CONFIG_COMPAT_BRK is not set -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y @@ -152,6 +151,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_NET_IPIP=y CONFIG_NET_IPGRE_DEMUX=y CONFIG_NET_IPGRE=y +CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=y CONFIG_INET_ESP=y CONFIG_INET_UDP_DIAG=y @@ -250,7 +250,6 @@ CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y CONFIG_IP6_NF_MANGLE=y CONFIG_IP6_NF_RAW=y -CONFIG_TIPC=m CONFIG_L2TP=m CONFIG_BRIDGE=y CONFIG_VLAN_8021Q=m @@ -326,6 +325,8 @@ CONFIG_ARM_SCMI_TRANSPORT_VIRTIO=y CONFIG_ARM_SCPI_PROTOCOL=y # CONFIG_ARM_SCPI_POWER_DOMAIN is not set # CONFIG_EFI_ARMSTUB_DTB_LOADER is not set +CONFIG_RESET_ATTACK_MITIGATION=y +CONFIG_EFI_DISABLE_PCI_DMA=y CONFIG_GNSS=y CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=y @@ -408,6 +409,8 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +# CONFIG_LEGACY_TIOCSTI is not set +# CONFIG_LDISC_AUTOLOAD is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -722,7 +725,9 @@ CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_RANDSTRUCT_FULL=y CONFIG_CRYPTO_ECDH=y CONFIG_CRYPTO_DES=y CONFIG_CRYPTO_ADIANTUM=y @@ -760,6 +765,8 @@ CONFIG_MODULE_ALLOW_BTF_MISMATCH=y CONFIG_HEADERS_INSTALL=y # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y 
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 +# CONFIG_MAGIC_SYSRQ_SERIAL is not set CONFIG_UBSAN=y CONFIG_UBSAN_TRAP=y # CONFIG_UBSAN_SHIFT is not set @@ -778,6 +785,7 @@ CONFIG_PANIC_TIMEOUT=-1 CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_SG=y CONFIG_HIST_TRIGGERS=y CONFIG_PID_IN_CONTEXTIDR=y CONFIG_KUNIT=m diff --git a/arch/arm64/configs/microdroid_defconfig b/arch/arm64/configs/microdroid_defconfig index 2d67e00899b5..a2b7bde89e8c 100644 --- a/arch/arm64/configs/microdroid_defconfig +++ b/arch/arm64/configs/microdroid_defconfig @@ -19,8 +19,11 @@ CONFIG_MEMCG=y CONFIG_BOOT_CONFIG=y CONFIG_EXPERT=y # CONFIG_IO_URING is not set +# CONFIG_RSEQ is not set +# CONFIG_CACHESTAT_SYSCALL is not set CONFIG_PROFILING=y CONFIG_KEXEC_FILE=y +CONFIG_ARM64_VA_BITS_48=y CONFIG_SCHED_MC=y CONFIG_NR_CPUS=32 CONFIG_PARAVIRT_TIME_ACCOUNTING=y @@ -30,39 +33,49 @@ CONFIG_RANDOMIZE_BASE=y CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off cgroup_disable=pressure ioremap_guard panic=-1 bootconfig" CONFIG_CMDLINE_EXTEND=y # CONFIG_EFI is not set -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set +# CONFIG_SUSPEND is not set CONFIG_CPU_FREQ=y CONFIG_ANDROID_V_CPUFREQ_VIRT=y CONFIG_VIRTUALIZATION=y CONFIG_JUMP_LABEL=y CONFIG_SHADOW_CALL_STACK=y CONFIG_CFI_CLANG=y +CONFIG_ARCH_MMAP_RND_BITS=33 +CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y +CONFIG_MICRODROID=y # CONFIG_BLOCK_LEGACY_AUTOLOAD is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set # CONFIG_MQ_IOSCHED_DEADLINE is not set # CONFIG_MQ_IOSCHED_KYBER is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=y # CONFIG_SLAB_MERGE_DEFAULT is not set CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_RANDOM_KMALLOC_CACHES=y CONFIG_SHUFFLE_PAGE_ALLOCATOR=y # CONFIG_COMPAT_BRK is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +# 
CONFIG_ZONE_DMA is not set +# CONFIG_ZONE_DMA32 is not set CONFIG_ANON_VMA_NAME=y CONFIG_USERFAULTFD=y CONFIG_LRU_GEN=y CONFIG_NET=y CONFIG_UNIX=y CONFIG_INET=y +CONFIG_SYN_COOKIES=y +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set CONFIG_VSOCKETS=y CONFIG_VIRTIO_VSOCKETS=y # CONFIG_WIRELESS is not set +# CONFIG_ETHTOOL_NETLINK is not set CONFIG_PCI=y CONFIG_PCIEPORTBUS=y CONFIG_PCIEAER=y @@ -70,27 +83,28 @@ CONFIG_PCI_IOV=y # CONFIG_VGA_ARB is not set CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_ENDPOINT=y -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_CACHE is not set +# CONFIG_FW_LOADER is not set CONFIG_ARM_SCMI_PROTOCOL=y # CONFIG_ARM_SCMI_POWER_DOMAIN is not set CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_VIRTIO_BLK=y CONFIG_OPEN_DICE=y CONFIG_VCPU_STALL_DETECTOR=y CONFIG_MD=y +# CONFIG_MD_BITMAP_FILE is not set CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y +# CONFIG_DM_USER is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_LEGACY_PTYS is not set +# CONFIG_LEGACY_TIOCSTI is not set +# CONFIG_LDISC_AUTOLOAD is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -106,19 +120,20 @@ CONFIG_POWER_RESET_SYSCON=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_EDAC=y CONFIG_RTC_CLASS=y # CONFIG_RTC_NVMEM is not set CONFIG_RTC_DRV_PL030=y CONFIG_RTC_DRV_PL031=y -CONFIG_DMABUF_HEAPS=y -CONFIG_DMABUF_SYSFS_STATS=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y -CONFIG_STAGING=y +# CONFIG_SURFACE_PLATFORMS is not set CONFIG_HWSPINLOCK=y +# CONFIG_IOMMU_SUPPORT is 
not set +# CONFIG_ANDROID_KABI_RESERVE is not set +# CONFIG_ANDROID_VENDOR_OEM_DATA is not set CONFIG_EXT4_FS=y # CONFIG_EXT4_USE_FOR_EXT2 is not set CONFIG_EXT4_FS_POSIX_ACL=y @@ -133,14 +148,17 @@ CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_CRYPTO_SHA1=y +CONFIG_RANDSTRUCT_FULL=y CONFIG_CRYPTO_HCTR2=y CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_LZ4=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y CONFIG_CRYPTO_POLYVAL_ARM64_CE=y @@ -155,6 +173,8 @@ CONFIG_DEBUG_INFO_REDUCED=y CONFIG_HEADERS_INSTALL=y # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 +# CONFIG_MAGIC_SYSRQ_SERIAL is not set CONFIG_UBSAN=y CONFIG_UBSAN_TRAP=y # CONFIG_UBSAN_SHIFT is not set @@ -164,11 +184,16 @@ CONFIG_PAGE_OWNER=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_KASAN=y CONFIG_KASAN_HW_TAGS=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=500 +CONFIG_KFENCE_NUM_OBJECTS=63 +CONFIG_KFENCE_STATIC_KEYS=y CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=-1 CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_SG=y CONFIG_HIST_TRIGGERS=y CONFIG_PID_IN_CONTEXTIDR=y # CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 97932fbf973d..e940e6e3d228 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -130,7 +130,11 @@ #ifdef CONFIG_ARM64_FORCE_52BIT #define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) #else -#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3) +/* + * Originally used DEFAULT_MAP_WINDOW_64, switched to DEFAULT_MAP_WINDOW for compatibility with 39-bit mode. + * Will return the value of DEFAULT_MAP_WINDOW_64 if compat_va_39_bit is not enabled. 
+ */ +#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW / 3) #endif /* CONFIG_ARM64_FORCE_52BIT */ #ifndef __ASSEMBLY__ @@ -186,13 +190,19 @@ struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); +// same as mmap_rnd_bits when VA_BITS == 39 +#define MMAP_RND_BITS_39_BIT 24 + /* 1GB of VA */ #ifdef CONFIG_COMPAT #define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ - 0x7ff >> (PAGE_SHIFT - 12) : \ - 0x3ffff >> (PAGE_SHIFT - 12)) + ((1UL << mmap_rnd_compat_bits) - 1) >> (PAGE_SHIFT - 12) : \ + (test_thread_flag(TIF_39BIT) ? \ + ((1UL << MMAP_RND_BITS_39_BIT) - 1) >> (PAGE_SHIFT - 12) : \ + ((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12))) + #else -#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) +#define STACK_RND_MASK (((1UL << mmap_rnd_bits) - 1) >> (PAGE_SHIFT - 12)) #endif #ifdef __AARCH64EB__ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 9cc0033513a9..ca0c586379eb 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -115,6 +115,7 @@ enum __kvm_host_smccc_func { __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys, __KVM_HOST_SMCCC_FUNC___pkvm_host_hvc_pd, __KVM_HOST_SMCCC_FUNC___pkvm_stage2_snapshot, + __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iotlb_sync_map, /* * Start of the dynamically registered hypercalls. 
Start a bit diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h index 80a1526684cb..d76ad88a948c 100644 --- a/arch/arm64/include/asm/kvm_pkvm.h +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -559,11 +559,16 @@ static inline unsigned long __hyp_pgtable_moveable_regs_pages(void) return res; } +extern u64 kvm_nvhe_sym(hyp_lm_size_mb); + static inline unsigned long hyp_s1_pgtable_pages(void) { unsigned long res; - res = __hyp_pgtable_moveable_regs_pages(); + if (!kvm_nvhe_sym(hyp_lm_size_mb)) + res = __hyp_pgtable_moveable_regs_pages(); + else + res = __hyp_pgtable_max_pages(kvm_nvhe_sym(hyp_lm_size_mb) * SZ_1M / PAGE_SIZE); /* Allow 1 GiB for private mappings */ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); diff --git a/arch/arm64/include/asm/kvm_pkvm_module.h b/arch/arm64/include/asm/kvm_pkvm_module.h index 2be3f122212e..77c4b7c717f1 100644 --- a/arch/arm64/include/asm/kvm_pkvm_module.h +++ b/arch/arm64/include/asm/kvm_pkvm_module.h @@ -336,5 +336,16 @@ static inline int pkvm_register_el2_mod_call(dyn_hcall_t hfn, \ res.a1; \ }) + +#define pkvm_el2_mod_call_smccc(id, ...) \ + ({ \ + struct arm_smccc_res res; \ + \ + arm_smccc_1_1_hvc(KVM_HOST_SMCCC_ID(id), \ + ##__VA_ARGS__, &res); \ + WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \ + \ + res; \ + }) #endif #endif diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 9911dab93e8b..7e88b0280eec 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -53,6 +53,7 @@ #define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN) #define TASK_SIZE_64 (UL(1) << vabits_actual) +#define TASK_SIZE_39 (UL(1) << 39) #define TASK_SIZE_MAX (UL(1) << VA_BITS) #ifdef CONFIG_COMPAT @@ -66,11 +67,11 @@ #define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE) #endif /* CONFIG_ARM64_64K_PAGES */ #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) + TASK_SIZE_32 : (test_thread_flag(TIF_39BIT) ? 
TASK_SIZE_39 : TASK_SIZE_64)) #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ - TASK_SIZE_32 : TASK_SIZE_64) + TASK_SIZE_32 : (test_tsk_thread_flag(tsk, TIF_39BIT) ? TASK_SIZE_39 : TASK_SIZE_64)) #define DEFAULT_MAP_WINDOW (test_thread_flag(TIF_32BIT) ? \ - TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64) + TASK_SIZE_32 : (test_thread_flag(TIF_39BIT) ? TASK_SIZE_39 : DEFAULT_MAP_WINDOW_64)) #else #define TASK_SIZE TASK_SIZE_64 #define DEFAULT_MAP_WINDOW DEFAULT_MAP_WINDOW_64 @@ -87,7 +88,7 @@ #ifdef CONFIG_COMPAT #define AARCH32_VECTORS_BASE 0xffff0000 #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ - AARCH32_VECTORS_BASE : STACK_TOP_MAX) + AARCH32_VECTORS_BASE : (test_thread_flag(TIF_39BIT) ? TASK_SIZE_39 : STACK_TOP_MAX)) #else #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 553d1bc559c6..c532ceb830aa 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -80,6 +80,7 @@ void arch_setup_new_exec(void); #define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ #define TIF_SME 27 /* SME in use */ #define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */ +#define TIF_39BIT 30 /* compat_va_39_bit mode */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index a1e0cc5353fb..83827384982e 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1873,13 +1873,17 @@ static void fpsimd_flush_cpu_state(void) */ void fpsimd_save_and_flush_cpu_state(void) { + unsigned long flags; + if (!system_supports_fpsimd()) return; WARN_ON(preemptible()); + local_irq_save(flags); __get_cpu_fpsimd_context(); fpsimd_save(); fpsimd_flush_cpu_state(); __put_cpu_fpsimd_context(); + local_irq_restore(flags); } #ifdef CONFIG_KERNEL_MODE_NEON diff --git 
a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h index 4afb0b83115e..388ea6e0bd6e 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h +++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h @@ -54,6 +54,8 @@ void kvm_iommu_iotlb_gather_add_page(struct kvm_hyp_iommu_domain *domain, void kvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end, enum kvm_pgtable_prot prot); int kvm_iommu_snapshot_host_stage2(struct kvm_hyp_iommu_domain *domain); +int kvm_iommu_iotlb_sync_map(pkvm_handle_t domain_id, + unsigned long iova, size_t size); #define KVM_IOMMU_PADDR_CACHE_MAX ((size_t)511) /** @@ -112,6 +114,7 @@ static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu) * @map_pages: Map pages in a domain. * @unmap_pages: Unmap pages from a domain. * @iova_to_phys: get physical address from IOVA in a domain. + * @iotlb_sync_map: Sync mapping created using @map_pages to the hardware. */ struct kvm_iommu_ops { int (*init)(unsigned long arg); @@ -138,7 +141,8 @@ struct kvm_iommu_ops { struct iommu_iotlb_gather *gather, struct kvm_iommu_paddr_cache *cache); phys_addr_t (*iova_to_phys)(struct kvm_hyp_iommu_domain *domain, unsigned long iova); - ANDROID_KABI_RESERVE(1); + ANDROID_KABI_USE(1, int (*iotlb_sync_map)(struct kvm_hyp_iommu_domain *domain, + unsigned long iova, size_t size)); ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); ANDROID_KABI_RESERVE(4); diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index a5ffc4cd3f70..7fd93d0cda91 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -1574,6 +1574,17 @@ static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_c hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs)); } +static void handle___pkvm_host_iommu_iotlb_sync_map(struct kvm_cpu_context *host_ctxt) +{ + unsigned long ret; + DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1); + DECLARE_REG(unsigned long, iova, host_ctxt, 2); 
+ DECLARE_REG(size_t, size, host_ctxt, 3); + + ret = kvm_iommu_iotlb_sync_map(domain, iova, size); + hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs)); +} + static void handle___pkvm_iommu_init(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct kvm_iommu_ops *, ops, host_ctxt, 1); @@ -1671,6 +1682,7 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__pkvm_host_iommu_iova_to_phys), HANDLE_FUNC(__pkvm_host_hvc_pd), HANDLE_FUNC(__pkvm_stage2_snapshot), + HANDLE_FUNC(__pkvm_host_iommu_iotlb_sync_map), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c index f06b84357eb4..66fd9ed618b4 100644 --- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c +++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c @@ -221,15 +221,17 @@ void __repudiate_host_page(void *addr, unsigned long order, int kvm_iommu_refill(struct kvm_hyp_memcache *host_mc) { + struct kvm_hyp_memcache tmp = *host_mc; + if (!kvm_iommu_ops) return -EINVAL; /* Paired with smp_wmb() in kvm_iommu_init() */ smp_rmb(); - while (host_mc->nr_pages) { - unsigned long order = FIELD_GET(~PAGE_MASK, host_mc->head); - phys_addr_t phys = host_mc->head & PAGE_MASK; + while (tmp.nr_pages) { + unsigned long order = FIELD_GET(~PAGE_MASK, tmp.head); + phys_addr_t phys = tmp.head & PAGE_MASK; struct hyp_pool *pool = &iommu_system_pool; u64 nr_pages; void *addr; @@ -238,16 +240,18 @@ int kvm_iommu_refill(struct kvm_hyp_memcache *host_mc) !IS_ALIGNED(phys, PAGE_SIZE << order)) return -EINVAL; - addr = admit_host_page(host_mc, order); + addr = admit_host_page(&tmp, order); if (!addr) return -EINVAL; + *host_mc = tmp; if (kvm_iommu_donate_from_cma(phys, order)) { hyp_spin_lock(&__block_pools_lock); pool = __get_empty_block_pool(phys); hyp_spin_unlock(&__block_pools_lock); if (!pool) { - __repudiate_host_page(addr, order, host_mc); + __repudiate_host_page(addr, order, &tmp); + *host_mc = tmp; return -EBUSY; } } else { @@ 
-825,3 +829,28 @@ int kvm_iommu_snapshot_host_stage2(struct kvm_hyp_iommu_domain *domain) return ret; } + +int kvm_iommu_iotlb_sync_map(pkvm_handle_t domain_id, + unsigned long iova, size_t size) +{ + struct kvm_hyp_iommu_domain *domain; + int ret; + + if (!kvm_iommu_ops || !kvm_iommu_ops->iotlb_sync_map) + return -ENODEV; + + if (!size || (iova + size < iova)) + return -EINVAL; + + if (domain_id == KVM_IOMMU_DOMAIN_IDMAP_ID) + return -EINVAL; + + domain = handle_to_domain(domain_id); + + if (!domain || domain_get(domain)) + return -EINVAL; + + ret = kvm_iommu_ops->iotlb_sync_map(domain, iova, size); + domain_put(domain); + return ret; +} diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index afdd36e4ae8a..9fa23be0a8aa 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -398,11 +398,6 @@ static int relinquish_walker(const struct kvm_pgtable_visit_ctx *ctx, phys = kvm_pte_to_phys(pte); phys += ctx->addr - addr; - if (state == PKVM_PAGE_OWNED) { - hyp_poison_page(phys, PAGE_SIZE); - psci_mem_protect_dec(1); - } - data->pa = phys; return 0; @@ -441,7 +436,16 @@ int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu, if (ret) goto end; + if (pkvm_hyp_vcpu_is_protected(vcpu)) { + hyp_poison_page(data.pa, PAGE_SIZE); + psci_mem_protect_dec(1); + } + WARN_ON(host_stage2_set_owner_locked(data.pa, PAGE_SIZE, PKVM_ID_HOST)); + + if (pkvm_ipa_range_has_pvmfw(vm, ipa, ipa + PAGE_SIZE)) + vm->kvm.arch.pkvm.pvmfw_load_addr = PVMFW_INVALID_LOAD_ADDR; + end: guest_unlock_component(vm); host_unlock_component(); @@ -1049,7 +1053,10 @@ static int ___host_check_page_state_range(u64 addr, u64 size, .desired = state, .get_page_state = host_get_mmio_page_state, }; - u64 end = addr + size; + u64 end; + + if (check_add_overflow(addr, size, &end)) + return -EINVAL; hyp_assert_lock_held(&host_mmu.lock); @@ -1079,7 +1086,10 @@ static int __host_check_page_state_range(u64 addr, u64 size, { struct 
memblock_region *reg; struct kvm_mem_range range; - u64 end = addr + size; + u64 end; + + if (check_add_overflow(addr, size, &end)) + return -EINVAL; /* Can't check the state of both MMIO and memory regions at once */ reg = find_mem_range(addr, &range); @@ -1390,6 +1400,10 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr, .desired = state, .get_page_state = guest_get_page_state, }; + u64 end; + + if (check_add_overflow(addr, size, &end)) + return -EINVAL; hyp_assert_lock_held(&vm->pgtable_lock); return check_page_state_range(&vm->pgt, addr, size, &d); @@ -1738,6 +1752,11 @@ static int __do_share(struct pkvm_mem_transition *tx, break; case PKVM_ID_HYP: ret = hyp_complete_share(checked_tx, tx->completer.prot); + if (ret == -ENOMEM) { + WARN_ON(tx->initiator.id != PKVM_ID_HOST); + WARN_ON(host_initiate_unshare(checked_tx)); + return ret; + } break; case PKVM_ID_FFA: /* @@ -1753,7 +1772,7 @@ static int __do_share(struct pkvm_mem_transition *tx, ret = -EINVAL; } - return ret; + return WARN_ON(ret); } /* @@ -1779,7 +1798,7 @@ static int do_share(struct pkvm_mem_transition *share, return ret; ret = __do_share(share, &checked_tx); - if (WARN_ON(ret)) + if (ret) return ret; *nr_shared = checked_tx.nr_pages; @@ -1962,6 +1981,16 @@ static int __do_donate(struct pkvm_mem_transition *tx) break; case PKVM_ID_HYP: ret = hyp_complete_donation(completer_addr, tx); + if (ret == -ENOMEM) { + struct pkvm_mem_transition abort = { + .nr_pages = tx->nr_pages, + .completer.id = PKVM_ID_HOST, + }; + + WARN_ON(tx->initiator.id != PKVM_ID_HOST); + WARN_ON(host_complete_donation(completer_addr, &abort)); + return -ENOMEM; + } break; case PKVM_ID_GUEST: ret = guest_complete_donation(completer_addr, tx); @@ -1970,7 +1999,7 @@ static int __do_donate(struct pkvm_mem_transition *tx) ret = -EINVAL; } - return ret; + return WARN_ON(ret); } /* @@ -1990,7 +2019,7 @@ static int do_donate(struct pkvm_mem_transition *donation) if (ret) return ret; - return 
WARN_ON(__do_donate(donation)); + return __do_donate(donation); } int __pkvm_host_share_hyp(u64 pfn) @@ -2592,10 +2621,14 @@ static int guest_get_valid_pte(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa, size_t size = PAGE_SIZE << order; u64 phys = hyp_pfn_to_phys(pfn); u32 level; + u64 end; if (order && size != PMD_SIZE) return -EINVAL; + if (check_add_overflow(phys, size, &end)) + return -EINVAL; + WARN_ON(kvm_pgtable_get_leaf(&vm->pgt, ipa, pte, &level)); if (kvm_granule_size(level) != size) diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c index e78f42a6e39e..8f44bccfd0c1 100644 --- a/arch/arm64/kvm/hyp/nvhe/mm.c +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -113,7 +113,13 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size, int __hyp_allocator_map(unsigned long va, phys_addr_t phys) { - return __pkvm_create_mappings(va, PAGE_SIZE, phys, PAGE_HYP); + int ret = __pkvm_create_mappings(va, PAGE_SIZE, phys, PAGE_HYP); + + /* Let's not confuse the hyp_alloc callers who will try to top-up pointlessly on -ENOMEM */ + if (ret == -ENOMEM) + ret = -EBUSY; + + return ret; } #ifdef CONFIG_NVHE_EL2_DEBUG @@ -541,6 +547,7 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr) return ret; } +/* Note: The caller has to use a local copy of the arg */ void *admit_host_page(void *arg, unsigned long order) { phys_addr_t p; @@ -599,12 +606,19 @@ int refill_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc) { unsigned long order; void *p; + struct kvm_hyp_memcache tmp = *host_mc; + u64 nr_pages; - while (host_mc->nr_pages) { - order = FIELD_GET(~PAGE_MASK, host_mc->head); - p = admit_host_page(host_mc, order); + while (tmp.nr_pages) { + order = FIELD_GET(~PAGE_MASK, tmp.head); + if (check_shl_overflow(1UL, order, &nr_pages)) + return -EINVAL; + + p = admit_host_page(&tmp, order); if (!p) return -EINVAL; + *host_mc = tmp; + hyp_virt_to_page(p)->order = order; hyp_set_page_refcounted(hyp_virt_to_page(p)); hyp_put_page(pool, p); diff 
--git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index feaad44fe204..345fd0f0c485 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -662,11 +662,18 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, { int ret = 0; u32 mp_state; + struct kvm_hyp_req *hyp_reqs; if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1)) return -EBUSY; - hyp_vcpu->vcpu.arch.hyp_reqs = kern_hyp_va(host_vcpu->arch.hyp_reqs); + hyp_reqs = READ_ONCE(host_vcpu->arch.hyp_reqs); + if (!PAGE_ALIGNED(hyp_reqs)) { + hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1); + return -EINVAL; + } + + hyp_vcpu->vcpu.arch.hyp_reqs = kern_hyp_va(hyp_reqs); if (hyp_pin_shared_mem(hyp_vcpu->vcpu.arch.hyp_reqs, hyp_vcpu->vcpu.arch.hyp_reqs + 1)) { hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1); diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 140c170b4f09..83ec0ca4754a 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -29,6 +29,8 @@ phys_addr_t pvmfw_size; #define hyp_percpu_size ((unsigned long)__per_cpu_end - \ (unsigned long)__per_cpu_start) +u64 hyp_lm_size_mb; + static void *vmemmap_base; static void *vm_table_base; static void *hyp_pgt_base; diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index 1bd41b08d968..bf530b3c7920 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -528,7 +528,7 @@ static const struct sys_reg_desc_reset pvm_sys_reg_reset_vals[] = { RESET_VAL(CPACR_EL1, 0), RESET_VAL(ZCR_EL1, 0), RESET_VAL(TCR_EL1, 0), - RESET_VAL(VBAR_EL1, 0), + RESET_VAL(VBAR_EL1, 0x1de7ec7edbadc000ULL), RESET_VAL(CONTEXTIDR_EL1, 0), RESET_FUNC(AMAIR_EL1, reset_amair_el1), RESET_VAL(CNTKCTL_EL1, 0), diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c index 140e9a17a5ce..20460aa89f99 100644 --- a/arch/arm64/kvm/hyp/nvhe/trace.c +++ b/arch/arm64/kvm/hyp/nvhe/trace.c @@ -472,7 +472,7 
@@ int __pkvm_load_tracing(unsigned long desc_hva, size_t desc_size) struct hyp_trace_desc *desc = (struct hyp_trace_desc *)kern_hyp_va(desc_hva); struct trace_page_desc *trace_pdesc = &desc->page_desc; struct rb_page_desc *pdesc; - int ret, cpu; + int ret, pdesc_cpu; if (!desc_size || !PAGE_ALIGNED(desc_hva) || !PAGE_ALIGNED(desc_size)) return -EINVAL; @@ -486,9 +486,9 @@ int __pkvm_load_tracing(unsigned long desc_hva, size_t desc_size) trace_clock_update(&desc->clock_data); - for_each_rb_page_desc(pdesc, cpu, trace_pdesc) { + for_each_rb_page_desc(pdesc, pdesc_cpu, trace_pdesc) { struct hyp_rb_per_cpu *cpu_buffer; - int cpu; + unsigned int cpu; ret = -EINVAL; if (!rb_cpu_fits_desc(pdesc, desc_hva + desc_size)) diff --git a/arch/arm64/kvm/hyp_events.c b/arch/arm64/kvm/hyp_events.c index 086931bec32c..66ae6c6a469f 100644 --- a/arch/arm64/kvm/hyp_events.c +++ b/arch/arm64/kvm/hyp_events.c @@ -269,30 +269,36 @@ static struct hyp_event_mod_tables { struct hyp_event *hyp_trace_find_event(int id) { - struct hyp_event *event = __hyp_events_start + id; + struct hyp_event *event; + struct hyp_event_table *table; + int i, j; - if ((unsigned long)event >= (unsigned long)__hyp_events_end) { - struct hyp_event_table *table; - - event = NULL; - id -= nr_events(__hyp_events_start, __hyp_events_end); - - rcu_read_lock(); - table = rcu_dereference(mod_event_tables.tables); + for (event = __hyp_events_start; event < __hyp_events_end; event++) { + if (event->id == id) + return event; + if (event->id > id) + return NULL; + } - for (int i = 0; i < mod_event_tables.nr_tables; i++) { - if (table->nr_events <= id) { - id -= table->nr_events; - table++; - continue; + event = NULL; + rcu_read_lock(); + table = rcu_dereference(mod_event_tables.tables); + for (i = 0; i < mod_event_tables.nr_tables; i++, table++) { + event = table->start; + for (j = 0; j < table->nr_events; j++, event++) { + if (event->id == id) + goto end; + if (event->id > id) { + event = NULL; + goto end; } - - event = 
table->start + id; - break; } - rcu_read_unlock(); + event = NULL; } +end: + rcu_read_unlock(); + return event; } diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c index 61b524129366..addae8d0891c 100644 --- a/arch/arm64/kvm/pkvm.c +++ b/arch/arm64/kvm/pkvm.c @@ -174,6 +174,12 @@ static int __init register_moveable_regions(void) return 0; } +static int __init early_hyp_lm_size_mb_cfg(char *arg) +{ + return kstrtoull(arg, 10, &kvm_nvhe_sym(hyp_lm_size_mb)); +} +early_param("kvm-arm.hyp_lm_size_mb", early_hyp_lm_size_mb_cfg); + void __init kvm_hyp_reserve(void) { u64 hyp_mem_pages = 0; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7c85aa9aeeb3..cdd910dfef8b 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -425,18 +425,15 @@ void __init arm64_memblock_init(void) if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern u16 memstart_offset_seed; - u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); - int parange = cpuid_feature_extract_unsigned_field( - mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); - s64 range = linear_region_size - - BIT(id_aa64mmfr0_parange_to_phys_shift(parange)); + u64 range = linear_region_size - + (memblock_end_of_DRAM() - memblock_start_of_DRAM()); /* * If the size of the linear region exceeds, by a sufficient - * margin, the size of the region that the physical memory can - * span, randomize the linear region as well. + * margin, the size of the region that the available physical + * memory spans, randomize the linear region as well. 
*/ - if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) { + if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) { range /= ARM64_MEMSTART_ALIGN; memstart_addr -= ARM64_MEMSTART_ALIGN * ((range * memstart_offset_seed) >> 16); @@ -466,6 +463,7 @@ void __init bootmem_init(void) min = PFN_UP(memblock_start_of_DRAM()); max = PFN_DOWN(memblock_end_of_DRAM()); + early_memzero(min << PAGE_SHIFT, max << PAGE_SHIFT); early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT); max_pfn = max_low_pfn = max; diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 6fe64231eb62..1ff15d4e5dc7 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -52,6 +52,7 @@ CONFIG_EXPERT=y # CONFIG_PCSPKR_PLATFORM is not set CONFIG_KALLSYMS_ALL=y # CONFIG_RSEQ is not set +# CONFIG_CACHESTAT_SYSCALL is not set CONFIG_PROFILING=y CONFIG_SMP=y CONFIG_X86_X2APIC=y @@ -63,9 +64,9 @@ CONFIG_NR_CPUS=32 # CONFIG_X86_5LEVEL is not set # CONFIG_MTRR_SANITIZER is not set CONFIG_EFI=y +CONFIG_EFI_STUB=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure bootconfig" -CONFIG_HIBERNATION=y CONFIG_PM_USERSPACE_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 @@ -88,9 +89,11 @@ CONFIG_KVM_AMD=y CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_CFI_CLANG=y +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y CONFIG_MODULE_SCMVERSION=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_PROTECT=y @@ -104,15 +107,13 @@ CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_GKI_HACKS_TO_FIX=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=y # CONFIG_SLAB_MERGE_DEFAULT is not set CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_RANDOM_KMALLOC_CACHES=y CONFIG_SHUFFLE_PAGE_ALLOCATOR=y # CONFIG_COMPAT_BRK is not set 
-CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y CONFIG_CLEANCACHE=y @@ -143,6 +144,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_NET_IPIP=y CONFIG_NET_IPGRE_DEMUX=y CONFIG_NET_IPGRE=y +CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=y CONFIG_INET_ESP=y CONFIG_INET_UDP_DIAG=y @@ -306,6 +308,8 @@ CONFIG_PCI_ENDPOINT=y CONFIG_FW_LOADER_USER_HELPER=y # CONFIG_FW_CACHE is not set CONFIG_REGMAP_KUNIT=m +CONFIG_RESET_ATTACK_MITIGATION=y +CONFIG_EFI_DISABLE_PCI_DMA=y CONFIG_GNSS=y CONFIG_OF=y CONFIG_ZRAM=m @@ -388,6 +392,8 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_UINPUT=y # CONFIG_VT is not set # CONFIG_LEGACY_PTYS is not set +# CONFIG_LEGACY_TIOCSTI is not set +# CONFIG_LDISC_AUTOLOAD is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -656,7 +662,9 @@ CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_RANDSTRUCT_FULL=y CONFIG_CRYPTO_ECDH=y CONFIG_CRYPTO_DES=y CONFIG_CRYPTO_ADIANTUM=y @@ -691,6 +699,8 @@ CONFIG_MODULE_ALLOW_BTF_MISMATCH=y CONFIG_HEADERS_INSTALL=y # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 +# CONFIG_MAGIC_SYSRQ_SERIAL is not set CONFIG_UBSAN=y CONFIG_UBSAN_TRAP=y # CONFIG_UBSAN_SHIFT is not set @@ -707,6 +717,7 @@ CONFIG_PANIC_TIMEOUT=-1 CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_SG=y CONFIG_HIST_TRIGGERS=y CONFIG_UNWINDER_FRAME_POINTER=y CONFIG_KUNIT=m diff --git a/arch/x86/configs/microdroid_defconfig b/arch/x86/configs/microdroid_defconfig index 603199122fa2..96b760e96a49 100644 --- a/arch/x86/configs/microdroid_defconfig +++ b/arch/x86/configs/microdroid_defconfig @@ -20,6 +20,9 @@ CONFIG_MEMCG=y # CONFIG_RD_LZO is not 
set CONFIG_BOOT_CONFIG=y CONFIG_EXPERT=y +# CONFIG_IO_URING is not set +# CONFIG_RSEQ is not set +# CONFIG_CACHESTAT_SYSCALL is not set CONFIG_PROFILING=y CONFIG_KEXEC_FILE=y CONFIG_SMP=y @@ -32,29 +35,29 @@ CONFIG_NR_CPUS=32 CONFIG_EFI=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure ioremap_guard panic=-1 bootconfig acpi=noirq" -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set +# CONFIG_SUSPEND is not set CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_TIMES=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_JUMP_LABEL=y +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y +CONFIG_MICRODROID=y # CONFIG_BLOCK_LEGACY_AUTOLOAD is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_MSDOS_PARTITION is not set # CONFIG_MQ_IOSCHED_DEADLINE is not set # CONFIG_MQ_IOSCHED_KYBER is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=y # CONFIG_SLAB_MERGE_DEFAULT is not set CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_RANDOM_KMALLOC_CACHES=y CONFIG_SHUFFLE_PAGE_ALLOCATOR=y # CONFIG_COMPAT_BRK is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y +# CONFIG_ZONE_DMA is not set CONFIG_ANON_VMA_NAME=y CONFIG_USERFAULTFD=y CONFIG_LRU_GEN=y @@ -64,9 +67,18 @@ CONFIG_DAMON_RECLAIM=y CONFIG_NET=y CONFIG_UNIX=y CONFIG_INET=y +CONFIG_SYN_COOKIES=y +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_CUBIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +# CONFIG_IPV6 is not set CONFIG_VSOCKETS=y CONFIG_VIRTIO_VSOCKETS=y # CONFIG_WIRELESS is not set +# CONFIG_ETHTOOL_NETLINK is not set CONFIG_PCI=y CONFIG_PCIEPORTBUS=y CONFIG_PCIEAER=y @@ -74,24 +86,25 @@ CONFIG_PCI_MSI=y CONFIG_PCI_IOV=y # CONFIG_VGA_ARB is not set CONFIG_PCI_ENDPOINT=y -CONFIG_FW_LOADER_USER_HELPER=y -# 
CONFIG_FW_CACHE is not set +# CONFIG_FW_LOADER is not set CONFIG_OF=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_VIRTIO_BLK=y CONFIG_MD=y +# CONFIG_MD_BITMAP_FILE is not set CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y +# CONFIG_DM_USER is not set # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_LEGACY_PTYS is not set +# CONFIG_LEGACY_TIOCSTI is not set +# CONFIG_LDISC_AUTOLOAD is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y @@ -122,11 +135,12 @@ CONFIG_MFD_SYSCON=y # CONFIG_USB_SUPPORT is not set CONFIG_EDAC=y CONFIG_RTC_CLASS=y -CONFIG_DMABUF_HEAPS=y -CONFIG_DMABUF_SYSFS_STATS=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_STAGING=y +# CONFIG_IOMMU_SUPPORT is not set +# CONFIG_ANDROID_KABI_RESERVE is not set +# CONFIG_ANDROID_VENDOR_OEM_DATA is not set CONFIG_LIBNVDIMM=y CONFIG_EXT4_FS=y # CONFIG_EXT4_USE_FOR_EXT2 is not set @@ -193,13 +207,17 @@ CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SECURITY_NETWORK=y CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y +CONFIG_INIT_ON_FREE_DEFAULT_ON=y CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_RANDSTRUCT_FULL=y CONFIG_CRYPTO_HCTR2=y CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_LZ4=y CONFIG_CRYPTO_AES_NI_INTEL=y CONFIG_CRYPTO_POLYVAL_CLMUL_NI=y CONFIG_CRYPTO_SHA1_SSSE3=y @@ -212,6 +230,8 @@ CONFIG_DEBUG_INFO_REDUCED=y CONFIG_HEADERS_INSTALL=y # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 +# CONFIG_MAGIC_SYSRQ_SERIAL is not set CONFIG_UBSAN=y CONFIG_UBSAN_TRAP=y # CONFIG_UBSAN_SHIFT is not set @@ -222,10 +242,12 @@ CONFIG_DEBUG_STACK_USAGE=y CONFIG_KFENCE=y CONFIG_KFENCE_SAMPLE_INTERVAL=500 CONFIG_KFENCE_NUM_OBJECTS=63 
+CONFIG_KFENCE_STATIC_KEYS=y CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_TIMEOUT=-1 CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_WQ_WATCHDOG=y CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_SG=y CONFIG_HIST_TRIGGERS=y CONFIG_UNWINDER_FRAME_POINTER=y diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 0e4434373f6d..ea054ee8a15a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -817,6 +817,7 @@ void __init init_mem_mapping(void) x86_init.hyper.init_mem_mapping(); + early_memzero(0, max_pfn_mapped << PAGE_SHIFT); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } diff --git a/build_virt.sh b/build_virt.sh new file mode 100755 index 000000000000..cb041d6e2934 --- /dev/null +++ b/build_virt.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -e + +CLANG_VERSION="r510928" + +[[ "${ARCH}" =~ aarch64|x86_64 ]] || (echo "unknown or undefined ARCH" && exit 1) + +cert_pem=$(mktemp) +cert_x509=$(mktemp) +sign_file=$(mktemp) + +trap '{ rm -f -- "$sign_file" "$cert_pem" "$cert_x509"; }' EXIT + +# clean out/ to avoid confusion with signing keys +test -d out/ && rm -rf out/ + +function find_in_out(){ + find out/bazel/output_user_root/*/execroot -type f -name $1 +} + +tools/bazel run --lto=full "$@" //common:kernel_${ARCH}_dist -- --dist_dir=common_dist + +cp $(find_in_out "signing_key.pem") ${cert_pem} +cp $(find_in_out "signing_key.x509") ${cert_x509} + +tools/bazel run --lto=full "$@" //common-modules/virtual-device:virtual_device_${ARCH}_dist -- --dist_dir=virt_dist + +prebuilts/clang/host/linux-x86/clang-${CLANG_VERSION}/bin/clang common/scripts/sign-file.c -lssl -lcrypto -o ${sign_file} +find virt_dist/ -type f -name "*.ko" \ + -exec ${sign_file} sha256 ${cert_pem} ${cert_x509} {} \; + +mapfile -t common_sig < <(modinfo common_dist/*.ko | grep "sig_key" | awk '{print $NF}') +mapfile -t virt_sig < <(modinfo virt_dist/*.ko | grep "sig_key" | awk '{print $NF}') +if [[ "$ARCH" == "x86_64" ]]; then [[ $common_sig == $virt_sig ]] && echo "Signature verification success" || echo "Signature 
verification failure"; fi diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 00ffd7ed2ffc..a3eb6a548023 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -682,3 +682,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_folio_remove_rmap_ptes); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pageset_update); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_xhci_full_reset_on_remove); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mempool_alloc_skip_wait); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_ac); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_rmqueue); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_suitable_zone); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_max_order); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_pageset); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_lru_add_dst); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_isolate_priv_lru); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_pgdat_balanced); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_file_is_tiny); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 0b997d397f0b..bdef5d8a8a09 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -574,6 +574,13 @@ static void wakeup_source_activate(struct wakeup_source *ws) /* Increment the counter of events in progress. */ cec = atomic_inc_return(&combined_event_count); + /* + * wakeup_source_activate() aborts suspend only if events_check_enabled + * is set (see pm_wakeup_pending()). Similarly, abort suspend during + * fs_sync only if events_check_enabled is set. 
+ */ + if (events_check_enabled) + suspend_abort_fs_sync(); trace_wakeup_source_activate(ws->name, cec); } @@ -939,6 +946,7 @@ EXPORT_SYMBOL_GPL(pm_wakeup_pending); void pm_system_wakeup(void) { atomic_inc(&pm_abort_suspend); + suspend_abort_fs_sync(); s2idle_wake(); } EXPORT_SYMBOL_GPL(pm_system_wakeup); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 0b02ced1eb33..cb1344c646ac 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -31,10 +31,15 @@ #include #include +#ifndef __GENKSYMS__ +#include +#endif #include #include "dma-buf-sysfs-stats.h" +DEFINE_STATIC_KEY_TRUE(dmabuf_accounting_key); + struct dma_buf_list { struct list_head head; struct mutex lock; @@ -115,6 +120,9 @@ static void dma_buf_release(struct dentry *dentry) if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) dma_resv_fini(dmabuf->resv); + if (atomic64_read(&dmabuf->nr_task_refs)) + pr_alert("destroying dmabuf with non-zero task refs\n"); + WARN_ON(!list_empty(&dmabuf->attachments)); module_put(dmabuf->owner); kfree(dmabuf->name); @@ -162,9 +170,405 @@ static struct file_system_type dma_buf_fs_type = { .kill_sb = kill_anon_super, }; +struct task_dma_buf_record_preload { + local_lock_t lock; + size_t size; + struct list_head list; +}; + +static DEFINE_PER_CPU(struct task_dma_buf_record_preload, dmabuf_rec_reloads); + +static struct kmem_cache *task_dmabuf_record_cachep; + +static void __init init_task_dmabuf_record_pool(void) +{ + int cpu; + + task_dmabuf_record_cachep = kmem_cache_create("task_dmabuf_record", + sizeof(struct task_dma_buf_record), 0, + SLAB_PANIC | SLAB_ACCOUNT, NULL); + + for_each_possible_cpu(cpu) { + struct task_dma_buf_record_preload *preload; + + preload = &per_cpu(dmabuf_rec_reloads, cpu); + local_lock_init(&preload->lock); + INIT_LIST_HEAD(&preload->list); + preload->size = 0; + } +} + +/* + * Load up this CPU's task_dma_buf_record_preload list with requested number of + * records. 
On success, returns true, with preemption disabled. On error, + * return false with preemption not disabled. + */ +static bool task_dmabuf_records_preload(size_t count) +{ + struct task_dma_buf_record_preload *preload; + + local_lock(&dmabuf_rec_reloads.lock); + preload = this_cpu_ptr(&dmabuf_rec_reloads); + while (preload->size < count) { + struct task_dma_buf_record *rec; + + local_unlock(&dmabuf_rec_reloads.lock); + rec = kmem_cache_alloc(task_dmabuf_record_cachep, GFP_KERNEL); + if (!rec) + return false; + + local_lock(&dmabuf_rec_reloads.lock); + preload = this_cpu_ptr(&dmabuf_rec_reloads); + if (preload->size < count) { + list_add(&rec->node, &preload->list); + preload->size++; + } else { + kmem_cache_free(task_dmabuf_record_cachep, rec); + } + } + + return true; +} + +static inline void task_dmabuf_records_preload_end(void) +{ + local_unlock(&dmabuf_rec_reloads.lock); +} + +static struct task_dma_buf_record *alloc_task_dmabuf_record(void) +{ + struct task_dma_buf_record_preload *preload; + struct task_dma_buf_record *rec = NULL; + + lockdep_assert_held(this_cpu_ptr(&dmabuf_rec_reloads.lock)); + preload = this_cpu_ptr(&dmabuf_rec_reloads); + if (preload->size > 0) { + rec = list_first_entry(&preload->list, typeof(*rec), node); + list_del(&rec->node); + preload->size--; + } + + return rec; +} + +#define MAX_PCP_POOL_SIZE 32 + +static void free_task_dmabuf_record(struct task_dma_buf_record *rec) +{ + struct task_dma_buf_record_preload *preload; + + local_lock(&dmabuf_rec_reloads.lock); + preload = this_cpu_ptr(&dmabuf_rec_reloads); + if (preload->size < MAX_PCP_POOL_SIZE) { + list_add(&rec->node, &preload->list); + preload->size++; + } else { + kmem_cache_free(task_dmabuf_record_cachep, rec); + } + local_unlock(&dmabuf_rec_reloads.lock); +} + +static void trim_task_dmabuf_records_locked(void) +{ + struct task_dma_buf_record_preload *preload; + + lockdep_assert_held(this_cpu_ptr(&dmabuf_rec_reloads.lock)); + preload = this_cpu_ptr(&dmabuf_rec_reloads); + while 
(preload->size > MAX_PCP_POOL_SIZE) { + struct task_dma_buf_record *rec; + + rec = list_first_entry(&preload->list, typeof(*rec), node); + list_del(&rec->node); + preload->size--; + kmem_cache_free(task_dmabuf_record_cachep, rec); + } +} + +static void trim_task_dmabuf_records(void) +{ + local_lock(&dmabuf_rec_reloads.lock); + trim_task_dmabuf_records_locked(); + local_unlock(&dmabuf_rec_reloads.lock); +} + +static struct task_dma_buf_record *find_task_dmabuf_record( + struct task_dma_buf_info *dmabuf_info, struct dma_buf *dmabuf) +{ + struct task_dma_buf_record *rec; + + lockdep_assert_held(&dmabuf_info->lock); + + list_for_each_entry(rec, &dmabuf_info->dmabufs, node) + if (dmabuf == rec->dmabuf) + return rec; + + return NULL; +} + +static void add_task_dmabuf_record(struct task_dma_buf_info *dmabuf_info, + struct dma_buf *dmabuf, + struct task_dma_buf_record *rec) +{ + lockdep_assert_held(&dmabuf_info->lock); + + rec->dmabuf = dmabuf; + rec->refcnt = 1; + list_add(&rec->node, &dmabuf_info->dmabufs); + dmabuf_info->dmabuf_count++; + dmabuf_info->rss += dmabuf->size; + if (dmabuf_info->rss > dmabuf_info->rss_hwm) + dmabuf_info->rss_hwm = dmabuf_info->rss; + trace_dmabuf_rss_stat(dmabuf_info->rss, dmabuf->size, dmabuf); + atomic64_inc(&dmabuf->nr_task_refs); +} + +/** + * dma_buf_account_task - Account a dmabuf to a task + * @dmabuf: [in] pointer to dma_buf + * @task: [in] pointer to task_struct + * + * When a process obtains a dmabuf file descriptor, or maps a dmabuf, this + * function attributes the provided @dmabuf to the @task. The first time @dmabuf + * is attributed to @task, the buffer's size is added to the @task's dmabuf RSS. 
+ * + * Return: + * * 0 on success + * * A negative error code upon error + */ +int dma_buf_account_task(struct dma_buf *dmabuf, struct task_struct *task) +{ + struct task_dma_buf_info *dmabuf_info = task->dmabuf_info; + struct task_dma_buf_record *rec; + + if (!static_key_enabled(&dmabuf_accounting_key)) + return 0; + + if (!dmabuf_info) + return 0; + + if (!task_dmabuf_records_preload(1)) + return -ENOMEM; + + spin_lock(&dmabuf_info->lock); + rec = find_task_dmabuf_record(dmabuf_info, dmabuf); + if (rec) { + ++rec->refcnt; + trim_task_dmabuf_records_locked(); + } else { + rec = alloc_task_dmabuf_record(); + WARN_ON(!rec); + add_task_dmabuf_record(dmabuf_info, dmabuf, rec); + } + spin_unlock(&dmabuf_info->lock); + task_dmabuf_records_preload_end(); + + return 0; +} + +/** + * dma_buf_unaccount_task - Unaccount a dmabuf from a task + * @dmabuf: [in] pointer to dma_buf + * @task: [in] pointer to task_struct + * + * When a process closes a dmabuf file descriptor, or unmaps a dmabuf, this + * function removes the provided @dmabuf attribution from the @task. When all + * references to @dmabuf are removed from @task, the buffer's size is removed + * from the task's dmabuf RSS. 
+ */ +void dma_buf_unaccount_task(struct dma_buf *dmabuf, struct task_struct *task) +{ + struct task_dma_buf_info *dmabuf_info = task->dmabuf_info; + struct task_dma_buf_record *rec; + + if (!static_key_enabled(&dmabuf_accounting_key)) + return; + + if (!dmabuf_info) + return; + + spin_lock(&dmabuf_info->lock); + rec = find_task_dmabuf_record(dmabuf_info, dmabuf); + if (rec) { + if (--rec->refcnt == 0) { + list_del(&rec->node); + dmabuf_info->dmabuf_count--; + dmabuf_info->rss -= dmabuf->size; + trace_dmabuf_rss_stat(dmabuf_info->rss, -dmabuf->size, dmabuf); + atomic64_dec(&dmabuf->nr_task_refs); + } else { + rec = NULL; + } + } else { + pr_err("Could not find dmabuf %lu in unaccount for task %d\n", + file_inode(dmabuf->file)->i_ino, task_pid_nr(task)); + } + spin_unlock(&dmabuf_info->lock); + if (rec) + free_task_dmabuf_record(rec); +} + +static struct task_dma_buf_info *alloc_task_dma_buf_info(void) +{ + struct task_dma_buf_info *dmabuf_info; + + dmabuf_info = kzalloc(sizeof(*dmabuf_info), GFP_KERNEL); + if (!dmabuf_info) + return NULL; + + refcount_set(&dmabuf_info->refcnt, 1); + spin_lock_init(&dmabuf_info->lock); + INIT_LIST_HEAD(&dmabuf_info->dmabufs); + + return dmabuf_info; +} + +static struct task_dma_buf_info *dup_dma_buf_info(struct task_dma_buf_info *from) +{ + struct task_dma_buf_info *to; + struct task_dma_buf_record *from_rec, *to_rec; + unsigned int count; + int retries = 0; + + /* Allocate now before locked section below. 
*/ + to = alloc_task_dma_buf_info(); + if (!to) + return NULL; + + /* Read required count racily, before obtaining dmabuf_info->lock */ + count = READ_ONCE(from->dmabuf_count); + if (!task_dmabuf_records_preload(count)) + goto err_list_copy; + +retry: + spin_lock(&from->lock); + if (from->dmabuf_count > count) { + /* We don't have enough reserved records, allocate more */ + count = from->dmabuf_count; + + spin_unlock(&from->lock); + task_dmabuf_records_preload_end(); + if (!task_dmabuf_records_preload(count)) + goto err_list_copy; + + /* Limit the number of retries to avoid live-lock */ + if (retries++ > 5) { + task_dmabuf_records_preload_end(); + goto err_list_copy; + } + + goto retry; + } + + /* All required records are reserved */ + list_for_each_entry(from_rec, &from->dmabufs, node) { + to_rec = alloc_task_dmabuf_record(); + WARN_ON(!to_rec); + to_rec->dmabuf = from_rec->dmabuf; + to_rec->refcnt = from_rec->refcnt; + list_add(&to_rec->node, &to->dmabufs); + atomic64_inc(&to_rec->dmabuf->nr_task_refs); + } + to->dmabuf_count = from->dmabuf_count; + to->rss = from->rss; + to->rss_hwm = to->rss; + spin_unlock(&from->lock); + + trim_task_dmabuf_records_locked(); + task_dmabuf_records_preload_end(); + + return to; + +err_list_copy: + trim_task_dmabuf_records(); + kfree(to); + + return NULL; +} + +int copy_dmabuf_info(u64 clone_flags, struct task_struct *task) +{ + struct task_dma_buf_info *parent_dmabuf_info = current->dmabuf_info; + struct task_dma_buf_info *child_dmabuf_info; + bool share_vm = clone_flags & CLONE_VM; + bool share_fs = clone_flags & CLONE_FILES; + + if (!static_key_enabled(&dmabuf_accounting_key)) + return 0; + + /* kthreads are not supported */ + if (task->flags & PF_KTHREAD) { + task->dmabuf_info = NULL; + return 0; + } + + /* + * Non-kthread direct descendants of pid 0 are roots of their own task_dma_buf_info trees, + * even if they want to partially share with pid 0. Init does this. 
We assume no dmabuf + * sharing will actually occur through pid 0. + */ + if (unlikely(!task_pid_nr(current))) { + task->dmabuf_info = alloc_task_dma_buf_info(); + if (!task->dmabuf_info) + return -ENOMEM; + + return 0; + } + + /* + * Partial sharing is not supported. + * Children of such tasks are also not supported. + */ + if (share_vm != share_fs || !parent_dmabuf_info) { + task->dmabuf_info = NULL; + return 0; + } + + /* + * Full sharing: Both MM and FD references to dmabufs are shared with + * the parent, so they can both share the same dmabuf_info. + */ + if (share_vm && share_fs) { + refcount_inc(&parent_dmabuf_info->refcnt); + task->dmabuf_info = parent_dmabuf_info; + return 0; + } + + /* + * No sharing: Both MM and FD references to dmabufs are duplicated in the child. We + * duplicate the dmabuf accounting info into the child as well here. + */ + child_dmabuf_info = dup_dma_buf_info(parent_dmabuf_info); + if (!child_dmabuf_info) + return -ENOMEM; + + task->dmabuf_info = child_dmabuf_info; + + return 0; +} + +void put_dmabuf_info(struct task_struct *task) +{ + if (!task->dmabuf_info) + return; + + if (!refcount_dec_and_test(&task->dmabuf_info->refcnt)) + return; + + if (task->dmabuf_info->rss) + pr_alert("destroying task with non-zero dmabuf rss %lu\n", task->dmabuf_info->rss); + + if (!list_empty(&task->dmabuf_info->dmabufs) || task->dmabuf_info->dmabuf_count > 0) + pr_alert("destroying task with non-empty dmabuf list %zu %u\n", + list_count_nodes(&task->dmabuf_info->dmabufs), + task->dmabuf_info->dmabuf_count); + + kfree(task->dmabuf_info); +} + static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; + int ret; if (!is_dma_buf_file(file)) return -EINVAL; @@ -180,7 +584,15 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) dmabuf->size >> PAGE_SHIFT) return -EINVAL; - return dmabuf->ops->mmap(dmabuf, vma); + ret = dmabuf->ops->mmap(dmabuf, vma); + if (!ret) { + int err = 
dma_buf_account_task(dmabuf, current); + + if (err) + pr_err("dmabuf accounting failed during mmap operation, err %d\n", err); + } + + return ret; } static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) @@ -557,6 +969,13 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) spin_unlock(&dmabuf->name_lock); } +static int dma_buf_flush(struct file *file, fl_owner_t id) +{ + /* When dmabuf FD is closed we should unaccount it */ + dma_buf_unaccount_task(file->private_data, current); + return 0; +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_file_release, .mmap = dma_buf_mmap_internal, @@ -565,6 +984,7 @@ static const struct file_operations dma_buf_fops = { .unlocked_ioctl = dma_buf_ioctl, .compat_ioctl = compat_ptr_ioctl, .show_fdinfo = dma_buf_show_fdinfo, + .flush = dma_buf_flush, }; /* @@ -716,6 +1136,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->resv = resv; } + atomic64_set(&dmabuf->nr_task_refs, 0); + file->private_data = dmabuf; file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; @@ -1555,6 +1977,8 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial); int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff) { + int ret; + if (WARN_ON(!dmabuf || !vma)) return -EINVAL; @@ -1575,7 +1999,15 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, vma_set_file(vma, dmabuf->file); vma->vm_pgoff = pgoff; - return dmabuf->ops->mmap(dmabuf, vma); + ret = dmabuf->ops->mmap(dmabuf, vma); + if (!ret) { + int err = dma_buf_account_task(dmabuf, current); + + if (err) + pr_err("dmabuf accounting failed during mmap operation, err %d\n", err); + } + + return ret; } EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); @@ -1817,6 +2249,24 @@ static inline void dma_buf_uninit_debugfs(void) } #endif +static int __init setup_early_dmabuf_accounting(char *str) +{ + bool enable; + + if (kstrtobool(str, &enable)) + return 
-EINVAL; + + if (enable != static_key_enabled(&dmabuf_accounting_key)) { + if (enable) + static_branch_enable(&dmabuf_accounting_key); + else + static_branch_disable(&dmabuf_accounting_key); + } + + return 0; +} +early_param("dmabuf_accounting", setup_early_dmabuf_accounting); + static int __init dma_buf_init(void) { int ret; @@ -1831,6 +2281,7 @@ static int __init dma_buf_init(void) mutex_init(&db_list.lock); INIT_LIST_HEAD(&db_list.head); + init_task_dmabuf_record_pool(); dma_buf_init_debugfs(); return 0; } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index dcf2e1ebf9c5..632f665f3e38 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -91,8 +91,7 @@ static bool sysrq_on_mask(int mask) static int __init sysrq_always_enabled_setup(char *str) { - sysrq_always_enabled = true; - pr_info("sysrq always enabled.\n"); + pr_info("sysrq always enabled ignored.\n"); return 1; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 7be18e45ba5a..867ca76e1ecd 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -5632,7 +5632,6 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; unsigned long flags; - u32 hwq_num, utag; int tag; for (tag = 0; tag < hba->nutrs; tag++) { @@ -5642,9 +5641,9 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, test_bit(SCMD_STATE_COMPLETE, &cmd->state)) continue; - utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); - hwq_num = blk_mq_unique_tag_to_hwq(utag); - hwq = &hba->uhq[hwq_num]; + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + if (!hwq) + continue; if (force_compl) { ufshcd_mcq_compl_all_cqes_lock(hba, hwq); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 52baa0c3a0b1..978fe510e7b5 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -67,6 +67,8 @@ */ #define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */ +extern int deny_new_usb; + /* Protect struct 
usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */ @@ -5388,6 +5390,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, goto done; return; } + + if (deny_new_usb) { + dev_err(&port_dev->dev, "denied insert of USB device on port %d\n", port1); + goto done; + } + if (hub_is_superspeed(hub->hdev)) unit_load = 150; else diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index f5731d465cd7..88713368942f 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1418,7 +1418,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) struct usb_composite_dev *cdev = c->cdev; struct f_ncm *ncm = func_to_ncm(f); struct usb_string *us; - int status = 0; + int status; struct usb_ep *ep; struct f_ncm_opts *ncm_opts; @@ -1436,17 +1436,22 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc; } - mutex_lock(&ncm_opts->lock); - gether_set_gadget(ncm_opts->net, cdev->gadget); - if (!ncm_opts->bound) + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to ncm_opts->bound access + */ + if (!ncm_opts->bound) { + mutex_lock(&ncm_opts->lock); + gether_set_gadget(ncm_opts->net, cdev->gadget); status = gether_register_netdev(ncm_opts->net); - mutex_unlock(&ncm_opts->lock); - - if (status) - goto fail; - - ncm_opts->bound = true; - + mutex_unlock(&ncm_opts->lock); + if (status) + goto fail; + ncm_opts->bound = true; + } us = usb_gstrings_attach(cdev, ncm_strings, ARRAY_SIZE(ncm_string_defs)); if (IS_ERR(us)) { diff --git 
a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 8f7786a91d88..257e45df2cc1 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -2916,8 +2916,15 @@ static struct config_group *uvcg_framebased_make(struct config_group *group, 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }; + struct uvcg_color_matching *color_match; + struct config_item *streaming; struct uvcg_framebased *h; + streaming = group->cg_item.ci_parent; + color_match = uvcg_format_get_default_color_match(streaming); + if (!color_match) + return ERR_PTR(-EINVAL); + h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) return ERR_PTR(-ENOMEM); @@ -2936,6 +2943,9 @@ static struct config_group *uvcg_framebased_make(struct config_group *group, INIT_LIST_HEAD(&h->fmt.frames); h->fmt.type = UVCG_FRAMEBASED; + + h->fmt.color_matching = color_match; + color_match->refcnt++; config_group_init_type_name(&h->fmt.group, name, &uvcg_framebased_type); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index a4120a25428e..ce6fd7cd8936 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -699,6 +699,8 @@ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) } EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect); +extern int deny_new_usb; + static int usb_gadget_connect_locked(struct usb_gadget *gadget) __must_hold(&gadget->udc->connect_lock) { @@ -709,6 +711,12 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget) goto out; } + if (deny_new_usb) { + dev_err(&gadget->dev, "blocked USB gadget connection\n"); + ret = -EPERM; + goto out; + } + if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) { /* * If the gadget isn't usable (because it is deactivated, diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 480e5d3b7b7e..a9e187e3316a 100644 --- 
a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -361,6 +361,8 @@ struct tcpm_port { bool pd_supported; enum typec_port_type port_type; + bool ignore_alt_modes; + /* * Set to true when vbus is greater than VSAFE5V min. * Set to false when vbus falls below vSinkDisconnect max threshold. @@ -1939,7 +1941,11 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, port->vdm_state = VDM_STATE_DONE; } - if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) { + if (port->ignore_alt_modes) { + tcpm_log(port, "tcpm_handle_vdm_request: ignore_alt_modes is set"); + } + + if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo) && !port->ignore_alt_modes) { /* * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in * advance because we are dropping the lock but may send VDMs soon. @@ -6713,6 +6719,58 @@ static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle port->pd_rev.ver_minor = val[3]; } +static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo, unsigned int nr_pdo) +{ + unsigned int i; + + if (nr_pdo > PDO_MAX_OBJECTS) + nr_pdo = PDO_MAX_OBJECTS; + + for (i = 0; i < nr_pdo; i++) + dest_pdo[i] = src_pdo[i]; + + return nr_pdo; +} + +#define DO_NOT_IGNORE_ALT_MODES (UINT_MAX - 1) +#define IGNORE_ALT_MODES (UINT_MAX) + +int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, unsigned int nr_pdo, + unsigned int operating_snk_mw) +{ + // reuse tcpm_update_sink_capabilities to avoid changing ABI + if (nr_pdo == DO_NOT_IGNORE_ALT_MODES || nr_pdo == IGNORE_ALT_MODES) { + port->ignore_alt_modes = nr_pdo == IGNORE_ALT_MODES; + return 0; + } + + if (tcpm_validate_caps(port, pdo, nr_pdo)) + return -EINVAL; + + mutex_lock(&port->lock); + port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo); + port->operating_snk_mw = operating_snk_mw; + port->update_sink_caps = true; + + switch (port->state) { + case SNK_NEGOTIATE_CAPABILITIES: + case 
SNK_NEGOTIATE_PPS_CAPABILITIES: + case SNK_READY: + case SNK_TRANSITION_SINK: + case SNK_TRANSITION_SINK_VBUS: + if (port->pps_data.active) + tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0); + else + tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); + break; + default: + break; + } + mutex_unlock(&port->lock); + return 0; +} +EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); + /* Power Supply access to expose source power information */ enum tcpm_psy_online_states { TCPM_PSY_OFFLINE = 0, diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index d94a06008ff6..9d6c6cdac050 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -114,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, struct sk_buff *skb; unsigned out, in; size_t nbytes; + u32 offset; int head; skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue); @@ -156,7 +157,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, } iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len); - payload_len = skb->len; + offset = VIRTIO_VSOCK_SKB_CB(skb)->offset; + payload_len = skb->len - offset; hdr = virtio_vsock_hdr(skb); /* If the packet is greater than the space available in the @@ -197,8 +199,10 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, break; } - nbytes = copy_to_iter(skb->data, payload_len, &iov_iter); - if (nbytes != payload_len) { + if (skb_copy_datagram_iter(skb, + offset, + &iov_iter, + payload_len)) { kfree_skb(skb); vq_err(vq, "Faulted on copying pkt buf\n"); break; @@ -212,13 +216,13 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, vhost_add_used(vq, head, sizeof(*hdr) + payload_len); added = true; - skb_pull(skb, payload_len); + VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len; total_len += payload_len; /* If we didn't send all the payload we can requeue the packet * to send it with the next available buffer. 
*/ - if (skb->len > 0) { + if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) { hdr->flags |= cpu_to_le32(flags_to_restore); /* We are queueing the same skb to handle @@ -340,6 +344,10 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, len = iov_length(vq->iov, out); + if (len < VIRTIO_VSOCK_SKB_HEADROOM || + len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM) + return NULL; + /* len contains both payload and hdr */ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL); if (!skb) @@ -363,18 +371,15 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, return skb; /* The pkt is too big or the length in the header is invalid */ - if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || - payload_len + sizeof(*hdr) > len) { + if (payload_len + sizeof(*hdr) > len) { kfree_skb(skb); return NULL; } - virtio_vsock_skb_rx_put(skb); + virtio_vsock_skb_put(skb, payload_len); - nbytes = copy_from_iter(skb->data, payload_len, &iov_iter); - if (nbytes != payload_len) { - vq_err(vq, "Expected %zu byte payload, got %zu bytes\n", - payload_len, nbytes); + if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) { + vq_err(vq, "Failed to copy %zu byte payload\n", payload_len); kfree_skb(skb); return NULL; } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 12381173f02c..0155fbd4d681 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1028,6 +1028,15 @@ static int load_elf_binary(struct linux_binprm *bprm) /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. 
*/ SET_PERSONALITY2(*elf_ex, &arch_state); + +#ifdef CONFIG_ARM64 + if (bprm->compat_va_39_bit) { + set_thread_flag(TIF_39BIT); + } else { + clear_thread_flag(TIF_39BIT); + } +#endif + if (elf_read_implies_exec(*elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9ae458635eb7..781d09c7684c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -774,7 +774,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) call_rcu(&epi->rcu, epi_rcu_free); percpu_counter_dec(&ep->user->epoll_watches); - return ep_refcount_dec_and_test(ep); + return true; } /* @@ -782,14 +782,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force) */ static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi) { - WARN_ON_ONCE(__ep_remove(ep, epi, false)); + if (__ep_remove(ep, epi, false)) + WARN_ON_ONCE(ep_refcount_dec_and_test(ep)); } static void ep_clear_and_put(struct eventpoll *ep) { struct rb_node *rbp, *next; struct epitem *epi; - bool dispose; /* We need to release all tasks waiting for these file */ if (waitqueue_active(&ep->poll_wait)) @@ -822,10 +822,8 @@ static void ep_clear_and_put(struct eventpoll *ep) cond_resched(); } - dispose = ep_refcount_dec_and_test(ep); mutex_unlock(&ep->mtx); - - if (dispose) + if (ep_refcount_dec_and_test(ep)) ep_free(ep); } @@ -1005,7 +1003,7 @@ void eventpoll_release_file(struct file *file) dispose = __ep_remove(ep, epi, true); mutex_unlock(&ep->mtx); - if (dispose) + if (dispose && ep_refcount_dec_and_test(ep)) ep_free(ep); goto again; } diff --git a/fs/exec.c b/fs/exec.c index cf487b0e49dd..5971b70b250c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -67,6 +67,7 @@ #include #include #include +#include #include #include @@ -285,6 +286,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm) mm->stack_vm = mm->total_vm = 1; mmap_write_unlock(mm); bprm->p = vma->vm_end - sizeof(void *); + if (!(current->personality & 
ADDR_NO_RANDOMIZE) && randomize_va_space) + bprm->p ^= get_random_u32() & ~PAGE_MASK; return 0; err: mmap_write_unlock(mm); @@ -1934,6 +1937,10 @@ static int do_execveat_common(int fd, struct filename *filename, goto out_ret; } +#define FLAG_COMPAT_VA_39_BIT (1 << 30) + bprm->compat_va_39_bit = flags & FLAG_COMPAT_VA_39_BIT; + flags &= ~FLAG_COMPAT_VA_39_BIT; // flag validation fails when it sees an unknown flag + retval = count(argv, MAX_ARG_STRINGS); if (retval == 0) pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 62046f0fb351..dd73ada5c61b 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1247,7 +1247,7 @@ static int block_operations(struct f2fs_sb_info *sbi) retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { - int locked; + bool need_lock = sbi->umount_lock_holder != current; if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) { set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); @@ -1256,11 +1256,13 @@ static int block_operations(struct f2fs_sb_info *sbi) } f2fs_unlock_all(sbi); - /* only failed during mount/umount/freeze/quotactl */ - locked = down_read_trylock(&sbi->sb->s_umount); - f2fs_quota_sync(sbi->sb, -1); - if (locked) + /* don't grab s_umount lock during mount/umount/remount/freeze/quotactl */ + if (!need_lock) { + f2fs_do_quota_sync(sbi->sb, -1); + } else if (down_read_trylock(&sbi->sb->s_umount)) { + f2fs_do_quota_sync(sbi->sb, -1); up_read(&sbi->sb->s_umount); + } cond_resched(); goto retry_flush_quotas; } @@ -1868,7 +1870,8 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi) struct cp_control cpc; cpc.reason = __get_cp_reason(sbi); - if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) { + if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC || + sbi->umount_lock_holder == current) { int ret; f2fs_down_write(&sbi->gc_lock); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 11696d72cd8d..dc308b5119f1 100644 --- 
a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -16,6 +16,21 @@ #include "xattr.h" #include +static inline bool f2fs_should_fallback_to_linear(struct inode *dir) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dir); + + switch (f2fs_get_lookup_mode(sbi)) { + case LOOKUP_PERF: + return false; + case LOOKUP_COMPAT: + return true; + case LOOKUP_AUTO: + return !sb_no_casefold_compat_fallback(sbi->sb); + } + return false; +} + #if IS_ENABLED(CONFIG_UNICODE) extern struct kmem_cache *f2fs_cf_name_slab; #endif @@ -409,7 +424,7 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, out: #if IS_ENABLED(CONFIG_UNICODE) - if (!sb_no_casefold_compat_fallback(dir->i_sb) && + if (f2fs_should_fallback_to_linear(dir) && IS_CASEFOLDED(dir) && !de && use_hash) { use_hash = false; goto start_find_entry; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index cf9007a253da..441c81946cc0 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -856,6 +856,7 @@ struct f2fs_inode_info { /* linked in global inode list for cache donation */ struct list_head gdonate_list; pgoff_t donate_start, donate_end; /* inclusive */ + atomic_t open_count; /* # of open files */ struct task_struct *atomic_write_task; /* store atomic write task */ struct extent_tree *extent_tree[NR_EXTENT_CACHES]; @@ -1670,6 +1671,7 @@ struct f2fs_sb_info { unsigned int nquota_files; /* # of quota sysfile */ struct f2fs_rwsem quota_sem; /* blocking cp for flags */ + struct task_struct *umount_lock_holder; /* s_umount lock holder */ /* # of pages, see count_type */ atomic_t nr_pages[NR_COUNT_TYPE]; @@ -3613,6 +3615,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); void f2fs_update_inode(struct inode *inode, struct page *node_page); void f2fs_update_inode_page(struct inode *inode); int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); +void f2fs_remove_donate_inode(struct inode *inode); void f2fs_evict_inode(struct inode *inode); void f2fs_handle_failed_inode(struct inode *inode); @@ -3690,7 +3693,7 
@@ int f2fs_inode_dirtied(struct inode *inode, bool sync); void f2fs_inode_synced(struct inode *inode); int f2fs_dquot_initialize(struct inode *inode); int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); -int f2fs_quota_sync(struct super_block *sb, int type); +int f2fs_do_quota_sync(struct super_block *sb, int type); loff_t max_file_blocks(struct inode *inode); void f2fs_quota_off_umount(struct super_block *sb); void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); @@ -4822,6 +4825,47 @@ static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi, f2fs_invalidate_compress_pages_range(sbi, blkaddr, len); } +enum f2fs_lookup_mode { + LOOKUP_PERF, + LOOKUP_COMPAT, + LOOKUP_AUTO, +}; + +/* + * For bit-packing in f2fs_mount_info->alloc_mode + */ +#define ALLOC_MODE_BITS 1 +#define LOOKUP_MODE_BITS 2 + +#define ALLOC_MODE_SHIFT 0 +#define LOOKUP_MODE_SHIFT (ALLOC_MODE_SHIFT + ALLOC_MODE_BITS) + +#define ALLOC_MODE_MASK (((1 << ALLOC_MODE_BITS) - 1) << ALLOC_MODE_SHIFT) +#define LOOKUP_MODE_MASK (((1 << LOOKUP_MODE_BITS) - 1) << LOOKUP_MODE_SHIFT) + +static inline int f2fs_get_alloc_mode(struct f2fs_sb_info *sbi) +{ + return (F2FS_OPTION(sbi).alloc_mode & ALLOC_MODE_MASK) >> ALLOC_MODE_SHIFT; +} + +static inline void f2fs_set_alloc_mode(struct f2fs_sb_info *sbi, int mode) +{ + F2FS_OPTION(sbi).alloc_mode &= ~ALLOC_MODE_MASK; + F2FS_OPTION(sbi).alloc_mode |= (mode << ALLOC_MODE_SHIFT); +} + +static inline enum f2fs_lookup_mode f2fs_get_lookup_mode(struct f2fs_sb_info *sbi) +{ + return (F2FS_OPTION(sbi).alloc_mode & LOOKUP_MODE_MASK) >> LOOKUP_MODE_SHIFT; +} + +static inline void f2fs_set_lookup_mode(struct f2fs_sb_info *sbi, + enum f2fs_lookup_mode mode) +{ + F2FS_OPTION(sbi).alloc_mode &= ~LOOKUP_MODE_MASK; + F2FS_OPTION(sbi).alloc_mode |= (mode << LOOKUP_MODE_SHIFT); +} + #define EFSBADCRC EBADMSG /* Bad CRC detected */ #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c 
index f8832212ee37..fdc67b59f782 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -618,7 +618,10 @@ static int f2fs_file_open(struct inode *inode, struct file *filp) if (err) return err; - return finish_preallocate_blocks(inode); + err = finish_preallocate_blocks(inode); + if (!err) + atomic_inc(&F2FS_I(inode)->open_count); + return err; } void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) @@ -1998,6 +2001,9 @@ static long f2fs_fallocate(struct file *file, int mode, static int f2fs_release_file(struct inode *inode, struct file *filp) { + if (atomic_dec_and_test(&F2FS_I(inode)->open_count)) + f2fs_remove_donate_inode(inode); + /* * f2fs_release_file is called at every close calls. So we should * not drop any inmemory pages by close called by other process. @@ -4798,6 +4804,7 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) struct inode *inode = file_inode(iocb->ki_filp); const loff_t pos = iocb->ki_pos; ssize_t ret; + bool dio; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; @@ -4806,12 +4813,15 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, iov_iter_count(to), READ); + dio = f2fs_should_use_dio(inode, iocb, to); + /* In LFS mode, if there is inflight dio, wait for its completion */ if (f2fs_lfs_mode(F2FS_I_SB(inode)) && - get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE)) + get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE) && + (!f2fs_is_pinned_file(inode) || !dio)) inode_dio_wait(inode); - if (f2fs_should_use_dio(inode, iocb, to)) { + if (dio) { ret = f2fs_dio_read_iter(iocb, to); } else { ret = filemap_read(iocb, to, 0); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 7d783e115e4b..c82ad61d25e9 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -814,7 +814,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; } -static void f2fs_remove_donate_inode(struct inode *inode) +void 
f2fs_remove_donate_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index f2e5260fef8d..f19b9ce65a84 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2915,7 +2915,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) return SIT_I(sbi)->last_victim[ALLOC_NEXT]; /* find segments from 0 to reuse freed segments */ - if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) + if (f2fs_get_alloc_mode(sbi) == ALLOC_MODE_REUSE) return 0; return curseg->segno; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 2bc3c52814d5..71da0813f20d 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -176,6 +176,7 @@ enum { Opt_memory_mode, Opt_age_extent_cache, Opt_errors, + Opt_lookup_mode, Opt_err, }; @@ -255,6 +256,7 @@ static match_table_t f2fs_tokens = { {Opt_memory_mode, "memory=%s"}, {Opt_age_extent_cache, "age_extent_cache"}, {Opt_errors, "errors=%s"}, + {Opt_lookup_mode, "lookup_mode=%s"}, {Opt_err, NULL}, }; @@ -998,9 +1000,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) return -ENOMEM; if (!strcmp(name, "default")) { - F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; + f2fs_set_alloc_mode(sbi, ALLOC_MODE_DEFAULT); } else if (!strcmp(name, "reuse")) { - F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; + f2fs_set_alloc_mode(sbi, ALLOC_MODE_REUSE); } else { kfree(name); return -EINVAL; @@ -1300,6 +1302,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) } kfree(name); break; + case Opt_lookup_mode: + name = match_strdup(&args[0]); + if (!name) + return -ENOMEM; + if (!strcmp(name, "perf")) { + f2fs_set_lookup_mode(sbi, LOOKUP_PERF); + } else if (!strcmp(name, "compat")) { + f2fs_set_lookup_mode(sbi, LOOKUP_COMPAT); + } else if (!strcmp(name, "auto")) { + f2fs_set_lookup_mode(sbi, LOOKUP_AUTO); + } else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; default: f2fs_err(sbi, 
"Unrecognized mount option \"%s\" or missing value", p); @@ -1415,6 +1433,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Initialize f2fs-specific inode info */ atomic_set(&fi->dirty_pages, 0); atomic_set(&fi->i_compr_blocks, 0); + atomic_set(&fi->open_count, 0); init_f2fs_rwsem(&fi->i_sem); spin_lock_init(&fi->i_size_lock); INIT_LIST_HEAD(&fi->dirty_list); @@ -1724,22 +1743,28 @@ int f2fs_sync_fs(struct super_block *sb, int sync) static int f2fs_freeze(struct super_block *sb) { + struct f2fs_sb_info *sbi = F2FS_SB(sb); + if (f2fs_readonly(sb)) return 0; /* IO error happened before */ - if (unlikely(f2fs_cp_error(F2FS_SB(sb)))) + if (unlikely(f2fs_cp_error(sbi))) return -EIO; /* must be clean, since sync_filesystem() was already called */ - if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) + if (is_sbi_flag_set(sbi, SBI_IS_DIRTY)) return -EINVAL; + sbi->umount_lock_holder = current; + /* Let's flush checkpoints and stop the thread. */ - f2fs_flush_ckpt_thread(F2FS_SB(sb)); + f2fs_flush_ckpt_thread(sbi); + + sbi->umount_lock_holder = NULL; /* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */ - set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING); + set_sbi_flag(sbi, SBI_IS_FREEZING); return 0; } @@ -2084,9 +2109,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) if (sbi->sb->s_flags & SB_INLINECRYPT) seq_puts(seq, ",inlinecrypt"); - if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) + if (f2fs_get_alloc_mode(sbi) == ALLOC_MODE_DEFAULT) seq_printf(seq, ",alloc_mode=%s", "default"); - else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) + else if (f2fs_get_alloc_mode(sbi) == ALLOC_MODE_REUSE) seq_printf(seq, ",alloc_mode=%s", "reuse"); if (test_opt(sbi, DISABLE_CHECKPOINT)) @@ -2122,6 +2147,13 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC) seq_printf(seq, ",errors=%s", "panic"); + if (f2fs_get_lookup_mode(sbi) == LOOKUP_PERF) + 
seq_show_option(seq, "lookup_mode", "perf"); + else if (f2fs_get_lookup_mode(sbi) == LOOKUP_COMPAT) + seq_show_option(seq, "lookup_mode", "compat"); + else if (f2fs_get_lookup_mode(sbi) == LOOKUP_AUTO) + seq_show_option(seq, "lookup_mode", "auto"); + return 0; } @@ -2149,9 +2181,9 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount) F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <= SMALL_VOLUME_SEGMENTS) - F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; + f2fs_set_alloc_mode(sbi, ALLOC_MODE_REUSE); else - F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; + f2fs_set_alloc_mode(sbi, ALLOC_MODE_DEFAULT); F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); @@ -2186,6 +2218,8 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount) #endif f2fs_build_fault_attr(sbi, 0, 0); + + f2fs_set_lookup_mode(sbi, LOOKUP_PERF); } #ifdef CONFIG_QUOTA @@ -2323,6 +2357,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) org_mount_opt = sbi->mount_opt; old_sb_flags = sb->s_flags; + sbi->umount_lock_holder = current; + #ifdef CONFIG_QUOTA org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { @@ -2546,6 +2582,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) limit_reserve_root(sbi); *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); + + sbi->umount_lock_holder = NULL; return 0; restore_checkpoint: if (need_enable_checkpoint) { @@ -2586,6 +2624,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) #endif sbi->mount_opt = org_mount_opt; sb->s_flags = old_sb_flags; + + sbi->umount_lock_holder = NULL; return err; } @@ -2902,7 +2942,7 @@ static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) return ret; } -int 
f2fs_quota_sync(struct super_block *sb, int type) +int f2fs_do_quota_sync(struct super_block *sb, int type) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct quota_info *dqopt = sb_dqopt(sb); @@ -2950,11 +2990,21 @@ int f2fs_quota_sync(struct super_block *sb, int type) return ret; } +static int f2fs_quota_sync(struct super_block *sb, int type) +{ + int ret; + + F2FS_SB(sb)->umount_lock_holder = current; + ret = f2fs_do_quota_sync(sb, type); + F2FS_SB(sb)->umount_lock_holder = NULL; + return ret; +} + static int f2fs_quota_on(struct super_block *sb, int type, int format_id, const struct path *path) { struct inode *inode; - int err; + int err = 0; /* if quota sysfile exists, deny enabling quota with specific file */ if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) { @@ -2965,31 +3015,34 @@ static int f2fs_quota_on(struct super_block *sb, int type, int format_id, if (path->dentry->d_sb != sb) return -EXDEV; - err = f2fs_quota_sync(sb, type); + F2FS_SB(sb)->umount_lock_holder = current; + + err = f2fs_do_quota_sync(sb, type); if (err) - return err; + goto out; inode = d_inode(path->dentry); err = filemap_fdatawrite(inode->i_mapping); if (err) - return err; + goto out; err = filemap_fdatawait(inode->i_mapping); if (err) - return err; + goto out; err = dquot_quota_on(sb, type, format_id, path); if (err) - return err; + goto out; inode_lock(inode); F2FS_I(inode)->i_flags |= F2FS_QUOTA_DEFAULT_FL; f2fs_set_inode_flags(inode); inode_unlock(inode); f2fs_mark_inode_dirty_sync(inode, false); - - return 0; +out: + F2FS_SB(sb)->umount_lock_holder = NULL; + return err; } static int __f2fs_quota_off(struct super_block *sb, int type) @@ -3000,7 +3053,7 @@ static int __f2fs_quota_off(struct super_block *sb, int type) if (!inode || !igrab(inode)) return dquot_quota_off(sb, type); - err = f2fs_quota_sync(sb, type); + err = f2fs_do_quota_sync(sb, type); if (err) goto out_put; @@ -3023,6 +3076,8 @@ static int f2fs_quota_off(struct super_block *sb, int type) struct f2fs_sb_info *sbi = F2FS_SB(sb); 
int err; + F2FS_SB(sb)->umount_lock_holder = current; + err = __f2fs_quota_off(sb, type); /* @@ -3032,6 +3087,9 @@ static int f2fs_quota_off(struct super_block *sb, int type) */ if (is_journalled_quota(sbi)) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); + + F2FS_SB(sb)->umount_lock_holder = NULL; + return err; } @@ -3164,7 +3222,7 @@ int f2fs_dquot_initialize(struct inode *inode) return 0; } -int f2fs_quota_sync(struct super_block *sb, int type) +int f2fs_do_quota_sync(struct super_block *sb, int type) { return 0; } @@ -4720,6 +4778,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto free_compress_inode; + sbi->umount_lock_holder = current; #ifdef CONFIG_QUOTA /* Enable quota usage during mount */ if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { @@ -4850,6 +4909,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); cleancache_init_fs(sb); + + sbi->umount_lock_holder = NULL; return 0; sync_free_meta: @@ -4954,6 +5015,8 @@ static void kill_f2fs_super(struct super_block *sb) struct f2fs_sb_info *sbi = F2FS_SB(sb); if (sb->s_root) { + sbi->umount_lock_holder = current; + set_sbi_flag(sbi, SBI_IS_CLOSE); f2fs_stop_gc_thread(sbi); f2fs_stop_discard_thread(sbi); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 46216f0a203a..e9a2cbca84b7 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -281,6 +281,22 @@ static ssize_t encoding_flags_show(struct f2fs_attr *a, le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags)); } +static ssize_t effective_lookup_mode_show(struct f2fs_attr *a, + struct f2fs_sb_info *sbi, char *buf) +{ + switch (f2fs_get_lookup_mode(sbi)) { + case LOOKUP_PERF: + return sysfs_emit(buf, "perf\n"); + case LOOKUP_COMPAT: + return sysfs_emit(buf, "compat\n"); + case LOOKUP_AUTO: + if (sb_no_casefold_compat_fallback(sbi->sb)) + return sysfs_emit(buf, "auto:perf\n"); + return sysfs_emit(buf, "auto:compat\n"); + } + return 0; +} + static ssize_t 
mounted_time_sec_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -1174,6 +1190,7 @@ F2FS_GENERAL_RO_ATTR(current_reserved_blocks); F2FS_GENERAL_RO_ATTR(unusable); F2FS_GENERAL_RO_ATTR(encoding); F2FS_GENERAL_RO_ATTR(encoding_flags); +F2FS_GENERAL_RO_ATTR(effective_lookup_mode); F2FS_GENERAL_RO_ATTR(mounted_time_sec); F2FS_GENERAL_RO_ATTR(main_blkaddr); F2FS_GENERAL_RO_ATTR(pending_discard); @@ -1290,6 +1307,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(current_reserved_blocks), ATTR_LIST(encoding), ATTR_LIST(encoding_flags), + ATTR_LIST(effective_lookup_mode), ATTR_LIST(mounted_time_sec), #ifdef CONFIG_F2FS_STAT_FS ATTR_LIST(cp_foreground_calls), @@ -1703,6 +1721,68 @@ static int __maybe_unused disk_map_seq_show(struct seq_file *seq, return 0; } +static int __maybe_unused donation_list_seq_show(struct seq_file *seq, + void *offset) +{ + struct super_block *sb = seq->private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + struct inode *inode; + struct f2fs_inode_info *fi; + struct dentry *dentry; + char *buf, *path; + int i; + + buf = f2fs_getname(sbi); + if (!buf) + return 0; + + seq_printf(seq, "Donation List\n"); + seq_printf(seq, " # of files : %u\n", sbi->donate_files); + seq_printf(seq, " %-50s %10s %20s %20s %22s\n", + "File path", "Status", "Donation offset (kb)", + "Donation size (kb)", "File cached size (kb)"); + seq_printf(seq, "---\n"); + + for (i = 0; i < sbi->donate_files; i++) { + spin_lock(&sbi->inode_lock[DONATE_INODE]); + if (list_empty(&sbi->inode_list[DONATE_INODE])) { + spin_unlock(&sbi->inode_lock[DONATE_INODE]); + break; + } + fi = list_first_entry(&sbi->inode_list[DONATE_INODE], + struct f2fs_inode_info, gdonate_list); + list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]); + inode = igrab(&fi->vfs_inode); + spin_unlock(&sbi->inode_lock[DONATE_INODE]); + + if (!inode) + continue; + + inode_lock_shared(inode); + + dentry = d_find_alias(inode); + if (!dentry) { + path = NULL; + } else { + path = 
dentry_path_raw(dentry, buf, PATH_MAX); + if (IS_ERR(path)) + goto next; + } + seq_printf(seq, " %-50s %10s %20llu %20llu %22llu\n", + path ? path : "", + is_inode_flag_set(inode, FI_DONATE_FINISHED) ? + "Evicted" : "Donated", + (loff_t)fi->donate_start << (PAGE_SHIFT - 10), + (loff_t)(fi->donate_end + 1) << (PAGE_SHIFT - 10), + (loff_t)inode->i_mapping->nrpages << (PAGE_SHIFT - 10)); +next: + inode_unlock_shared(inode); + iput(inode); + } + f2fs_putname(buf); + return 0; +} + int __init f2fs_init_sysfs(void) { int ret; @@ -1794,6 +1874,8 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi) discard_plist_seq_show, sb); proc_create_single_data("disk_map", 0444, sbi->s_proc, disk_map_seq_show, sb); + proc_create_single_data("donation_list", 0444, sbi->s_proc, + donation_list_seq_show, sb); return 0; put_feature_list_kobj: kobject_put(&sbi->s_feature_list_kobj); diff --git a/fs/file.c b/fs/file.c index 1f1181b189bf..c3effc10d430 100644 --- a/fs/file.c +++ b/fs/file.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include "internal.h" @@ -593,6 +594,14 @@ void fd_install(unsigned int fd, struct file *file) struct files_struct *files = current->files; struct fdtable *fdt; + if (is_dma_buf_file(file)) { + int err = dma_buf_account_task(file->private_data, current); + + if (err) + pr_err("dmabuf accounting failed during fd_install operation, err %d\n", + err); + } + rcu_read_lock_sched(); if (unlikely(files->resize_in_progress)) { diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c index 21fb9cdd20aa..68f820d5188c 100644 --- a/fs/fuse/backing.c +++ b/fs/fuse/backing.c @@ -406,23 +406,26 @@ int fuse_lseek_backing(struct fuse_bpf_args *fa, struct file *file, loff_t offse struct file *backing_file = fuse_file->backing_file; loff_t ret; - /* TODO: Handle changing of the file handle */ if (offset == 0) { if (whence == SEEK_CUR) { flo->offset = file->f_pos; - return flo->offset; + return 0; } if (whence == SEEK_SET) { flo->offset = vfs_setpos(file, 0, 0); - 
return flo->offset; + return 0; } } inode_lock(file->f_inode); backing_file->f_pos = file->f_pos; ret = vfs_llseek(backing_file, fli->offset, fli->whence); - flo->offset = ret; + + if (!IS_ERR(ERR_PTR(ret))) { + flo->offset = ret; + ret = 0; + } inode_unlock(file->f_inode); return ret; } @@ -2363,8 +2366,11 @@ static bool filldir(struct dir_context *ctx, const char *name, int namelen, return true; } -static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx) +static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx, + loff_t next_offset) { + char *buffstart = buf; + while (nbytes >= FUSE_NAME_OFFSET) { struct fuse_dirent *dirent = (struct fuse_dirent *) buf; size_t reclen = FUSE_DIRENT_SIZE(dirent); @@ -2378,12 +2384,18 @@ static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx) ctx->pos = dirent->off; if (!dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino, - dirent->type)) - break; + dirent->type)) { + // If we can't make any progress, user buffer is too small + if (buf == buffstart) + return -EINVAL; + else + return 0; + } buf += reclen; nbytes -= reclen; } + ctx->pos = next_offset; return 0; } @@ -2430,13 +2442,12 @@ void *fuse_readdir_finalize(struct fuse_bpf_args *fa, struct file *backing_dir = ff->backing_file; int err = 0; - err = parse_dirfile(fa->out_args[1].value, fa->out_args[1].size, ctx); + err = parse_dirfile(fa->out_args[1].value, fa->out_args[1].size, ctx, fro->offset); *force_again = !!fro->again; if (*force_again && !*allow_force) err = -EINVAL; - ctx->pos = fro->offset; - backing_dir->f_pos = fro->offset; + backing_dir->f_pos = ctx->pos; free_page((unsigned long) fa->out_args[1].value); return ERR_PTR(err); diff --git a/fs/proc/base.c b/fs/proc/base.c index 7cff02bc816e..e0a407c7dd1d 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -100,6 +100,7 @@ #include #include #include +#include #include #include #include "internal.h" @@ -2925,6 +2926,7 @@ LSM_DIR_OPS(apparmor); static const 
struct pid_entry attr_dir_stuff[] = { ATTR(NULL, "current", 0666), + ATTR(NULL, "selinux_flags", 0666), ATTR(NULL, "prev", 0444), ATTR(NULL, "exec", 0666), ATTR(NULL, "fscreate", 0666), @@ -3304,6 +3306,121 @@ static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns, } #endif /* CONFIG_STACKLEAK_METRICS */ +#ifdef CONFIG_DMA_SHARED_BUFFER +static int proc_dmabuf_rss_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + struct task_dma_buf_info *dmabuf_info = task->dmabuf_info; + + if (dmabuf_info) { + unsigned long rss; + + spin_lock(&dmabuf_info->lock); + rss = dmabuf_info->rss; + spin_unlock(&dmabuf_info->lock); + seq_printf(m, "%lu\n", rss); + } + + return 0; +} + +static int proc_dmabuf_rss_hwm_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct task_struct *task; + int ret = 0; + + task = get_proc_task(inode); + if (!task) + return -ESRCH; + + if (task->dmabuf_info) { + unsigned long rss_hwm; + + spin_lock(&task->dmabuf_info->lock); + rss_hwm = task->dmabuf_info->rss_hwm; + spin_unlock(&task->dmabuf_info->lock); + seq_printf(m, "%lu\n", rss_hwm); + } + + put_task_struct(task); + + return ret; +} + +static int proc_dmabuf_rss_hwm_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, proc_dmabuf_rss_hwm_show, inode); +} + +static ssize_t +proc_dmabuf_rss_hwm_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + struct task_struct *task; + unsigned long long val; + int ret; + + ret = kstrtoull_from_user(buf, count, 10, &val); + if (ret) + return ret; + + if (val != 0) + return -EINVAL; + + task = get_proc_task(inode); + if (!task) + return -ESRCH; + + if (!task->dmabuf_info) { + ret = -ENOENT; + } else { + spin_lock(&task->dmabuf_info->lock); + task->dmabuf_info->rss_hwm = task->dmabuf_info->rss; + spin_unlock(&task->dmabuf_info->lock); + } + + put_task_struct(task); + + return 
ret < 0 ? ret : count; +} + +static const struct file_operations proc_dmabuf_rss_hwm_operations = { + .open = proc_dmabuf_rss_hwm_open, + .write = proc_dmabuf_rss_hwm_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int proc_dmabuf_pss_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + struct task_dma_buf_record *rec; + + if (task->dmabuf_info) { + unsigned long pss = 0; + + spin_lock(&task->dmabuf_info->lock); + list_for_each_entry(rec, &task->dmabuf_info->dmabufs, node) { + s64 refs = atomic64_read(&rec->dmabuf->nr_task_refs); + + if (refs <= 0) { + pr_err("dmabuf has refs <= 0 %lld\n", refs); + continue; + } + + pss += rec->dmabuf->size / (size_t)refs; + } + spin_unlock(&task->dmabuf_info->lock); + seq_printf(m, "%lu\n", pss); + } + + return 0; +} +#endif + /* * Thread groups */ @@ -3427,6 +3544,11 @@ static const struct pid_entry tgid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif +#ifdef CONFIG_DMA_SHARED_BUFFER + ONE("dmabuf_rss", 0444, proc_dmabuf_rss_show), + REG("dmabuf_rss_hwm", 0644, proc_dmabuf_rss_hwm_operations), + ONE("dmabuf_pss", 0444, proc_dmabuf_pss_show), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 785bda3f2d0f..3b9e96a3af0c 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1755,8 +1755,11 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, user_uffdio_copy = (struct uffdio_copy __user *) arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_copy->copy))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_copy, user_uffdio_copy, @@ -1811,8 +1814,11 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, user_uffdio_zeropage = 
(struct uffdio_zeropage __user *) arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, @@ -1914,8 +1920,11 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) user_uffdio_continue = (struct uffdio_continue __user *)arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_continue, user_uffdio_continue, @@ -1971,8 +1980,11 @@ static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long user_uffdio_poison = (struct uffdio_poison __user *)arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_poison->updated))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_poison, user_uffdio_poison, @@ -2035,8 +2047,12 @@ static int userfaultfd_move(struct userfaultfd_ctx *ctx, user_uffdio_move = (struct uffdio_move __user *) arg; - if (atomic_read(&ctx->mmap_changing)) - return -EAGAIN; + ret = -EAGAIN; + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_move->move))) + return -EFAULT; + goto out; + } if (copy_from_user(&uffdio_move, user_uffdio_move, /* don't copy "move" last field */ diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f5e..7433f691087d 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -49,6 +49,8 @@ struct linux_binprm { struct cred *cred; /* new credentials */ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ unsigned int per_clear; /* bits to clear in current->personality */ 
+ /* if non-zero, emulate VA_BITS == 39, needed for programs that break when VA_BITS > 39 */ + int compat_va_39_bit; int argc, envc; const char *filename; /* Name of binary as seen by procps */ const char *interp; /* Name of the binary really executed. Most diff --git a/include/linux/cache.h b/include/linux/cache.h index 9900d20b76c2..9c285f4169aa 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -37,6 +37,8 @@ #define __ro_after_init __section(".data..ro_after_init") #endif +#define __read_only __ro_after_init + #ifndef ____cacheline_aligned #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) #endif diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 64d67293d76b..3ca7a08a90f4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -24,6 +24,8 @@ #include #include #include +#include +#include struct device; struct dma_buf; @@ -531,7 +533,13 @@ struct dma_buf { } *sysfs_entry; #endif - ANDROID_KABI_RESERVE(1); + /** + * @nr_task_refs: + * + * The number of tasks that reference this buffer. For calculating PSS. + */ + ANDROID_KABI_USE(1, atomic64_t nr_task_refs); + ANDROID_KABI_RESERVE(2); }; @@ -639,6 +647,63 @@ struct dma_buf_export_info { ANDROID_KABI_RESERVE(2); }; +/** + * struct task_dma_buf_record and struct task_dma_buf_info will NEVER be exposed + * to vendor modules, except possibly via an opaque pointer. Their definitions + * can therefore be hidden from MODVERSIONS CRC machinery, allowing arbitrary + * future changes. + */ +#ifdef __GENKSYMS__ + +struct task_dma_buf_record; +struct task_dma_buf_info; + +#else + +/** + * struct task_dma_buf_record - Holds the number of (VMA and FD) references to a + * dmabuf by a collection of tasks that share both mm_struct and files_struct. + * This is the list entry type for @task_dma_buf_info dmabufs list. + * + * @node: Stores the list this record is on. + * @dmabuf: The dmabuf this record is for. 
+ * @refcnt: The number of VMAs and FDs that reference @dmabuf by the tasks that + * share this record. + */ +struct task_dma_buf_record { + struct list_head node; + struct dma_buf *dmabuf; + unsigned long refcnt; +}; + +/** + * struct task_dma_buf_info - Holds RSS and RSS HWM counters, and a list of + * dmabufs for all tasks that share both mm_struct and files_struct. + * + * @rss: The sum of all dmabuf memory referenced by the task(s) via memory + * mappings or file descriptors in bytes. Buffers referenced more than + * once by the process (multiple mmaps, multiple FDs, or any combination + * of both mmaps and FDs) only cause the buffer to be accounted to the + * process once. Partial mappings cause the full size of the buffer to be + * accounted, regardless of the size of the mapping. + * @rss_hwm: The maximum value of @rss over the lifetime of this struct. (Unless + * reset by userspace.) + * @refcnt: The number of tasks sharing this struct. + * @lock: Lock protecting @rss, @dmabufs, and @dmabuf_count. + * @dmabufs: List of all dmabufs referenced by the task(s). + * @dmabuf_count: The number of elements on the @dmabufs list. 
+ */ +struct task_dma_buf_info { + unsigned long rss; + unsigned long rss_hwm; + refcount_t refcnt; + spinlock_t lock; + struct list_head dmabufs; + unsigned int dmabuf_count; +}; + +#endif + /** * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name @@ -691,7 +756,6 @@ dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) return !!attach->importer_ops; } -int is_dma_buf_file(struct file *file); int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf, void *private), void *private); struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, @@ -741,4 +805,26 @@ int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); long dma_buf_set_name(struct dma_buf *dmabuf, const char *name); int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags); + +#ifdef CONFIG_DMA_SHARED_BUFFER + +int is_dma_buf_file(struct file *file); +int dma_buf_account_task(struct dma_buf *dmabuf, struct task_struct *task); +void dma_buf_unaccount_task(struct dma_buf *dmabuf, struct task_struct *task); +int copy_dmabuf_info(u64 clone_flags, struct task_struct *task); +void put_dmabuf_info(struct task_struct *task); + +#else /* CONFIG_DMA_SHARED_BUFFER */ + +static inline int is_dma_buf_file(struct file *file) { return 0; } +static inline int dma_buf_account_task(struct dma_buf *dmabuf, + struct task_struct *task) { return 0; } +static inline void dma_buf_unaccount_task(struct dma_buf *dmabuf, + struct task_struct *task) {} +static inline int copy_dmabuf_info(u64 clone_flags, + struct task_struct *task) { return 0; } +static inline void put_dmabuf_info(struct task_struct *task) {} + +#endif /* CONFIG_DMA_SHARED_BUFFER */ + #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 05019f1e48e4..61a74e655653 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -265,6 +265,14 @@ static 
inline void tag_clear_highpage(struct page *page) void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2); #else + +static inline void verify_zero_highpage(struct page *page) +{ + void *kaddr = kmap_atomic(page); + BUG_ON(memchr_inv(kaddr, 0, PAGE_SIZE)); + kunmap_atomic(kaddr); +} + static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index f537228a8495..c048f9512ac4 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2687,7 +2687,6 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 #define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 -#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 /** * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index d03a89389766..91a661729954 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -90,7 +90,7 @@ struct ipv6_devconf { ANDROID_KABI_RESERVE(1); ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); - ANDROID_KABI_BACKPORT_OK(4); + ANDROID_KABI_BACKPORT_USE(4, struct { __u8 ra_honor_pio_pflag; __u8 padding4[7]; }); }; struct ipv6_params { diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 97a8b21eb033..669581ad395a 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -122,6 +122,9 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, int ipv6_skb_to_auditdata(struct sk_buff *skb, struct common_audit_data *ad, u8 *proto); +void dump_common_audit_data(struct audit_buffer *ab, + struct common_audit_data *a); + void common_lsm_audit(struct common_audit_data *a, void (*pre_audit)(struct audit_buffer *, void *), void (*post_audit)(struct audit_buffer *, void *)); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 0d999b355557..d84c4d279daf 100644 
--- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -604,6 +604,7 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) { } static inline void memtest_report_meminfo(struct seq_file *m) { } #endif +extern void early_memzero(phys_addr_t start, phys_addr_t end); extern void __init_memblock memblock_memsize_record(const char *name, phys_addr_t base, phys_addr_t size, bool nomap, bool reusable); diff --git a/include/linux/sched.h b/include/linux/sched.h index 1299b4497d87..61256088b1f4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -70,6 +70,7 @@ struct seq_file; struct sighand_struct; struct signal_struct; struct task_delay_info; +struct task_dma_buf_info; struct task_group; struct user_event_mm; @@ -1516,7 +1517,9 @@ struct task_struct { */ struct callback_head l1d_flush_kill; #endif - ANDROID_KABI_RESERVE(1); + + ANDROID_KABI_USE(1, struct task_dma_buf_info *dmabuf_info); + ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); ANDROID_KABI_RESERVE(4); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index deb90cf4bffb..2d26844b4a48 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -131,6 +131,11 @@ struct kmem_cache { unsigned long random; #endif +#ifdef CONFIG_SLAB_CANARY + unsigned long random_active; + unsigned long random_inactive; +#endif + #ifdef CONFIG_NUMA /* * Defragmentation by allocating from a remote node. 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 53ae2273c859..034ed27e448d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -340,6 +340,7 @@ extern void arch_suspend_enable_irqs(void); extern int pm_suspend(suspend_state_t state); extern bool sync_on_suspend_enabled; +extern void suspend_abort_fs_sync(void); #else /* !CONFIG_SUSPEND */ #define suspend_valid_only_mem NULL @@ -360,6 +361,7 @@ static inline bool idle_should_enter_s2idle(void) { return false; } static inline void __init pm_states_init(void) {} static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {} static inline void s2idle_wake(void) {} +static inline void suspend_abort_fs_sync(void) {} #endif /* !CONFIG_SUSPEND */ /* struct pbe is used for creating lists of pages that should be restored diff --git a/include/linux/swap.h b/include/linux/swap.h index 798cbc9b6735..8bc60a00141d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -429,10 +429,17 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #define MEMCG_RECLAIM_MAY_SWAP (1 << 1) #define MEMCG_RECLAIM_PROACTIVE (1 << 2) +#define MIN_SWAPPINESS 0 +#define MAX_SWAPPINESS 200 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, unsigned int reclaim_options); +extern unsigned long try_to_free_mem_cgroup_pages_with_swappiness(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + unsigned int reclaim_options, + int *swappiness); extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index ab7ca872950b..9d83aed225ad 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -161,6 +161,10 @@ struct tcpm_port; struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); void tcpm_unregister_port(struct tcpm_port *port); 
+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, + unsigned int nr_pdo, + unsigned int operating_snk_mw); + void tcpm_vbus_change(struct tcpm_port *port); void tcpm_cc_change(struct tcpm_port *port); void tcpm_sink_frs(struct tcpm_port *port); diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index b5e9f603a75a..24e9dc62dc8b 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -12,6 +12,7 @@ struct virtio_vsock_skb_cb { bool reply; bool tap_delivered; + u32 offset; }; #define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb)) @@ -46,31 +47,50 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb) VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false; } -static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb) +static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len) { - u32 len; + DEBUG_NET_WARN_ON_ONCE(skb->len); - len = le32_to_cpu(virtio_vsock_hdr(skb)->len); - - if (len > 0) + if (skb_is_nonlinear(skb)) + skb->len = len; + else skb_put(skb, len); } -static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask) +static inline struct sk_buff * +__virtio_vsock_alloc_skb_with_frags(unsigned int header_len, + unsigned int data_len, + gfp_t mask) { struct sk_buff *skb; + int err; - if (size < VIRTIO_VSOCK_SKB_HEADROOM) - return NULL; - - skb = alloc_skb(size, mask); + skb = alloc_skb_with_frags(header_len, data_len, + PAGE_ALLOC_COSTLY_ORDER, &err, mask); if (!skb) return NULL; skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM); + skb->data_len = data_len; return skb; } +static inline struct sk_buff * +virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask) +{ + return __virtio_vsock_alloc_skb_with_frags(size, 0, mask); +} + +static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask) +{ + if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + return 
virtio_vsock_alloc_linear_skb(size, mask); + + size -= VIRTIO_VSOCK_SKB_HEADROOM; + return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM, + size, mask); +} + static inline void virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb) { @@ -110,10 +130,14 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb) return (size_t)(skb_end_pointer(skb) - skb->head); } -#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) +/* Dimension the RX SKB so that the entire thing fits exactly into + * a single 4KiB page. This avoids wasting memory due to alloc_skb() + * rounding up to the next page order and also means that we + * don't leave higher-order pages sitting around in the RX queue. + */ +#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4) #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL -#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE virtio_transport_max_vsock_pkt_buf_size -extern uint virtio_transport_max_vsock_pkt_buf_size; +#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) enum { VSOCK_VQ_RX = 0, /* for host to guest data */ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 5bc5f9ae7d34..be85afe038cb 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -36,10 +36,22 @@ struct prefix_info { struct __packed { #if defined(__BIG_ENDIAN_BITFIELD) __u8 onlink : 1, - autoconf : 1, + autoconf : 1, +# ifdef __GENKSYMS__ reserved : 6; +# else + routeraddr : 1, + preferpd : 1, + reserved : 4; +# endif #elif defined(__LITTLE_ENDIAN_BITFIELD) +# ifdef __GENKSYMS__ __u8 reserved : 6, +# else + __u8 reserved : 4, + preferpd : 1, + routeraddr : 1, +# endif autoconf : 1, onlink : 1; #else diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 73dbc130d215..9e07d44856dc 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -119,10 +119,6 @@ struct wiphy; * restrictions. * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel. 
* @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT - * @IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT: Client connection with VLP AP - * not permitted using this channel - * @IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT: Client connection with AFC AP - * not permitted using this channel */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -147,8 +143,6 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_NO_320MHZ = 1<<19, IEEE80211_CHAN_NO_EHT = 1<<20, IEEE80211_CHAN_DFS_CONCURRENT = 1<<21, - IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22, - IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23, }; #define IEEE80211_CHAN_NO_HT40 \ @@ -4849,7 +4843,7 @@ struct cfg80211_ops { * enum wiphy_flags - wiphy capability flags * * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split - * into two, first for legacy bands and second for 6 GHz. + * into two, first for legacy bands and second for UHB. * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this * wiphy at all * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 68f5280a41a4..896f8de946d0 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -8,6 +8,7 @@ #include #include #include +#include TRACE_EVENT(kmem_cache_alloc, @@ -487,6 +488,30 @@ TRACE_EVENT(rss_stat, __print_symbolic(__entry->member, TRACE_MM_PAGES), __entry->size) ); + +TRACE_EVENT(dmabuf_rss_stat, + + TP_PROTO(size_t rss, ssize_t rss_delta, struct dma_buf *dmabuf), + + TP_ARGS(rss, rss_delta, dmabuf), + + TP_STRUCT__entry( + __field(size_t, rss) + __field(ssize_t, rss_delta) + __field(unsigned long, i_ino) + ), + + TP_fast_assign( + __entry->rss = rss; + __entry->rss_delta = rss_delta; + __entry->i_ino = file_inode(dmabuf->file)->i_ino; + ), + + TP_printk("rss=%zu delta=%zd i_ino=%lu", + __entry->rss, + __entry->rss_delta, + __entry->i_ino) + ); #endif /* _TRACE_KMEM_H */ /* This part must be outside 
protection */ diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h index 8087138ba33c..5b26dbb21445 100644 --- a/include/trace/hooks/mm.h +++ b/include/trace/hooks/mm.h @@ -615,6 +615,28 @@ DECLARE_HOOK(android_vh_pageset_update, DECLARE_HOOK(android_vh_mempool_alloc_skip_wait, TP_PROTO(gfp_t *gfp_flags, bool *skip_wait), TP_ARGS(gfp_flags, skip_wait)); +DECLARE_HOOK(android_vh_mm_customize_ac, + TP_PROTO(gfp_t gfp, unsigned int order, struct zonelist **zonelist, + struct zoneref **preferred_zoneref, enum zone_type *highest_zoneidx, + unsigned int *alloc_flags), + TP_ARGS(gfp, order, zonelist, preferred_zoneref, highest_zoneidx, alloc_flags)); +DECLARE_HOOK(android_vh_mm_customize_rmqueue, + TP_PROTO(struct zone *zone, unsigned int order, unsigned int *alloc_flags, + int *migratetype), + TP_ARGS(zone, order, alloc_flags, migratetype)); +DECLARE_HOOK(android_vh_mm_customize_suitable_zone, + TP_PROTO(struct zone *zone, gfp_t gfp, int order, enum zone_type highest_zoneidx, + bool *use_this_zone, bool *suitable), + TP_ARGS(zone, gfp, order, highest_zoneidx, use_this_zone, suitable)); +DECLARE_HOOK(android_vh_mm_customize_zone_max_order, + TP_PROTO(struct zone *zone, int *max_order), + TP_ARGS(zone, max_order)); +DECLARE_HOOK(android_vh_mm_customize_zone_pageset, + TP_PROTO(struct zone *zone, int *new_high, int *new_batch), + TP_ARGS(zone, new_high, new_batch)); +DECLARE_HOOK(android_vh_mm_customize_lru_add_dst, + TP_PROTO(struct lruvec *lruvec, struct folio *src, struct folio *dst, bool *added), + TP_ARGS(lruvec, src, dst, added)); #endif /* _TRACE_HOOK_MM_H */ /* This part must be outside protection */ diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h index c0b0e9d453a1..61e2fd8692b1 100644 --- a/include/trace/hooks/sched.h +++ b/include/trace/hooks/sched.h @@ -488,6 +488,14 @@ DECLARE_HOOK(android_vh_set_task_comm, TP_PROTO(struct task_struct *p), TP_ARGS(p)); +DECLARE_HOOK(android_vh_chk_task, + TP_PROTO(struct task_struct **pp, 
struct rq *rq), + TP_ARGS(pp, rq)); + +DECLARE_HOOK(android_vh_put_task, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)); + #endif /* _TRACE_HOOK_SCHED_H */ /* This part must be outside protection */ #include diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h index 4bdcfc29c2f4..8bba24542944 100644 --- a/include/trace/hooks/vmscan.h +++ b/include/trace/hooks/vmscan.h @@ -133,6 +133,17 @@ DECLARE_HOOK(android_vh_direct_reclaim_end, DECLARE_HOOK(android_vh_should_split_folio_to_list, TP_PROTO(struct folio *folio, bool *should_split_to_list), TP_ARGS(folio, should_split_to_list)); +DECLARE_HOOK(android_vh_mm_isolate_priv_lru, + TP_PROTO(unsigned long nr_to_scan, struct lruvec *lruvec, enum lru_list lru, + struct list_head *dst, int reclaim_idx, bool may_unmap, + unsigned long *nr_scanned, unsigned long *nr_taken), + TP_ARGS(nr_to_scan, lruvec, lru, dst, reclaim_idx, may_unmap, nr_scanned, nr_taken)); +DECLARE_HOOK(android_vh_mm_customize_pgdat_balanced, + TP_PROTO(int order, int highest_zoneidx, bool *balanced, bool *customized), + TP_ARGS(order, highest_zoneidx, balanced, customized)); +DECLARE_HOOK(android_vh_mm_customize_file_is_tiny, + TP_PROTO(unsigned int may_swap, int order, int highest_zoneidx, bool *file_is_tiny), + TP_ARGS(may_swap, order, highest_zoneidx, file_is_tiny)); #endif /* _TRACE_HOOK_VMSCAN_H */ /* This part must be outside protection */ #include diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d676ed2b246e..b58cf764e31d 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -143,6 +143,7 @@ #define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */ #define AUDIT_MAC_CALIPSO_ADD 1418 /* NetLabel: add CALIPSO DOI entry */ #define AUDIT_MAC_CALIPSO_DEL 1419 /* NetLabel: del CALIPSO DOI entry */ +#define AUDIT_SELINUX_TSEC_FLAG_DENIAL 1499 #define AUDIT_FIRST_KERN_ANOM_MSG 1700 #define AUDIT_LAST_KERN_ANOM_MSG 1799 diff --git a/include/uapi/linux/nl80211.h 
b/include/uapi/linux/nl80211.h index 056c6694d1f1..8315d0f7e467 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -4254,10 +4254,6 @@ enum nl80211_wmm_rule { * allowed for peer-to-peer or adhoc communication under the control * of a DFS master which operates on the same channel (FCC-594280 D01 * Section B.3). Should be used together with %NL80211_RRF_DFS only. - * @NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP - * not allowed using this channel - * @NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP - * not allowed using this channel * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4298,8 +4294,6 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_NO_EHT, NL80211_FREQUENCY_ATTR_PSD, NL80211_FREQUENCY_ATTR_DFS_CONCURRENT, - NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT, - NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4312,10 +4306,6 @@ enum nl80211_frequency_attr { #define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR #define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \ NL80211_FREQUENCY_ATTR_IR_CONCURRENT -#define NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT \ - NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT -#define NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT \ - NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT /** * enum nl80211_bitrate_attr - bitrate attributes @@ -4513,8 +4503,6 @@ enum nl80211_sched_scan_match_attr { peer-to-peer or adhoc communication under the control of a DFS master which 
operates on the same channel (FCC-594280 D01 Section B.3). Should be used together with %NL80211_RRF_DFS only. - * @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed - * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4537,8 +4525,6 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_EHT = 1<<19, NL80211_RRF_PSD = 1<<20, NL80211_RRF_DFS_CONCURRENT = 1<<21, - NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22, - NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -4547,8 +4533,6 @@ enum nl80211_reg_rule_flags { #define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\ NL80211_RRF_NO_HT40PLUS) #define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT -#define NL80211_RRF_NO_UHB_VLP_CLIENT NL80211_RRF_NO_6GHZ_VLP_CLIENT -#define NL80211_RRF_NO_UHB_AFC_CLIENT NL80211_RRF_NO_6GHZ_AFC_CLIENT /* For backport compatibility with older userspace */ #define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS) @@ -5096,17 +5080,11 @@ enum nl80211_bss_use_for { * BSS isn't possible * @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't * supported by the device, and this BSS entry represents one. - * @NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH: STA is not supporting - * the AP power type (SP, VLP, AP) that the AP uses. 
*/ enum nl80211_bss_cannot_use_reasons { NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0, - NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 1 << 1, }; -#define NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH \ - NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH - /** * enum nl80211_bss - netlink attributes for a BSS * diff --git a/init/init_task.c b/init/init_task.c index 31ceb0e469f7..d80c007ab59b 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -214,6 +214,7 @@ struct task_struct init_task .android_vendor_data1 = {0, }, .android_oem_data1 = {0, }, #endif + .dmabuf_info = NULL, }; EXPORT_SYMBOL(init_task); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 4866e653d340..76b94233322f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -561,7 +561,7 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) /* All BPF JIT sysctl knobs here. */ int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); -int bpf_jit_harden __read_mostly; +int bpf_jit_harden __read_mostly = 2; long bpf_jit_limit __read_mostly; long bpf_jit_limit_max __read_mostly; diff --git a/kernel/fork.c b/kernel/fork.c index 75b1a4458a7e..8c6bfb33e617 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -101,6 +101,7 @@ #include #include #include +#include #include #include @@ -1000,6 +1001,8 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); + trace_android_vh_put_task(tsk); + put_dmabuf_info(tsk); io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); @@ -1195,6 +1198,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; tsk->worker_private = NULL; + tsk->dmabuf_info = NULL; kcov_task_init(tsk); kmsan_task_create(tsk); @@ -2629,6 +2633,12 @@ __latent_entropy struct task_struct *copy_process( p->rethooks.first = NULL; #endif + retval = copy_dmabuf_info(clone_flags, 
p); + if (retval) { + pr_err("failed to copy dmabuf accounting info, err %d\n", retval); + goto bad_fork_put_pidfd; + } + /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. It should be noted that the new process's css_set can be changed @@ -2637,7 +2647,7 @@ __latent_entropy struct task_struct *copy_process( */ retval = cgroup_can_fork(p, args); if (retval) - goto bad_fork_put_pidfd; + goto bad_fork_cleanup_dmabuf; /* * Now that the cgroups are pinned, re-clone the parent cgroup and put @@ -2782,6 +2792,8 @@ __latent_entropy struct task_struct *copy_process( spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); cgroup_cancel_fork(p, args); +bad_fork_cleanup_dmabuf: + put_dmabuf_info(p); bad_fork_put_pidfd: if (clone_flags & CLONE_PIDFD) { fput(pidfile); diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 98a7e7477c3d..b077b62d1202 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -265,7 +265,7 @@ static int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, * This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs * and hung_task_check_interval_secs */ -static const unsigned long hung_task_timeout_max = (LONG_MAX / HZ); +static const unsigned long hung_task_timeout_max __read_only = (LONG_MAX / HZ); static struct ctl_table hung_task_sysctls[] = { #ifdef CONFIG_SMP { diff --git a/kernel/power/process.c b/kernel/power/process.c index 7e517bb0ff28..a22a91b7e3d9 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -140,7 +140,6 @@ int freeze_processes(void) if (!pm_freezing) static_branch_inc(&freezer_active); - pm_wakeup_clear(0); pm_freezing = true; error = try_to_freeze_tasks(true); if (!error) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 8972ebab0461..a4c291700f8f 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "power.h" @@ -75,6 +76,23 @@ 
bool pm_suspend_default_s2idle(void) } EXPORT_SYMBOL_GPL(pm_suspend_default_s2idle); +static bool suspend_fs_sync_queued; +static DEFINE_SPINLOCK(suspend_fs_sync_lock); +static DECLARE_COMPLETION(suspend_fs_sync_complete); + +/** + * suspend_abort_fs_sync - Abort fs_sync to abort suspend early + * + * This function aborts the fs_sync stage of suspend so that suspend itself can + * be aborted early. + */ +void suspend_abort_fs_sync(void) +{ + spin_lock(&suspend_fs_sync_lock); + complete(&suspend_fs_sync_complete); + spin_unlock(&suspend_fs_sync_lock); +} + void s2idle_set_ops(const struct platform_s2idle_ops *ops) { unsigned int sleep_flags; @@ -399,6 +417,68 @@ void __weak arch_suspend_enable_irqs(void) local_irq_enable(); } + +static bool pm_fs_abort; +module_param(pm_fs_abort, bool, 0644); +MODULE_PARM_DESC(pm_fs_abort, + "Flag to enable abort during fs_sync phase of suspend"); + +static void sync_filesystems_fn(struct work_struct *work) +{ + ksys_sync_helper(); + + spin_lock(&suspend_fs_sync_lock); + suspend_fs_sync_queued = false; + complete(&suspend_fs_sync_complete); + spin_unlock(&suspend_fs_sync_lock); +} +static DECLARE_WORK(sync_filesystems, sync_filesystems_fn); + +/** + * suspend_fs_sync_with_abort - Trigger fs_sync with ability to abort + * + * Return 0 on successful file system sync, otherwise returns -EBUSY if file + * system sync was aborted. + */ +static int suspend_fs_sync_with_abort(void) +{ + if (!pm_fs_abort) { + ksys_sync_helper(); + return 0; + } + bool need_suspend_fs_sync_requeue; + +Start_fs_sync: + spin_lock(&suspend_fs_sync_lock); + reinit_completion(&suspend_fs_sync_complete); + /* + * Handle the case where a suspend immediately follows a previous + * suspend that was aborted during fs_sync. In this case, wait for the + * previous filesystem sync to finish. Then do another filesystem sync + * so any subsequent filesystem changes are synced before suspending. 
+ */ + if (suspend_fs_sync_queued) { + need_suspend_fs_sync_requeue = true; + } else { + need_suspend_fs_sync_requeue = false; + suspend_fs_sync_queued = true; + schedule_work(&sync_filesystems); + } + spin_unlock(&suspend_fs_sync_lock); + + /* + * Completion is triggered by fs_sync finishing or a suspend abort + * signal, whichever comes first + */ + wait_for_completion(&suspend_fs_sync_complete); + if (pm_wakeup_pending()) + return -EBUSY; + if (need_suspend_fs_sync_requeue) + goto Start_fs_sync; + + return 0; +} + /** * suspend_enter - Make the system enter the given sleep state. * @state: System sleep state to enter. @@ -597,10 +677,13 @@ static int enter_state(suspend_state_t state) if (state == PM_SUSPEND_TO_IDLE) s2idle_begin(); + pm_wakeup_clear(0); if (sync_on_suspend_enabled) { trace_suspend_resume(TPS("sync_filesystems"), 0, true); - ksys_sync_helper(); + error = suspend_fs_sync_with_abort(); trace_suspend_resume(TPS("sync_filesystems"), 0, false); + if (error) + goto Unlock; } pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 8bf37da3a317..f66ce94b3c72 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1128,6 +1128,17 @@ static unsigned int __init add_to_rb(struct printk_ringbuffer *rb, static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata; +static void print_log_buf_usage_stats(void) +{ + unsigned int descs_count = log_buf_len >> PRB_AVGBITS; + size_t meta_data_size; + + meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info)); + + pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n", + log_buf_len, meta_data_size, log_buf_len + meta_data_size); +} + void __init setup_log_buf(int early) { struct printk_info *new_infos; @@ -1157,20 +1168,25 @@ void __init setup_log_buf(int early) if (!early && !new_log_buf_len) log_buf_add_cpu(); - if (!new_log_buf_len) + if (!new_log_buf_len) { + /* Show the memory stats 
only once. */ + if (!early) + goto out; + return; + } new_descs_count = new_log_buf_len >> PRB_AVGBITS; if (new_descs_count == 0) { pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len); - return; + goto out; } new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN); if (unlikely(!new_log_buf)) { pr_err("log_buf_len: %lu text bytes not available\n", new_log_buf_len); - return; + goto out; } new_descs_size = new_descs_count * sizeof(struct prb_desc); @@ -1233,7 +1249,7 @@ void __init setup_log_buf(int early) prb_next_seq(&printk_rb_static) - seq); } - pr_info("log_buf_len: %u bytes\n", log_buf_len); + print_log_buf_usage_stats(); pr_info("early log buf free: %u(%u%%)\n", free, (free * 100) / __LOG_BUF_LEN); return; @@ -1242,6 +1258,8 @@ void __init setup_log_buf(int early) memblock_free(new_descs, new_descs_size); err_free_log_buf: memblock_free(new_log_buf, new_log_buf_len); +out: + print_log_buf_usage_stats(); } static bool __read_mostly ignore_loglevel; diff --git a/kernel/printk/sysctl.c b/kernel/printk/sysctl.c index c228343eeb97..b0d4b2c7e105 100644 --- a/kernel/printk/sysctl.c +++ b/kernel/printk/sysctl.c @@ -11,6 +11,12 @@ static const int ten_thousand = 10000; +/* External variables not in a header file. 
*/ +#if IS_ENABLED(CONFIG_USB) +int deny_new_usb __read_mostly = 0; +EXPORT_SYMBOL(deny_new_usb); +#endif + static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { @@ -76,6 +82,17 @@ static struct ctl_table printk_sysctls[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, +#if IS_ENABLED(CONFIG_USB) + { + .procname = "deny_new_usb2", + .data = &deny_new_usb, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax_sysadmin, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif {} }; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 41f11c0f834e..80e7169ee74d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6170,6 +6170,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (unlikely(p == RETRY_TASK)) goto restart; + trace_android_vh_chk_task(&p, rq); + /* Assume the next prioritized class is idle_sched_class */ if (!p) { put_prev_task(rq, prev); @@ -6184,6 +6186,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) for_each_class(class) { p = class->pick_next_task(rq); + trace_android_vh_chk_task(&p, rq); if (p) return p; } diff --git a/kernel/sched/vendor_hooks.c b/kernel/sched/vendor_hooks.c index 788b1dfd27bc..5c941d9740f6 100644 --- a/kernel/sched/vendor_hooks.c +++ b/kernel/sched/vendor_hooks.c @@ -124,3 +124,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prio_inheritance); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prio_restore); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_task_comm); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_chk_task); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_put_task); diff --git a/kernel/softirq.c b/kernel/softirq.c index 28207ff4f110..241a3a29310a 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -63,7 +63,7 @@ DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); #endif 
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; +static struct softirq_action softirq_vec[NR_SOFTIRQS] __ro_after_init __aligned(PAGE_SIZE); DEFINE_PER_CPU(struct task_struct *, ksoftirqd); EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c279c4ecc5d9..c685147e9514 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -82,10 +82,10 @@ #endif /* shared constants to be used in various sysctls */ -const int sysctl_vals[] = { 0, 1, 2, 3, 4, 100, 200, 1000, 3000, INT_MAX, 65535, -1 }; +const int sysctl_vals[] __read_only = { 0, 1, 2, 3, 4, 100, 200, 1000, 3000, INT_MAX, 65535, -1 }; EXPORT_SYMBOL(sysctl_vals); -const unsigned long sysctl_long_vals[] = { 0, 1, LONG_MAX }; +const unsigned long sysctl_long_vals[] __read_only = { 0, 1, LONG_MAX }; EXPORT_SYMBOL_GPL(sysctl_long_vals); #if defined(CONFIG_SYSCTL) @@ -93,12 +93,12 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); /* Constants used for minimum and maximum */ #ifdef CONFIG_PERF_EVENTS -static const int six_hundred_forty_kb = 640 * 1024; +static const int six_hundred_forty_kb __read_only = 640 * 1024; #endif -static const int ngroups_max = NGROUPS_MAX; -static const int cap_last_cap = CAP_LAST_CAP; +static const int ngroups_max __read_only = NGROUPS_MAX; +static const int cap_last_cap __read_only = CAP_LAST_CAP; #ifdef CONFIG_PROC_SYSCTL diff --git a/mm/Kconfig b/mm/Kconfig index 2b86f4fd9abd..90ba01632194 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -325,6 +325,23 @@ config SLAB_FREELIST_HARDENED sanity-checking than others. This option is most effective with CONFIG_SLUB. +config SLAB_CANARY + depends on SLUB + depends on !SLAB_MERGE_DEFAULT + bool "SLAB canaries" + default y + help + Place canaries at the end of kernel slab allocations, sacrificing + some performance and memory usage for security. + + Canaries can detect some forms of heap corruption when allocations + are freed and as part of the HARDENED_USERCOPY feature. 
It provides + basic use-after-free detection for HARDENED_USERCOPY. + + Canaries absorb small overflows (rendering them harmless), mitigate + non-NUL terminated C string overflows on 64-bit via a guaranteed zero + byte and provide basic double-free detection. + config SLUB_STATS default n bool "Enable SLUB performance statistics" diff --git a/mm/Makefile b/mm/Makefile index 29037c2c8c99..ac8119ff4924 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -50,7 +50,7 @@ ifdef CONFIG_64BIT mmu-$(CONFIG_MMU) += mseal.o endif -obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ +obj-y := memzero.o filemap.o mempool.o oom_kill.o fadvise.o \ maccess.o page-writeback.o folio-compat.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6ac6febf6fad..792193e57784 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2886,11 +2886,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, static void unmap_folio(struct folio *folio) { - enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | - TTU_SYNC; + enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC; VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + if (folio_test_pmd_mappable(folio)) + ttu_flags |= TTU_SPLIT_HUGE_PMD; + /* * Anon pages need migration entries to preserve them, but file * pages can simply be left unmapped, then faulted back on demand. @@ -3119,6 +3121,12 @@ static void reset_src_folio(struct folio *src) static bool lru_add_dst(struct lruvec *lruvec, struct folio *src, struct folio *dst) { + bool added = false; + + trace_android_vh_mm_customize_lru_add_dst(lruvec, src, dst, &added); + if (added) + return true; + if (folio_can_split(src)) return false; @@ -3338,7 +3346,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, if (nr_dropped) shmem_uncharge(head->mapping->host, nr_dropped); - remap_page(folio, nr, PageAnon(head) ? 
RMP_USE_SHARED_ZEROPAGE : 0); + remap_page(folio, nr, (can_split && PageAnon(head)) ? RMP_USE_SHARED_ZEROPAGE : 0); for (i = 0; i < nr; i++) { struct page *subpage = folio_dst_page(folio, i); diff --git a/mm/kfence/report.c b/mm/kfence/report.c index c509aed326ce..1e77ef5f0db3 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -272,6 +272,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r lockdep_on(); + BUG_ON(IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)); check_panic_on_warn("KFENCE"); /* We encountered a memory safety error, taint the kernel! */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ba4dc0146c54..eec8dde4ec4c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -2479,9 +2480,10 @@ static unsigned long reclaim_high(struct mem_cgroup *memcg, memcg_memory_event(memcg, MEMCG_HIGH); psi_memstall_enter(&pflags); - nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, + nr_reclaimed += try_to_free_mem_cgroup_pages_with_swappiness(memcg, nr_pages, gfp_mask, - MEMCG_RECLAIM_MAY_SWAP); + MEMCG_RECLAIM_MAY_SWAP, + NULL); psi_memstall_leave(&pflags); } while ((memcg = parent_mem_cgroup(memcg)) && !mem_cgroup_is_root(memcg)); @@ -2771,8 +2773,8 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, raised_max_event = true; psi_memstall_enter(&pflags); - nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, - gfp_mask, reclaim_options); + nr_reclaimed = try_to_free_mem_cgroup_pages_with_swappiness(mem_over_limit, nr_pages, + gfp_mask, reclaim_options, NULL); psi_memstall_leave(&pflags); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) @@ -6688,8 +6690,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, continue; } - reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, - GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP); + reclaimed = 
try_to_free_mem_cgroup_pages_with_swappiness(memcg, nr_pages - high, + GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL); if (!reclaimed && !nr_retries--) break; @@ -6737,8 +6739,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, } if (nr_reclaims) { - if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, - GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP)) + if (!try_to_free_mem_cgroup_pages_with_swappiness(memcg, nr_pages - max, + GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP, NULL)) nr_reclaims--; continue; } @@ -6863,19 +6865,50 @@ static ssize_t memory_oom_group_write(struct kernfs_open_file *of, return nbytes; } +enum { + MEMORY_RECLAIM_SWAPPINESS = 0, + MEMORY_RECLAIM_NULL, +}; + +static const match_table_t tokens = { + { MEMORY_RECLAIM_SWAPPINESS, "swappiness=%d"}, + { MEMORY_RECLAIM_NULL, NULL }, +}; + static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); unsigned int nr_retries = MAX_RECLAIM_RETRIES; unsigned long nr_to_reclaim, nr_reclaimed = 0; + int swappiness = -1; unsigned int reclaim_options; - int err; + char *old_buf, *start; + substring_t args[MAX_OPT_ARGS]; buf = strstrip(buf); - err = page_counter_memparse(buf, "", &nr_to_reclaim); - if (err) - return err; + + old_buf = buf; + nr_to_reclaim = memparse(buf, &buf) / PAGE_SIZE; + if (buf == old_buf) + return -EINVAL; + + buf = strstrip(buf); + + while ((start = strsep(&buf, " ")) != NULL) { + if (!strlen(start)) + continue; + switch (match_token(start, tokens, args)) { + case MEMORY_RECLAIM_SWAPPINESS: + if (match_int(&args[0], &swappiness)) + return -EINVAL; + if (swappiness < MIN_SWAPPINESS || swappiness > MAX_SWAPPINESS) + return -EINVAL; + break; + default: + return -EINVAL; + } + } reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE; while (nr_reclaimed < nr_to_reclaim) { @@ -6894,8 +6927,10 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, if (!nr_retries) 
lru_add_drain_all(); - reclaimed = try_to_free_mem_cgroup_pages(memcg, - batch_size, GFP_KERNEL, reclaim_options); + reclaimed = try_to_free_mem_cgroup_pages_with_swappiness(memcg, + batch_size, GFP_KERNEL, + reclaim_options, + swappiness == -1 ? NULL : &swappiness); if (!reclaimed && !nr_retries--) return -EAGAIN; diff --git a/mm/memzero.c b/mm/memzero.c new file mode 100644 index 000000000000..c7a88ee89b81 --- /dev/null +++ b/mm/memzero.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +void __init early_memzero(phys_addr_t start, phys_addr_t end) +{ + u64 i; + phys_addr_t this_start, this_end; + + pr_info("Early memory zeroing"); + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &this_start, + &this_end, NULL) { + this_start = clamp(this_start, start, end); + this_end = clamp(this_end, start, end); + if (this_start < this_end) { + pr_info(" zero %pa - %pa\n", &this_start, &this_end); + memzero_explicit(__va(this_start), this_end - this_start); + } + } +} diff --git a/mm/mmap.c b/mm/mmap.c index 78640961ab74..685f4ba0ef9e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -144,8 +145,11 @@ static void remove_vma(struct vm_area_struct *vma, bool unreachable) { might_sleep(); vma_close(vma); - if (vma->vm_file) + if (vma->vm_file) { + if (is_dma_buf_file(vma->vm_file)) + dma_buf_unaccount_task(vma->vm_file->private_data, current); fput(vma->vm_file); + } mpol_put(vma_policy(vma)); if (unreachable) __vm_area_free(vma); @@ -223,6 +227,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) newbrk = __PAGE_ALIGN(brk); oldbrk = __PAGE_ALIGN(mm->brk); + /* properly handle unaligned min_brk as an empty heap */ + if (min_brk & ~__PAGE_MASK) { + if (brk == min_brk) + newbrk -= __PAGE_SIZE; + if (mm->brk == min_brk) + oldbrk -= __PAGE_SIZE; + } if (oldbrk == newbrk) { mm->brk = brk; goto success; @@ -2417,8 +2428,15 @@ int __split_vma(struct vma_iterator 
*vmi, struct vm_area_struct *vma, if (err) goto out_free_mpol; - if (new->vm_file) + if (new->vm_file) { get_file(new->vm_file); + if (is_dma_buf_file(new->vm_file)) { + int acct_err = dma_buf_account_task(new->vm_file->private_data, current); + + if (acct_err) + pr_err("failed to account dmabuf, err %d\n", acct_err); + } + } if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d1ecd3793e40..90e37bb950c3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -869,7 +869,10 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, static int zone_max_order(struct zone *zone) { - return zone->order && zone_idx(zone) == ZONE_NOMERGE ? zone->order : MAX_ORDER; + int max_order = zone->order && zone_idx(zone) == ZONE_NOMERGE ? zone->order : MAX_ORDER; + + trace_android_vh_mm_customize_zone_max_order(zone, &max_order); + return max_order; } /* @@ -1695,6 +1698,12 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags { post_alloc_hook(page, order, gfp_flags); + if (IS_ENABLED(CONFIG_PAGE_SANITIZE_VERIFY) && want_init_on_free()) { + int i; + for (i = 0; i < (1 << order); i++) + verify_zero_highpage(page + i); + } + if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); @@ -3087,6 +3096,8 @@ struct page *rmqueue(struct zone *preferred_zone, */ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); + trace_android_vh_mm_customize_rmqueue(zone, order, &alloc_flags, &migratetype); + if (likely(pcp_allowed_order(order))) { page = rmqueue_pcplist(preferred_zone, zone, order, migratetype, alloc_flags); @@ -3546,12 +3557,25 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, z = ac->preferred_zoneref; for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, ac->nodemask) { + bool use_this_zone = false; + bool suitable = true; struct page *page; unsigned long mark; if (!zone_is_suitable(zone, order)) continue; + 
trace_android_vh_mm_customize_suitable_zone(zone, gfp_mask, order, ac->highest_zoneidx, + &use_this_zone, &suitable); + if (!suitable) + continue; + + if (use_this_zone) + goto try_this_zone; + + /* + * This hook is deprecated by trace_android_vh_mm_customize_suitable_zone. + */ trace_android_vh_should_skip_zone(zone, gfp_mask, order, ac->migratetype, &should_skip_zone); if (should_skip_zone) @@ -4993,6 +5017,9 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, &alloc_gfp, &alloc_flags)) return NULL; + trace_android_vh_mm_customize_ac(gfp, order, &ac.zonelist, &ac.preferred_zoneref, + &ac.highest_zoneidx, &alloc_flags); + trace_android_rvh_try_alloc_pages_gfp(&page, order, gfp, gfp_zone(gfp)); if (page) goto out; @@ -5953,6 +5980,8 @@ static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) zone->pageset_batch == new_batch) return; + trace_android_vh_mm_customize_zone_pageset(zone, &new_high, &new_batch); + zone->pageset_high = new_high; zone->pageset_batch = new_batch; diff --git a/mm/pgsize_migration.c b/mm/pgsize_migration.c index 07dd1f3fa7d8..8f103f5bb4d6 100644 --- a/mm/pgsize_migration.c +++ b/mm/pgsize_migration.c @@ -180,8 +180,31 @@ static inline bool linker_ctx(void) vma = lock_vma_under_rcu(mm, instruction_pointer(regs)); - /* Current execution context, the VMA must be present */ - BUG_ON(!vma); + /* + * lock_vma_under_rcu() is a try-lock that can fail if the + * VMA is already locked for modification. + * + * Fallback to finding the vma under mmap read lock. + */ + if (!vma) { + mmap_read_lock(mm); + + vma = find_vma(mm, instruction_pointer(regs)); + + /* Current execution context, the VMA must be present */ + BUG_ON(!vma); + + /* + * We cannot use vma_start_read() as it may fail due to + * false locked (see comment in vma_start_read()). We + * can avoid that by directly locking vm_lock under + * mmap_lock, which guarantees that nobody can lock the + * vma for write (vma_start_write()) under us. 
+ */ + down_read(&vma->vm_lock->lock); + + mmap_read_unlock(mm); + } file = vma->vm_file; if (!file) diff --git a/mm/slab.h b/mm/slab.h index 06ed16c67b60..a3a8037be4c7 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -694,10 +694,14 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x) return s; cachep = virt_to_cache(x); +#ifdef CONFIG_BUG_ON_DATA_CORRUPTION + BUG_ON(cachep && cachep != s); +#else if (WARN(cachep && cachep != s, "%s: Wrong slab cache. %s but object is from %s\n", __func__, s->name, cachep->name)) print_tracking(cachep, x); +#endif return cachep; } @@ -726,7 +730,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s) * back there or track user information then we can * only use the space before that information. */ - if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) + if ((s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER)) || IS_ENABLED(CONFIG_SLAB_CANARY)) return s->inuse; /* * Else we can use all the padding etc for the allocation diff --git a/mm/slab_common.c b/mm/slab_common.c index 59ae933a0fe5..8a5bd94a3f62 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -36,10 +36,10 @@ #undef CREATE_TRACE_POINTS #include -enum slab_state slab_state; +enum slab_state slab_state __ro_after_init; LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); -struct kmem_cache *kmem_cache; +struct kmem_cache *kmem_cache __ro_after_init; static LIST_HEAD(slab_caches_to_rcu_destroy); static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work); @@ -59,7 +59,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work, /* * Merge control. If this is set then no merging of slab caches will occur. 
*/ -static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT); +static bool slab_nomerge __ro_after_init = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT); static int __init setup_slab_nomerge(char *str) { diff --git a/mm/slub.c b/mm/slub.c index 904e081d6a91..859bcfb75345 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -612,6 +613,55 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, return false; } +#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLAB_CANARY) +/* + * See comment in calculate_sizes(). + */ +static inline bool freeptr_outside_object(struct kmem_cache *s) +{ + return s->offset >= s->inuse; +} + +/* + * Return offset of the end of info block which is inuse + free pointer if + * not overlapping with object. + */ +static inline unsigned int get_info_end(struct kmem_cache *s) +{ + if (freeptr_outside_object(s)) + return s->inuse + sizeof(void *); + else + return s->inuse; +} +#endif + +#ifdef CONFIG_SLAB_CANARY +static inline unsigned long *get_canary(struct kmem_cache *s, void *object) +{ + return object + get_info_end(s); +} + +static inline unsigned long get_canary_value(const void *canary, unsigned long value) +{ + return (value ^ (unsigned long)canary) & CANARY_MASK; +} + +static inline void set_canary(struct kmem_cache *s, void *object, unsigned long value) +{ + unsigned long *canary = get_canary(s, object); + *canary = get_canary_value(canary, value); +} + +static inline void check_canary(struct kmem_cache *s, void *object, unsigned long value) +{ + unsigned long *canary = get_canary(s, object); + BUG_ON(*canary != get_canary_value(canary, value)); +} +#else +#define set_canary(s, object, value) +#define check_canary(s, object, value) +#endif + #ifdef CONFIG_SLUB_DEBUG static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)]; static DEFINE_SPINLOCK(object_map_lock); @@ -684,13 +734,13 @@ static inline void *restore_red_left(struct 
kmem_cache *s, void *p) * Debug settings: */ #if defined(CONFIG_SLUB_DEBUG_ON) -static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; +static slab_flags_t slub_debug __ro_after_init = DEBUG_DEFAULT_FLAGS; #else -static slab_flags_t slub_debug; +static slab_flags_t slub_debug __ro_after_init; #endif -static char *slub_debug_string; -static int disable_higher_order_debug; +static char *slub_debug_string __ro_after_init; +static int disable_higher_order_debug __ro_after_init; /* * slub is about to manipulate internal object metadata. This memory lies @@ -741,26 +791,6 @@ static void print_section(char *level, char *text, u8 *addr, metadata_access_disable(); } -/* - * See comment in calculate_sizes(). - */ -static inline bool freeptr_outside_object(struct kmem_cache *s) -{ - return s->offset >= s->inuse; -} - -/* - * Return offset of the end of info block which is inuse + free pointer if - * not overlapping with object. - */ -static inline unsigned int get_info_end(struct kmem_cache *s) -{ - if (freeptr_outside_object(s)) - return s->inuse + sizeof(void *); - else - return s->inuse; -} - static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { @@ -768,6 +798,9 @@ static struct track *get_track(struct kmem_cache *s, void *object, p = object + get_info_end(s); + if (IS_ENABLED(CONFIG_SLAB_CANARY)) + p = (void *)p + sizeof(void *); + return kasan_reset_tag(p + alloc); } @@ -1011,6 +1044,9 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) off = get_info_end(s); + if (IS_ENABLED(CONFIG_SLAB_CANARY)) + off += sizeof(void *); + if (s->flags & SLAB_STORE_USER) off += 2 * sizeof(struct track); @@ -1167,9 +1203,10 @@ static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, * Meta data starts here. * * A. Free pointer (if we cannot overwrite object on free) - * B. Tracking data for SLAB_STORE_USER - * C. Original request size for kmalloc object (SLAB_STORE_USER enabled) - * D. 
Padding to reach required alignment boundary or at minimum + * B. Canary for SLAB_CANARY + * C. Tracking data for SLAB_STORE_USER + * D. Original request size for kmalloc object (SLAB_STORE_USER enabled) + * E. Padding to reach required alignment boundary or at minimum * one word if debugging is on to be able to detect writes * before the word boundary. * @@ -1187,6 +1224,9 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) { unsigned long off = get_info_end(s); /* The end of info */ + if (IS_ENABLED(CONFIG_SLAB_CANARY)) + off += sizeof(void *); + if (s->flags & SLAB_STORE_USER) { /* We also have user information there */ off += 2 * sizeof(struct track); @@ -1825,8 +1865,16 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, * production configuration these hooks all should produce no code at all. */ static __always_inline bool slab_free_hook(struct kmem_cache *s, - void *x, bool init) + void *x, bool init, bool canary) { + /* + * Postpone setting the inactive canary until the metadata + * has potentially been cleared at the end of this function. + */ + if (canary) { + check_canary(s, x, s->random_active); + } + kmemleak_free_recursive(x, s->flags); kmsan_slab_free(s, x); @@ -1857,6 +1905,11 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, memset((char *)kasan_reset_tag(x) + s->inuse, 0, s->size - s->inuse - rsize); } + + if (canary) { + set_canary(s, x, s->random_inactive); + } + /* KASAN might put x into memory quarantine, delaying its reuse. */ return kasan_slab_free(s, x, init); } @@ -1871,7 +1924,7 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, void *old_tail = *tail ? 
*tail : *head; if (is_kfence_address(next)) { - slab_free_hook(s, next, false); + slab_free_hook(s, next, false, false); return true; } @@ -1884,7 +1937,7 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, next = get_freepointer(s, object); /* If object's reuse doesn't have to be delayed */ - if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { + if (!slab_free_hook(s, object, slab_want_init_on_free(s), true)) { /* Move object to the new freelist */ set_freepointer(s, object, *head); *head = object; @@ -1908,6 +1961,7 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, static void *setup_object(struct kmem_cache *s, void *object) { setup_object_debug(s, object); + set_canary(s, object, s->random_inactive); object = kasan_init_slab_obj(s, object); if (unlikely(s->ctor)) { kasan_unpoison_object_data(s, object); @@ -3534,6 +3588,11 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list maybe_wipe_obj_freeptr(s, object); init = slab_want_init_on_alloc(gfpflags, s); + if (object) { + check_canary(s, object, s->random_inactive); + set_canary(s, object, s->random_active); + } + out: /* * When init equals 'true', like for kzalloc() family, only @@ -4005,7 +4064,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, { struct kmem_cache_cpu *c; unsigned long irqflags; - int i; + int i, k; /* * Drain objects in the per cpu slab, while disabling local @@ -4060,6 +4119,13 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); slub_put_cpu_ptr(s->cpu_slab); + for (k = 0; k < i; k++) { + if (!is_kfence_address(p[k])) { + check_canary(s, p[k], s->random_inactive); + set_canary(s, p[k], s->random_active); + } + } + return i; error: @@ -4148,10 +4214,10 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk); * and increases the number of allocations possible without having to * take the list_lock. 
*/ -static unsigned int slub_min_order; -static unsigned int slub_max_order = +static unsigned int slub_min_order __ro_after_init; +static unsigned int slub_max_order __ro_after_init = IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER; -static unsigned int slub_min_objects; +static unsigned int slub_min_objects __ro_after_init; /* * Calculate the order of allocation given an slab object size. @@ -4342,6 +4408,7 @@ static void early_kmem_cache_node_alloc(int node) init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); init_tracking(kmem_cache_node, n); #endif + set_canary(kmem_cache_node, n, kmem_cache_node->random_active); n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false); slab->freelist = get_freepointer(kmem_cache_node, n); slab->inuse = 1; @@ -4508,6 +4575,9 @@ static int calculate_sizes(struct kmem_cache *s) s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); } + if (IS_ENABLED(CONFIG_SLAB_CANARY)) + size += sizeof(void *); + #ifdef CONFIG_SLUB_DEBUG if (flags & SLAB_STORE_USER) { /* @@ -4581,6 +4651,10 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) #ifdef CONFIG_SLAB_FREELIST_HARDENED s->random = get_random_long(); #endif +#ifdef CONFIG_SLAB_CANARY + s->random_active = get_random_long(); + s->random_inactive = get_random_long(); +#endif if (!calculate_sizes(s)) goto error; @@ -4845,6 +4919,9 @@ void __check_heap_object(const void *ptr, unsigned long n, offset -= s->red_left_pad; } + if (!is_kfence) + check_canary(s, (void *)ptr - offset, s->random_active); + /* Allow address range falling entirely within usercopy region. */ if (offset >= s->useroffset && offset - s->useroffset <= s->usersize && diff --git a/mm/util.c b/mm/util.c index 52057c1c8a1b..f77b4a44ac7b 100644 --- a/mm/util.c +++ b/mm/util.c @@ -378,9 +378,9 @@ unsigned long __weak arch_randomize_brk(struct mm_struct *mm) { /* Is the current task 32bit ? 
*/ if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task()) - return randomize_page(mm->brk, SZ_32M); + return mm->brk + get_random_long() % SZ_32M + PAGE_SIZE; - return randomize_page(mm->brk, SZ_1G); + return mm->brk + get_random_long() % SZ_1G + PAGE_SIZE; } unsigned long arch_mmap_rnd(void) @@ -390,6 +390,10 @@ unsigned long arch_mmap_rnd(void) #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS if (is_compat_task()) rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); +#ifdef CONFIG_ARM64 + else if (test_thread_flag(TIF_39BIT)) + rnd = get_random_long() & ((1UL << MMAP_RND_BITS_39_BIT) - 1); +#endif else #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); diff --git a/mm/vmscan.c b/mm/vmscan.c index 4c62b503788b..b2e6f98e2932 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -104,6 +104,11 @@ struct scan_control { unsigned long anon_cost; unsigned long file_cost; +#ifdef CONFIG_MEMCG + /* Swappiness value for proactive reclaim. Always use sc_swappiness()! */ + int *proactive_swappiness; +#endif + /* Can active folios be deactivated as part of reclaim? */ #define DEACTIVATE_ANON 1 #define DEACTIVATE_FILE 2 @@ -196,7 +201,7 @@ struct scan_control { #endif /* - * From 0 .. 200. Higher means more swappy. + * From 0 .. MAX_SWAPPINESS. Higher means more swappy. 
*/ int vm_swappiness = 60; @@ -479,6 +484,13 @@ static bool writeback_throttling_sane(struct scan_control *sc) #endif return false; } + +static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) +{ + if (sc->proactive && sc->proactive_swappiness) + return *sc->proactive_swappiness; + return mem_cgroup_swappiness(memcg); +} #else static int prealloc_memcg_shrinker(struct shrinker *shrinker) { @@ -515,6 +527,11 @@ static bool writeback_throttling_sane(struct scan_control *sc) { return true; } + +static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg) +{ + return READ_ONCE(vm_swappiness); +} #endif static void set_task_reclaim_state(struct task_struct *task, @@ -2390,6 +2407,12 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, unsigned long skipped = 0; unsigned long scan, total_scan, nr_pages; LIST_HEAD(folios_skipped); + unsigned long nr_scanned_before = *nr_scanned; + + trace_android_vh_mm_isolate_priv_lru(nr_to_scan, lruvec, lru, dst, sc->reclaim_idx, + sc->may_unmap, nr_scanned, &nr_taken); + if (*nr_scanned != nr_scanned_before) + return nr_taken; total_scan = 0; scan = 0; @@ -2647,7 +2670,7 @@ unsigned long shrink_inactive_list(unsigned long nr_to_scan, enum lru_list lru) { LIST_HEAD(folio_list); - unsigned long nr_scanned; + unsigned long nr_scanned = 0; unsigned int nr_reclaimed = 0; unsigned long nr_taken; struct reclaim_stat stat; @@ -2770,7 +2793,7 @@ static void shrink_active_list(unsigned long nr_to_scan, enum lru_list lru) { unsigned long nr_taken; - unsigned long nr_scanned; + unsigned long nr_scanned = 0; unsigned long vm_flags; LIST_HEAD(l_hold); /* The folios which were snipped off */ LIST_HEAD(l_active); @@ -3014,6 +3037,15 @@ static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) return inactive * inactive_ratio < active; } +static void customize_sc_file_is_tiny(struct scan_control *sc) +{ + bool file_is_tiny = sc->file_is_tiny; + + 
trace_android_vh_mm_customize_file_is_tiny(sc->may_swap, sc->order, + sc->reclaim_idx, &file_is_tiny); + sc->file_is_tiny = file_is_tiny; +} + enum scan_balance { SCAN_EQUAL, SCAN_FRACT, @@ -3131,6 +3163,8 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) !(sc->may_deactivate & DEACTIVATE_ANON) && anon >> sc->priority; } + + customize_sc_file_is_tiny(sc); } /* @@ -3146,7 +3180,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, struct pglist_data *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup *memcg = lruvec_memcg(lruvec); unsigned long anon_cost, file_cost, total_cost; - int swappiness = mem_cgroup_swappiness(memcg); + int swappiness = sc_swappiness(sc, memcg); u64 fraction[ANON_AND_FILE]; u64 denominator = 0; /* gcc */ enum scan_balance scan_balance; @@ -3229,7 +3263,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, ap = swappiness * (total_cost + 1); ap /= anon_cost + 1; - fp = (200 - swappiness) * (total_cost + 1); + fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); fp /= file_cost + 1; fraction[0] = ap; @@ -3436,7 +3470,7 @@ static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) return 0; - swappiness = mem_cgroup_swappiness(memcg); + swappiness = sc_swappiness(sc, memcg); trace_android_vh_tune_swappiness(&swappiness); return swappiness; @@ -5268,7 +5302,7 @@ static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx { int type, tier; struct ctrl_pos sp, pv; - int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; + int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness }; /* * Compare the first tier of anon with that of file to determine which @@ -5312,7 +5346,7 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw type = LRU_GEN_ANON; else if (swappiness == 1) type = LRU_GEN_FILE; - else if (swappiness == 200) + else if 
(swappiness == MAX_SWAPPINESS) type = LRU_GEN_ANON; else type = get_type_to_scan(lruvec, swappiness, &tier); @@ -6265,9 +6299,9 @@ static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, lruvec = get_lruvec(memcg, nid); - if (swappiness < 0) + if (swappiness < MIN_SWAPPINESS) swappiness = get_swappiness(lruvec, sc); - else if (swappiness > 200) + else if (swappiness > MAX_SWAPPINESS) goto done; switch (cmd) { @@ -7379,15 +7413,17 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, return sc.nr_reclaimed; } -unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, - unsigned long nr_pages, - gfp_t gfp_mask, - unsigned int reclaim_options) +unsigned long try_to_free_mem_cgroup_pages_with_swappiness(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + unsigned int reclaim_options, + int *swappiness) { unsigned long nr_reclaimed; unsigned int noreclaim_flag; struct scan_control sc = { .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), + .proactive_swappiness = swappiness, .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), .reclaim_idx = MAX_NR_ZONES - 1, @@ -7417,6 +7453,15 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, return nr_reclaimed; } + +unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + unsigned int reclaim_options) +{ + return try_to_free_mem_cgroup_pages_with_swappiness( + memcg, nr_pages, gfp_mask, reclaim_options, NULL); +} EXPORT_SYMBOL_GPL(try_to_free_mem_cgroup_pages); #endif @@ -7478,8 +7523,15 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) { int i; unsigned long mark = -1; + bool customized = false; + bool balanced = false; struct zone *zone; + trace_android_vh_mm_customize_pgdat_balanced(order, highest_zoneidx, + &balanced, &customized); + if (customized) + return balanced; + /* * Check watermarks bottom-up as lower 
zones are more likely to * meet watermarks. diff --git a/modules.bzl b/modules.bzl index e6d7523a9c84..76572399d7f6 100644 --- a/modules.bzl +++ b/modules.bzl @@ -79,8 +79,6 @@ _COMMON_GKI_MODULES_LIST = [ "net/mac802154/mac802154.ko", "net/nfc/nfc.ko", "net/rfkill/rfkill.ko", - "net/tipc/diag.ko", - "net/tipc/tipc.ko", "net/tls/tls.ko", "net/vmw_vsock/vmw_vsock_virtio_transport.ko", ] @@ -110,6 +108,8 @@ _X86_GKI_MODULES_LIST = [ _X86_64_GKI_MODULES_LIST = [ # keep sorted "drivers/ptp/ptp_kvm.ko", + "net/tipc/diag.ko", + "net/tipc/tipc.ko", ] # buildifier: disable=unnamed-macro diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e6881662b558..df52c1843e68 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -123,6 +123,15 @@ #include #include +int sysctl_reserved_port_bind __read_mostly = 1; + +#define AID_INET KGIDT_INIT(3003) + +static inline int current_has_network(void) +{ + return in_egroup_p(AID_INET) || capable(CAP_NET_RAW); +} + /* The inetsw table contains everything that inet_create needs to * build a new socket. */ @@ -263,6 +272,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= IPPROTO_MAX) return -EINVAL; + if (!current_has_network()) + return -EACCES; + sock->state = SS_UNCONNECTED; /* Look for the requested type/protocol pair. 
*/ diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 99946d4b02ca..9f051ca9f192 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -237,6 +237,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .ioam6_id = IOAM6_DEFAULT_IF_ID, .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, .ndisc_evict_nocarrier = 1, + .ra_honor_pio_pflag = 0, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -299,6 +300,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .ioam6_id = IOAM6_DEFAULT_IF_ID, .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE, .ndisc_evict_nocarrier = 1, + .ra_honor_pio_pflag = 0, }; /* Check if link is ready: is it up and is a valid qdisc available */ @@ -2765,6 +2767,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) u32 addr_flags = 0; struct inet6_dev *in6_dev; struct net *net = dev_net(dev); + bool ignore_autoconf = false; pinfo = (struct prefix_info *) opt; @@ -2858,7 +2861,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) /* Try to figure out our local address for this prefix */ - if (pinfo->autoconf && in6_dev->cnf.autoconf) { + ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd; + if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) { struct in6_addr addr; bool tokenized = false, dev_addr_generated = false; @@ -6916,6 +6920,15 @@ static const struct ctl_table addrconf_sysctl[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "ra_honor_pio_pflag", + .data = &ipv6_devconf.ra_honor_pio_pflag, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, #ifdef CONFIG_IPV6_ROUTER_PREF { .procname = "accept_ra_rtr_pref", diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0129c93b4e56..11068e8bf4cc 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -70,6 +70,13 @@ #include +#define AID_INET 
KGIDT_INIT(3003) + +static inline int current_has_network(void) +{ + return in_egroup_p(AID_INET) || capable(CAP_NET_RAW); +} + #include "ip6_offload.h" MODULE_AUTHOR("Cast of dozens"); @@ -133,6 +140,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, if (protocol < 0 || protocol >= IPPROTO_MAX) return -EINVAL; + if (!current_has_network()) + return -EACCES; + /* Look for the requested type/protocol pair. */ lookup_protocol: err = -ESOCKTNOSUPPORT; diff --git a/net/tls/tls.h b/net/tls/tls.h index c13f3dc3b0fc..cfccd2babe90 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -198,7 +198,7 @@ void tls_strp_msg_done(struct tls_strparser *strp); int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb); void tls_rx_msg_ready(struct tls_strparser *strp); -void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); +bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); int tls_strp_msg_cow(struct tls_sw_context_rx *ctx); struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx); int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst); diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 1852fac3e72b..bcb7250228b3 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -474,7 +474,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len) strp->stm.offset = offset; } -void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) +bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) { struct strp_msg *rxm; struct tls_msg *tlm; @@ -483,8 +483,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len); if (!strp->copy_mode && force_refresh) { - if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len)) - return; + if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) { + WRITE_ONCE(strp->msg_ready, 0); + memset(&strp->stm, 0, sizeof(strp->stm)); + return false; 
+ } tls_strp_load_anchor_with_queue(strp, strp->stm.full_len); } @@ -494,6 +497,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) rxm->offset = strp->stm.offset; tlm = tls_msg(strp->anchor); tlm->control = strp->mark; + + return true; } /* Called with lock held on lower socket */ diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 4a9a3aed5d6d..ba931025df27 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1367,7 +1367,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, return sock_intr_errno(timeo); } - tls_strp_msg_load(&ctx->strp, released); + if (unlikely(!tls_strp_msg_load(&ctx->strp, released))) + return tls_rx_rec_wait(sk, psock, nonblock, false); return 1; } @@ -1760,6 +1761,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout) return tls_decrypt_sg(sk, NULL, sgout, &darg); } +/* All records returned from a recvmsg() call must have the same type. + * 0 is not a valid content type. Use it as "no type reported, yet". + */ static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm, u8 *control) { @@ -2003,8 +2007,10 @@ int tls_sw_recvmsg(struct sock *sk, if (err < 0) goto end; + /* process_rx_list() will set @control if it processed any records */ copied = err; - if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more) + if (len <= copied || rx_more || + (control && control != TLS_RECORD_TYPE_DATA)) goto end; target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 2925f5d27ad3..5d7c68214d57 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -63,6 +63,17 @@ struct virtio_vsock { u32 guest_cid; bool seqpacket_allow; + + /* These fields are used only in tx path in function + * 'virtio_transport_send_pkt_work()', so to save + * stack space in it, place both of them here. 
Each + * pointer from 'out_sgs' points to the corresponding + * element in 'out_bufs' - this is initialized in + * 'virtio_vsock_probe()'. Both fields are protected + * by 'tx_lock'. +1 is needed for packet header. + */ + struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1]; + struct scatterlist out_bufs[MAX_SKB_FRAGS + 1]; }; static u32 virtio_transport_get_local_cid(void) @@ -100,8 +111,8 @@ virtio_transport_send_pkt_work(struct work_struct *work) vq = vsock->vqs[VSOCK_VQ_TX]; for (;;) { - struct scatterlist hdr, buf, *sgs[2]; int ret, in_sg = 0, out_sg = 0; + struct scatterlist **sgs; struct sk_buff *skb; bool reply; @@ -110,12 +121,43 @@ virtio_transport_send_pkt_work(struct work_struct *work) break; reply = virtio_vsock_skb_reply(skb); - - sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb))); - sgs[out_sg++] = &hdr; - if (skb->len > 0) { - sg_init_one(&buf, skb->data, skb->len); - sgs[out_sg++] = &buf; + sgs = vsock->out_sgs; + sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb), + sizeof(*virtio_vsock_hdr(skb))); + out_sg++; + + if (!skb_is_nonlinear(skb)) { + if (skb->len > 0) { + sg_init_one(sgs[out_sg], skb->data, skb->len); + out_sg++; + } + } else { + struct skb_shared_info *si; + int i; + + /* If skb is nonlinear, then its buffer must contain + * only header and nothing more. Data is stored in + * the fragged part. + */ + WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb))); + + si = skb_shinfo(skb); + + for (i = 0; i < si->nr_frags; i++) { + skb_frag_t *skb_frag = &si->frags[i]; + void *va; + + /* We will use 'page_to_virt()' for the userspace page + * here, because virtio or dma-mapping layers will call + * 'virt_to_phys()' later to fill the buffer descriptor. + * We don't touch memory at "virtual" address of this page. 
+ */ + va = page_to_virt(skb_frag->bv_page); + sg_init_one(sgs[out_sg], + va + skb_frag->bv_offset, + skb_frag->bv_len); + out_sg++; + } } ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL); @@ -221,7 +263,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk) static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) { - int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM; + int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; struct scatterlist pkt, *p; struct virtqueue *vq; struct sk_buff *skb; @@ -230,7 +272,7 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) vq = vsock->vqs[VSOCK_VQ_RX]; do { - skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL); + skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL); if (!skb) break; @@ -497,8 +539,9 @@ static void virtio_transport_rx_work(struct work_struct *work) do { virtqueue_disable_cb(vq); for (;;) { + unsigned int len, payload_len; + struct virtio_vsock_hdr *hdr; struct sk_buff *skb; - unsigned int len; if (!virtio_transport_more_replies(vsock)) { /* Stop rx until the device processes already @@ -515,13 +558,22 @@ static void virtio_transport_rx_work(struct work_struct *work) vsock->rx_buf_nr--; /* Drop short/long packets */ - if (unlikely(len < sizeof(struct virtio_vsock_hdr) || + if (unlikely(len < sizeof(*hdr) || len > virtio_vsock_skb_len(skb))) { kfree_skb(skb); continue; } - virtio_vsock_skb_rx_put(skb); + hdr = virtio_vsock_hdr(skb); + payload_len = le32_to_cpu(hdr->len); + if (unlikely(payload_len > len - sizeof(*hdr))) { + kfree_skb(skb); + continue; + } + + if (payload_len) + virtio_vsock_skb_put(skb, payload_len); + virtio_transport_deliver_tap_pkt(skb); virtio_transport_recv_pkt(&virtio_transport, skb); } @@ -637,6 +689,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev) { struct virtio_vsock *vsock = NULL; int ret; + int i; ret = mutex_lock_interruptible(&the_virtio_vsock_mutex); if (ret) @@ -679,6 +732,9 @@ static int 
virtio_vsock_probe(struct virtio_device *vdev) if (ret < 0) goto out; + for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++) + vsock->out_sgs[i] = &vsock->out_bufs[i]; + rcu_assign_pointer(the_virtio_vsock, vsock); virtio_vsock_vqs_start(vsock); diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index e289b8f9d75c..790d968a8e5e 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -29,10 +29,6 @@ static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, bool cancel_timeout); -uint virtio_transport_max_vsock_pkt_buf_size = 64 * 1024; -module_param(virtio_transport_max_vsock_pkt_buf_size, uint, 0444); -EXPORT_SYMBOL_GPL(virtio_transport_max_vsock_pkt_buf_size); - static const struct virtio_transport * virtio_transport_get_ops(struct vsock_sock *vsk) { @@ -59,7 +55,6 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info, const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len; struct virtio_vsock_hdr *hdr; struct sk_buff *skb; - void *payload; int err; skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL); @@ -79,8 +74,8 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info, hdr->fwd_cnt = cpu_to_le32(0); if (info->msg && len > 0) { - payload = skb_put(skb, len); - err = memcpy_from_msg(payload, info->msg, len); + virtio_vsock_skb_put(skb, len); + err = skb_copy_datagram_from_iter(skb, 0, &info->msg->msg_iter, len); if (err) goto out; @@ -115,6 +110,27 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info, return NULL; } +static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb, + void *dst, + size_t len) +{ + struct iov_iter iov_iter = { 0 }; + struct kvec kvec; + size_t to_copy; + + kvec.iov_base = dst; + kvec.iov_len = len; + + iov_iter.iter_type = ITER_KVEC; + iov_iter.kvec = &kvec; + iov_iter.nr_segs = 1; + + to_copy = min_t(size_t, len, skb->len); + + skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, + 
&iov_iter, to_copy); +} + /* Packet capture */ static struct sk_buff *virtio_transport_build_skb(void *opaque) { @@ -123,7 +139,6 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) struct af_vsockmon_hdr *hdr; struct sk_buff *skb; size_t payload_len; - void *payload_buf; /* A packet could be split to fit the RX buffer, so we can retrieve * the payload length from the header and the buffer pointer taking @@ -131,7 +146,6 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) */ pkt_hdr = virtio_vsock_hdr(pkt); payload_len = pkt->len; - payload_buf = pkt->data; skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len, GFP_ATOMIC); @@ -174,7 +188,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque) skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr)); if (payload_len) { - skb_put_data(skb, payload_buf, payload_len); + if (skb_is_nonlinear(pkt)) { + void *data = skb_put(skb, payload_len); + + virtio_transport_copy_nonlinear_skb(pkt, data, payload_len); + } else { + skb_put_data(skb, pkt->data, payload_len); + } } return skb; @@ -206,6 +226,24 @@ static u16 virtio_transport_get_type(struct sock *sk) static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, struct virtio_vsock_pkt_info *info) { + /* ANDROID: + * + * Older host kernels (including the 5.10-based images used by + * Cuttlefish) only support linear SKBs on the RX path. + * Consequently, if we transmit a VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + * packet, the host allocation can fail and the packet will be + * silently dropped. + * + * As a nasty workaround, limit the entire SKB to ~28KiB, which + * allows for 4KiB of SKB wiggle room whilst keeping the + * allocation below PAGE_ALLOC_COSTLY_ORDER. + * + * This can be removed when all supported host kernels have + * support for non-linear RX buffers introduced by Change-Id + * I4212a8daf9f19b5bbffc06ce93338c823de7bb19. 
+ */ + u32 max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, + SKB_WITH_OVERHEAD(SZ_32K - VIRTIO_VSOCK_SKB_HEADROOM) - SZ_4K); u32 src_cid, src_port, dst_cid, dst_port; const struct virtio_transport *t_ops; struct virtio_vsock_sock *vvs; @@ -244,7 +282,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, struct sk_buff *skb; size_t skb_len; - skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, rest_len); + skb_len = min_t(u32, max_skb_len, rest_len); skb = virtio_transport_alloc_skb(info, skb_len, src_cid, src_port, @@ -373,9 +411,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk, spin_unlock_bh(&vvs->rx_lock); /* sk_lock is held by caller so no one else can dequeue. - * Unlock rx_lock since memcpy_to_msg() may sleep. + * Unlock rx_lock since skb_copy_datagram_iter() may sleep. */ - err = memcpy_to_msg(msg, skb->data, bytes); + err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, + &msg->msg_iter, bytes); if (err) goto out; @@ -421,25 +460,27 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, while (total < len && !skb_queue_empty(&vvs->rx_queue)) { skb = skb_peek(&vvs->rx_queue); - bytes = len - total; - if (bytes > skb->len) - bytes = skb->len; + bytes = min_t(size_t, len - total, + skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset); /* sk_lock is held by caller so no one else can dequeue. - * Unlock rx_lock since memcpy_to_msg() may sleep. + * Unlock rx_lock since skb_copy_datagram_iter() may sleep. 
*/ spin_unlock_bh(&vvs->rx_lock); - err = memcpy_to_msg(msg, skb->data, bytes); + err = skb_copy_datagram_iter(skb, + VIRTIO_VSOCK_SKB_CB(skb)->offset, + &msg->msg_iter, bytes); if (err) goto out; spin_lock_bh(&vvs->rx_lock); total += bytes; - skb_pull(skb, bytes); - if (skb->len == 0) { + VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes; + + if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) { u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len); virtio_transport_dec_rx_pkt(vvs, pkt_len); @@ -508,9 +549,10 @@ virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk, spin_unlock_bh(&vvs->rx_lock); /* sk_lock is held by caller so no one else can dequeue. - * Unlock rx_lock since memcpy_to_msg() may sleep. + * Unlock rx_lock since skb_copy_datagram_iter() may sleep. */ - err = memcpy_to_msg(msg, skb->data, bytes); + err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, + &msg->msg_iter, bytes); if (err) return err; @@ -569,11 +611,13 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk, int err; /* sk_lock is held by caller so no one else can dequeue. - * Unlock rx_lock since memcpy_to_msg() may sleep. + * Unlock rx_lock since skb_copy_datagram_iter() may sleep. */ spin_unlock_bh(&vvs->rx_lock); - err = memcpy_to_msg(msg, skb->data, bytes_to_copy); + err = skb_copy_datagram_iter(skb, 0, + &msg->msg_iter, + bytes_to_copy); if (err) { /* Copy of message failed. Rest of * fragments will be freed without copy. 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f2cc9fa33f3c..6eb357ca75b2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1208,12 +1208,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_DFS_CONCURRENT) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DFS_CONCURRENT)) goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT)) - goto nla_put_failure; - if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) && - nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT)) - goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 8ee613917a3f..671797958d47 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -5,7 +5,7 @@ * Copyright 2008-2011 Luis R. Rodriguez * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2023 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -1592,10 +1592,6 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_EHT; if (rd_flags & NL80211_RRF_DFS_CONCURRENT) channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT; - if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT) - channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT; - if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT) - channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT; if (rd_flags & NL80211_RRF_PSD) channel_flags |= IEEE80211_CHAN_PSD; return channel_flags; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 57d6085c72f8..c86279967e2b 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -5,7 +5,7 @@ * Copyright 2008 Johannes Berg * Copyright 2013-2014 
Intel Mobile Communications GmbH * Copyright 2016 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include #include @@ -2139,36 +2139,6 @@ struct cfg80211_inform_single_bss_data { u64 cannot_use_reasons; }; -static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen, - const u32 flags) -{ - const struct element *tmp; - struct ieee80211_he_operation *he_oper; - - tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen); - if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 && - tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) { - const struct ieee80211_he_6ghz_oper *he_6ghz_oper; - - he_oper = (void *)&tmp->data[1]; - he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); - - if (!he_6ghz_oper) - return false; - - switch (u8_get_bits(he_6ghz_oper->control, - IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { - case IEEE80211_6GHZ_CTRL_REG_LPI_AP: - return true; - case IEEE80211_6GHZ_CTRL_REG_SP_AP: - return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT); - case IEEE80211_6GHZ_CTRL_REG_VLP_AP: - return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT); - } - } - return false; -} - /* Returned bss is reference counted and must be cleaned up appropriately. 
*/ static struct cfg80211_bss * cfg80211_inform_single_bss_data(struct wiphy *wiphy, @@ -2201,14 +2171,6 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, if (!channel) return NULL; - if (channel->band == NL80211_BAND_6GHZ && - !cfg80211_6ghz_power_type_valid(data->ie, data->ielen, - channel->flags)) { - data->use_for = 0; - data->cannot_use_reasons = - NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH; - } - memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN); tmp.pub.channel = channel; if (data->bss_source != BSS_SOURCE_STA_PROFILE) diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index e21cc71095bb..ca6db1e960ce 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -880,7 +880,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], return -EINVAL; } - if (p.collect_md) { + if (p.collect_md || xi->p.collect_md) { NL_SET_ERR_MSG(extack, "collect_md can't be changed"); return -EINVAL; } @@ -891,11 +891,6 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], } else { if (xi->dev != dev) return -EEXIST; - if (xi->p.collect_md) { - NL_SET_ERR_MSG(extack, - "device can't be changed to collect_md"); - return -EINVAL; - } } return xfrmi_update(xi, &p); diff --git a/scripts/gen-randstruct-seed.sh b/scripts/gen-randstruct-seed.sh index 61017b36c464..0c84743dade0 100755 --- a/scripts/gen-randstruct-seed.sh +++ b/scripts/gen-randstruct-seed.sh @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -SEED=$(od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n') +SEED=$(echo -n $KBUILD_BUILD_TIMESTAMP | sha256sum | cut -f 1 -d ' ') echo "$SEED" > "$1" HASH=$(echo -n "$SEED" | sha256sum | cut -d" " -f1) echo "#define RANDSTRUCT_HASHED_SEED \"$HASH\"" > "$2" diff --git a/scripts/sign-file.c b/scripts/sign-file.c index 93816c7bf9f3..87b16098fc3a 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -288,12 +288,14 @@ int main(int argc, char **argv) replace_orig = true; } +#ifndef 
OPENSSL_IS_BORINGSSL #ifdef USE_PKCS7 if (strcmp(hash_algo, "sha1") != 0) { fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", OPENSSL_VERSION_TEXT); exit(3); } +#endif #endif /* Open the module file */ diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index 2cff851ebfd7..b247f3740428 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -277,6 +277,13 @@ config ZERO_CALL_USED_REGS be evaluated for suitability. For example, x86_64 grows by less than 1%, and arm64 grows by about 5%. +config PAGE_SANITIZE_VERIFY + bool "Verify sanitized pages" + default y + help + When init_on_free is enabled, verify that newly allocated pages + are zeroed to detect write-after-free bugs. + endmenu menu "Hardening of kernel data structures" diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 849e832719e2..ebc61026a839 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -194,7 +194,7 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr, * @a : common audit data * */ -static void dump_common_audit_data(struct audit_buffer *ab, +void dump_common_audit_data(struct audit_buffer *ab, struct common_audit_data *a) { char comm[sizeof(current->comm)]; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d4a99d98ec77..c29cd17ee900 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -251,6 +251,59 @@ static void ad_net_init_from_iif(struct common_audit_data *ad, __ad_net_init(ad, net, ifindex, NULL, family); } +static inline u64 cred_tsec_flags(const struct cred *cred) +{ + const struct task_security_struct *tsec; + + tsec = selinux_cred(cred); + return tsec->flags; +} + +static void audit_log_tsec_flag_denial_inner(const char *prefix, struct common_audit_data *adp) +{ + kuid_t uid; + pid_t tgid; + // Apps are allowed to fork() their processes. 
If process parent is an isolated process, there's no good way to + // determine after child process death which app UID it belonged to, since isolated process UID is separate from + // app UID. + // Adding tgid of the top-most process with the same uid as current process to audit message allows to attribute + // such processes to their apps, since top-most processes are always spawned and managed by the OS (see difference + // between ProcessRecord and PhantomProcessRecord in Android system_server code). + pid_t top_tgid_with_same_uid; + struct task_struct *cur; + struct task_struct *parent; + struct audit_buffer *ab; + + rcu_read_lock(); + cur = current; + uid = __task_cred(cur)->uid; + tgid = task_tgid_nr(cur); + top_tgid_with_same_uid = tgid; + parent = cur->parent; + while (parent != NULL && uid_eq(__task_cred(parent)->uid, uid)) { + top_tgid_with_same_uid = task_tgid_nr(parent); + parent = parent->parent; + } + rcu_read_unlock(); + + ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN, AUDIT_SELINUX_TSEC_FLAG_DENIAL); + + if (ab == NULL) { + return; + } + + audit_log_format(ab, "%s: op denied, uid %u, pid %i, top_pid_with_same_uid %i,", prefix, __kuid_val(uid), + tgid, top_tgid_with_same_uid); + + if (adp) { + dump_common_audit_data(ab, adp); + } + + audit_log_end(ab); +} + +#define audit_log_tsec_flag_denial(f, adp) audit_log_tsec_flag_denial_inner(#f, adp) + /* * get the objective security ID of a task */ @@ -1649,6 +1702,66 @@ static int cred_has_capability(const struct cred *cred, return rc; } +static int selinux_inode_check_tsec_flags( + const struct cred *cred, struct inode_security_struct *isec, + u32 perms, struct common_audit_data *adp) +{ + int rc; + struct selinux_state *s = &selinux_state; + u32 inode_type; + u64 flags = cred_tsec_flags(cred); + u64 denied_flags = 0; + char *flag_str = NULL; + + if (perms & FILE__EXECMOD) { + if (flags & TSEC_FLAG_DENY_EXECMOD) { + audit_log_tsec_flag_denial(TSEC_FLAG_DENY_EXECMOD, adp); + return 
-EACCES; + } + } + + if (perms & FILE__EXECUTE) { + if (!(flags & TSEC_ALL_DENY_EXECUTE_FLAGS)) { + // none of the DENY_EXEC_* flags are set + return 0; + } + rc = security_sid_to_context_type(isec->sid, &inode_type); + + if (rc) { + pr_warn("unknown type for sid %i, inode %lu\n", isec->sid, isec->inode->i_ino); + // This function is called only if the regular SELinux check returned "allowed". If SELinux is configured + // to deny this inode operation on inodes with unknown contexts, it would already be denied and this code + // would not be reached. + return 0; + } + +#define DENY_FLAG(s) denied_flags = s; flag_str = #s + + if (inode_type == s->types.appdomain_tmpfs) { + DENY_FLAG(TSEC_FLAG_DENY_EXECUTE_APPDOMAIN_TMPFS); + } else if (inode_type == s->types.app_data_file) { + DENY_FLAG(TSEC_FLAG_DENY_EXECUTE_APP_DATA_FILE); + } else if (inode_type == s->types.ashmem_device) { + DENY_FLAG(TSEC_FLAG_DENY_EXECUTE_ASHMEM_DEVICE); + } else if (inode_type == s->types.ashmem_libcutils_device) { + DENY_FLAG(TSEC_FLAG_DENY_EXECUTE_ASHMEM_LIBCUTILS_DEVICE); + } else if (inode_type == s->types.privapp_data_file) { + DENY_FLAG(TSEC_FLAG_DENY_EXECUTE_PRIVAPP_DATA_FILE); + } else { + return 0; + } + +#undef DENY_FLAG + + if (flags & denied_flags) { + audit_log_tsec_flag_denial_inner(flag_str, adp); + return -EACCES; + } + } + + return 0; +} + /* Check whether a task has a particular permission to an inode. The 'adp' parameter is optional and allows other audit data to be passed (e.g. the dentry). 
*/ @@ -1659,6 +1772,7 @@ static int inode_has_perm(const struct cred *cred, { struct inode_security_struct *isec; u32 sid; + int rc; if (unlikely(IS_PRIVATE(inode))) return 0; @@ -1666,7 +1780,12 @@ static int inode_has_perm(const struct cred *cred, sid = cred_sid(cred); isec = selinux_inode(inode); - return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp); + rc = avc_has_perm(sid, isec->sid, isec->sclass, perms, adp); + + if (!rc) { + rc = selinux_inode_check_tsec_flags(cred, isec, perms, adp); + } + return rc; } /* Same as inode_has_perm, but pass explicit audit data containing @@ -2085,24 +2204,61 @@ static int selinux_binder_transfer_file(const struct cred *from, &ad); } +static bool is_crash_dump_sid(u32 sid) +{ + u32 type; + if (security_sid_to_context_type(sid, &type)) { + return false; + } + return type == selinux_state.types.crash_dump; +} + static int selinux_ptrace_access_check(struct task_struct *child, unsigned int mode) { - u32 sid = current_sid(); + const struct cred *cred = current_cred(); + u32 sid = cred_sid(cred); u32 csid = task_sid_obj(child); + int rc; if (mode & PTRACE_MODE_READ) return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL); - return avc_has_perm(sid, csid, SECCLASS_PROCESS, PROCESS__PTRACE, + rc = avc_has_perm(sid, csid, SECCLASS_PROCESS, PROCESS__PTRACE, NULL); + if (!rc) { + if (cred_tsec_flags(cred) & TSEC_FLAG_DENY_PROCESS_PTRACE) { + // Exempt crash_dump binary from this restriction: + // crash_dump process is spawned as a child of crashed process and needs ptrace access to collect parent's + // stack trace.
+ // sepolicy of crash_dump domain allows ptrace access, but tsec_flags are inherited across fork() + if (!is_crash_dump_sid(sid)) { + audit_log_tsec_flag_denial(TSEC_FLAG_DENY_PROCESS_PTRACE, NULL); + return -EPERM; + } + } + } + return rc; } static int selinux_ptrace_traceme(struct task_struct *parent) { - return avc_has_perm(task_sid_obj(parent), task_sid_obj(current), - SECCLASS_PROCESS, PROCESS__PTRACE, NULL); + const struct cred *cred = current_cred(); + u32 sid = cred_sid(cred); + + int rc = avc_has_perm(task_sid_obj(parent), sid, + SECCLASS_PROCESS, PROCESS__PTRACE, NULL); + + if (!rc) { + if (cred_tsec_flags(cred) & TSEC_FLAG_DENY_PROCESS_PTRACE) { + audit_log_tsec_flag_denial_inner("TSEC_FLAG_DENY_PROCESS_PTRACE (ptrace_traceme)", NULL); + return -EPERM; + } + } + + return rc; + } static int selinux_capget(const struct task_struct *target, kernel_cap_t *effective, @@ -2296,6 +2452,7 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) struct common_audit_data ad; struct inode *inode = file_inode(bprm->file); int rc; + u32 inode_context_type; /* SELinux context only depends on initial program or script and not * the script interpreter */ @@ -2347,6 +2504,17 @@ static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm) SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad); if (rc) return rc; + + if (old_tsec->flags & TSEC_FLAG_DENY_EXECUTE_NO_TRANS_APP_DATA_FILE) { + rc = security_sid_to_context_type(isec->sid, &inode_context_type); + if (rc) + return rc; + + if (inode_context_type == selinux_state.types.app_data_file) { + audit_log_tsec_flag_denial(TSEC_FLAG_DENY_EXECUTE_NO_TRANS_APP_DATA_FILE, &ad); + return -EACCES; + } + } } else { /* Check permissions for the transition. 
*/ rc = avc_has_perm(old_tsec->sid, new_tsec->sid, @@ -3109,6 +3277,11 @@ static int selinux_inode_permission(struct inode *inode, int mask) rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd); + + if (!rc) { + rc = selinux_inode_check_tsec_flags(cred, isec, perms, NULL); + } + audited = avc_audit_required(perms, &avd, rc, from_access ? FILE__AUDIT_ACCESS : 0, &denied); @@ -3770,6 +3943,14 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared */ rc = avc_has_perm(sid, sid, SECCLASS_PROCESS, PROCESS__EXECMEM, NULL); + + if (!rc) { + if (cred_tsec_flags(cred) & TSEC_FLAG_DENY_EXECMEM) { + audit_log_tsec_flag_denial(TSEC_FLAG_DENY_EXECMEM, NULL); + rc = -EACCES; + } + } + if (rc) goto error; } @@ -6327,6 +6508,7 @@ static int selinux_getprocattr(struct task_struct *p, { const struct task_security_struct *__tsec; u32 sid; + u64 flags; int error; unsigned len; @@ -6352,12 +6534,26 @@ static int selinux_getprocattr(struct task_struct *p, sid = __tsec->keycreate_sid; else if (!strcmp(name, "sockcreate")) sid = __tsec->sockcreate_sid; + else if (!strcmp(name, "selinux_flags")) + flags = __tsec->flags; else { error = -EINVAL; goto bad; } rcu_read_unlock(); + if (!strcmp(name, "selinux_flags")) { + size_t len = 16 + 1; + // freed by the caller + char *buf = kzalloc(len, GFP_KERNEL); + if (!buf) { + return -ENOMEM; + } + len = snprintf(buf, len, "%llx", flags); + *value = buf; + return (int) len; + } + if (!sid) return 0; @@ -6375,9 +6571,10 @@ static int selinux_setprocattr(const char *name, void *value, size_t size) { struct task_security_struct *tsec; struct cred *new; - u32 mysid = current_sid(), sid = 0, ptsid; + u32 mysid = current_sid(), sid = 0, ptsid, context_type = 0; int error; char *str = value; + u64 flags; /* * Basic control over ability to set these attributes at all. 
@@ -6394,7 +6591,7 @@ static int selinux_setprocattr(const char *name, void *value, size_t size) else if (!strcmp(name, "sockcreate")) error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS, PROCESS__SETSOCKCREATE, NULL); - else if (!strcmp(name, "current")) + else if (!strcmp(name, "current") || !strcmp(name, "selinux_flags")) error = avc_has_perm(mysid, mysid, SECCLASS_PROCESS, PROCESS__SETCURRENT, NULL); else @@ -6403,7 +6600,7 @@ static int selinux_setprocattr(const char *name, void *value, size_t size) return error; /* Obtain a SID for the context, if one was specified. */ - if (size && str[0] && str[0] != '\n') { + if (size && str[0] && str[0] != '\n' && strcmp(name, "selinux_flags")) { if (str[size-1] == '\n') { str[size-1] = 0; size--; @@ -6493,6 +6690,37 @@ static int selinux_setprocattr(const char *name, void *value, size_t size) } tsec->sid = sid; + } else if (!strcmp(name, "selinux_flags")) { + error = security_sid_to_context_type(mysid, &context_type); + if (error) { + goto abort_change; + } + + if (context_type != selinux_state.types.zygote && + context_type != selinux_state.types.webview_zygote + ) { + pr_err("selinux_flags: attempt to set from an unknown context, pid %i\n", current->pid); + error = -EPERM; + goto abort_change; + } + + if (size >= 2 && str[size - 1] == 0) { + if (kstrtou64(str, 16, &flags)) { + error = -EINVAL; + goto abort_change; + } + } else { + error = -EINVAL; + goto abort_change; + } + + if ((flags & TSEC_ALL_FLAGS) != flags) { + pr_warn("selinux_flags: unknown flags %llu\n", flags & ~TSEC_ALL_FLAGS); + error = -EINVAL; + goto abort_change; + } + + tsec->flags = flags; } else { error = -EINVAL; goto abort_change; diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index 8159fd53c3de..ab873d5910e4 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -35,8 +35,35 @@ struct task_security_struct { u32 create_sid; /* fscreate SID */ u32 keycreate_sid; /* keycreate SID 
*/ u32 sockcreate_sid; /* fscreate SID */ + u64 flags; } __randomize_layout; +#define TSEC_FLAG_DENY_EXECMEM (1 << 0) +#define TSEC_FLAG_DENY_EXECMOD (1 << 1) +#define TSEC_FLAG_DENY_EXECUTE_APPDOMAIN_TMPFS (1 << 2) +#define TSEC_FLAG_DENY_EXECUTE_APP_DATA_FILE (1 << 3) +#define TSEC_FLAG_DENY_EXECUTE_NO_TRANS_APP_DATA_FILE (1 << 4) +#define TSEC_FLAG_DENY_EXECUTE_ASHMEM_DEVICE (1 << 5) +#define TSEC_FLAG_DENY_EXECUTE_ASHMEM_LIBCUTILS_DEVICE (1 << 6) +#define TSEC_FLAG_DENY_EXECUTE_PRIVAPP_DATA_FILE (1 << 7) +#define TSEC_FLAG_DENY_PROCESS_PTRACE (1 << 8) + +#define TSEC_ALL_DENY_EXECUTE_FLAGS (\ + TSEC_FLAG_DENY_EXECUTE_APPDOMAIN_TMPFS | \ + TSEC_FLAG_DENY_EXECUTE_APP_DATA_FILE | \ + TSEC_FLAG_DENY_EXECUTE_NO_TRANS_APP_DATA_FILE | \ + TSEC_FLAG_DENY_EXECUTE_ASHMEM_DEVICE | \ + TSEC_FLAG_DENY_EXECUTE_ASHMEM_LIBCUTILS_DEVICE | \ + TSEC_FLAG_DENY_EXECUTE_PRIVAPP_DATA_FILE | \ +0) + +#define TSEC_ALL_FLAGS (\ + TSEC_FLAG_DENY_EXECMEM | \ + TSEC_FLAG_DENY_EXECMOD | \ + TSEC_ALL_DENY_EXECUTE_FLAGS | \ + TSEC_FLAG_DENY_PROCESS_PTRACE | \ +0) + enum label_initialized { LABEL_INVALID, /* invalid or not initialized */ LABEL_INITIALIZED, /* initialized */ diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 3349fe8d3cfc..96bdd2a96a1a 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -89,6 +89,17 @@ extern int selinux_enabled_boot; struct selinux_policy; +struct context_types { + u32 app_data_file; + u32 appdomain_tmpfs; + u32 ashmem_device; + u32 ashmem_libcutils_device; + u32 crash_dump; + u32 privapp_data_file; + u32 webview_zygote; + u32 zygote; +}; + struct selinux_state { #ifdef CONFIG_SECURITY_SELINUX_DEVELOP bool enforcing; @@ -98,6 +109,8 @@ struct selinux_state { bool android_netlink_route; bool android_netlink_getneigh; + struct context_types types; + struct page *status_page; struct mutex status_lock; @@ -279,6 +292,8 @@ int security_change_sid(u32 ssid, u32 tsid, u16 tclass, u32 
*out_sid); int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len); +int security_sid_to_context_type(u32 sid, u32 *out); + int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); int security_sid_to_context_inval(u32 sid, char **scontext, u32 *scontext_len); diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index f01c07499659..66181697c2cc 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -43,6 +43,8 @@ #include "objsec.h" #include "conditional.h" #include "ima.h" +#include "ss/services.h" +#include "ss/symtab.h" enum sel_inos { SEL_ROOT_INO = 2, @@ -579,6 +581,36 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi, return ret; } +static int resolve_context_type(struct selinux_load_state *lstate, const char *name, u32 *out_type) +{ + struct type_datum *typdatum = symtab_search(&lstate->policy->policydb.p_types, name); + if (!typdatum || typdatum->attribute) { + pr_err("SELinux: missing type_datum for %s\n", name); + return -EINVAL; + } + *out_type = typdatum->value; + return 0; +} + +static int resolve_context_types(struct selinux_load_state *lstate, struct context_types *types) { + int rc; + +#define RESOLVE_TYPE(t) rc = resolve_context_type(lstate, #t, &types->t); if (rc) return rc + + RESOLVE_TYPE(app_data_file); + RESOLVE_TYPE(appdomain_tmpfs); + RESOLVE_TYPE(ashmem_device); + RESOLVE_TYPE(ashmem_libcutils_device); + RESOLVE_TYPE(crash_dump); + RESOLVE_TYPE(privapp_data_file); + RESOLVE_TYPE(webview_zygote); + RESOLVE_TYPE(zygote); + +#undef RESOLVE_TYPE + + return 0; +} + static ssize_t sel_write_load(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -625,6 +657,15 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf, goto out; } + length = resolve_context_types(&load_state, &selinux_state.types); + if (length) { +//don't cancel loading sepolicy because of missing context_types in microdroid sepolicy +#if
!IS_ENABLED(CONFIG_MICRODROID) + selinux_policy_cancel(&load_state); + goto out; +#endif + } + selinux_policy_commit(&load_state); length = count; audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD, diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index e5d143145c0a..d79849deb899 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1372,6 +1372,35 @@ static int security_sid_to_context_core(u32 sid, char **scontext, } +// based on security_sid_to_context_core() above +int security_sid_to_context_type(u32 sid, u32 *out) +{ + struct selinux_policy *policy; + struct policydb *policydb; + struct sidtab *sidtab; + struct sidtab_entry *entry; + int rc = 0; + + rcu_read_lock(); + policy = rcu_dereference(selinux_state.policy); + policydb = &policy->policydb; + sidtab = policy->sidtab; + + entry = sidtab_search_entry(sidtab, sid); + + if (!entry) { + pr_err("SELinux: %s: unrecognized SID %d\n", __func__, sid); + rc = -EINVAL; + goto out_unlock; + } + + *out = entry->context.type; + +out_unlock: + rcu_read_unlock(); + return rc; +} + /** * security_sid_to_context - Obtain a context for a given SID. 
* @sid: security identifier, SID diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index b0072e64b010..3ce462d25218 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -55,7 +55,7 @@ $(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT): $(Q)mkdir -p $(@) $(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd - $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(SUBCMD_OUT) \ + $(Q)$(MAKE) -C $(SUBCMD_SRC) EXTRA_CFLAGS="$(CFLAGS)" OUTPUT=$(SUBCMD_OUT) \ DESTDIR=$(SUBCMD_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \ $(abspath $@) install_headers diff --git a/tools/testing/selftests/filesystems/fuse/fuse_test.c b/tools/testing/selftests/filesystems/fuse/fuse_test.c index 06305ea9f6b1..359ae9e889b1 100644 --- a/tools/testing/selftests/filesystems/fuse/fuse_test.c +++ b/tools/testing/selftests/filesystems/fuse/fuse_test.c @@ -255,7 +255,7 @@ static int bpf_test_partial(const char *mount_dir) TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC), src_fd != -1); TESTEQUAL(create_file(src_fd, s(test_name), 1, 2), 0); - TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace", + TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_partial", &bpf_fd, NULL, NULL), 0); TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0); @@ -363,7 +363,7 @@ static int bpf_test_readdir(const char *mount_dir) src_fd != -1); TESTEQUAL(create_file(src_fd, s(names[0]), 1, 2), 0); TESTEQUAL(create_file(src_fd, s(names[1]), 1, 2), 0); - TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace", + TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_partial", &bpf_fd, NULL, NULL), 0); TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0); @@ -1490,6 +1490,8 @@ static int bpf_test_statfs(const char *mount_dir) static int bpf_test_lseek(const char *mount_dir) { const char *file = "real"; + const char *sparse_file = "sparse"; + const off_t sparse_length = 0x100000000u; const char *test_data = "data"; int result = TEST_FAILURE; int src_fd = -1; @@ 
-1504,6 +1506,12 @@ static int bpf_test_lseek(const char *mount_dir) TESTEQUAL(write(fd, test_data, strlen(test_data)), strlen(test_data)); TESTSYSCALL(close(fd)); fd = -1; + TEST(fd = openat(src_fd, sparse_file, O_CREAT | O_RDWR | O_CLOEXEC, + 0777), + fd != -1); + TESTSYSCALL(ftruncate(fd, sparse_length)); + TESTSYSCALL(close(fd)); + fd = -1; TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace", &bpf_fd, NULL, NULL), 0); TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0); @@ -1518,6 +1526,18 @@ static int bpf_test_lseek(const char *mount_dir) TESTEQUAL(bpf_test_trace("lseek"), 0); TESTEQUAL(lseek(fd, 1, SEEK_DATA), 1); TESTEQUAL(bpf_test_trace("lseek"), 0); + TESTSYSCALL(close(fd)); + fd = -1; + + TEST(fd = s_open(s_path(s(mount_dir), s(sparse_file)), + O_RDONLY | O_CLOEXEC), + fd != -1); + TESTEQUAL(lseek(fd, -256, SEEK_END), sparse_length - 256); + TESTEQUAL(lseek(fd, 0, SEEK_CUR), sparse_length - 256); + + TESTSYSCALL(close(fd)); + fd = -1; + result = TEST_SUCCESS; out: close(fd); diff --git a/tools/testing/selftests/filesystems/fuse/test_bpf.c b/tools/testing/selftests/filesystems/fuse/test_bpf.c index a014b915c059..be5f59ad8343 100644 --- a/tools/testing/selftests/filesystems/fuse/test_bpf.c +++ b/tools/testing/selftests/filesystems/fuse/test_bpf.c @@ -28,9 +28,9 @@ int readdir_test(struct fuse_bpf_args *fa) } } -SEC("test_trace") +SEC("test_partial") /* return FUSE_BPF_BACKING to use backing fs, 0 to pass to usermode */ -int trace_test(struct fuse_bpf_args *fa) +int partial_test(struct fuse_bpf_args *fa) { switch (fa->opcode) { case FUSE_LOOKUP | FUSE_PREFILTER: { @@ -329,6 +329,195 @@ int trace_test(struct fuse_bpf_args *fa) } } +SEC("test_trace") +/* return FUSE_BPF_BACKING to use backing fs, 0 to pass to usermode */ +int trace_test(struct fuse_bpf_args *fa) +{ + switch (fa->opcode) { + case FUSE_LOOKUP | FUSE_PREFILTER: { + /* real and partial use backing file */ + const char *name = fa->in_args[0].value; + + bpf_printk("lookup %s", name); 
+ return FUSE_BPF_BACKING; + } + + case FUSE_ACCESS | FUSE_PREFILTER: { + bpf_printk("Access: %d", fa->nodeid); + return FUSE_BPF_BACKING; + } + + case FUSE_CREATE | FUSE_PREFILTER: + bpf_printk("Create: %d", fa->nodeid); + return FUSE_BPF_BACKING; + + case FUSE_MKNOD | FUSE_PREFILTER: { + const struct fuse_mknod_in *fmi = fa->in_args[0].value; + const char *name = fa->in_args[1].value; + + bpf_printk("mknod %s %x %x", name, fmi->rdev | fmi->mode, fmi->umask); + return FUSE_BPF_BACKING; + } + + case FUSE_MKDIR | FUSE_PREFILTER: { + const struct fuse_mkdir_in *fmi = fa->in_args[0].value; + const char *name = fa->in_args[1].value; + + bpf_printk("mkdir %s %x %x", name, fmi->mode, fmi->umask); + return FUSE_BPF_BACKING; + } + + case FUSE_RMDIR | FUSE_PREFILTER: { + const char *name = fa->in_args[0].value; + + bpf_printk("rmdir %s", name); + return FUSE_BPF_BACKING; + } + + case FUSE_RENAME | FUSE_PREFILTER: { + const char *oldname = fa->in_args[1].value; + const char *newname = fa->in_args[2].value; + + bpf_printk("rename from %s", oldname); + bpf_printk("rename to %s", newname); + return FUSE_BPF_BACKING; + } + + case FUSE_RENAME2 | FUSE_PREFILTER: { + const struct fuse_rename2_in *fri = fa->in_args[0].value; + uint32_t flags = fri->flags; + const char *oldname = fa->in_args[1].value; + const char *newname = fa->in_args[2].value; + + bpf_printk("rename(%x) from %s", flags, oldname); + bpf_printk("rename to %s", newname); + return FUSE_BPF_BACKING; + } + + case FUSE_UNLINK | FUSE_PREFILTER: { + const char *name = fa->in_args[0].value; + + bpf_printk("unlink %s", name); + return FUSE_BPF_BACKING; + } + + case FUSE_LINK | FUSE_PREFILTER: { + const struct fuse_link_in *fli = fa->in_args[0].value; + const char *link_name = fa->in_args[1].value; + + bpf_printk("link %d %s", fli->oldnodeid, link_name); + return FUSE_BPF_BACKING; + } + + case FUSE_SYMLINK | FUSE_PREFILTER: { + const char *link_name = fa->in_args[0].value; + const char *link_dest = fa->in_args[1].value; + + 
bpf_printk("symlink from %s", link_name); + bpf_printk("symlink to %s", link_dest); + return FUSE_BPF_BACKING; + } + + case FUSE_READLINK | FUSE_PREFILTER: { + const char *link_name = fa->in_args[0].value; + + bpf_printk("readlink from", link_name); + return FUSE_BPF_BACKING; + } + + case FUSE_OPEN | FUSE_PREFILTER: { + bpf_printk("open"); + return FUSE_BPF_BACKING; + } + + case FUSE_OPEN | FUSE_POSTFILTER: + bpf_printk("open postfilter"); + return FUSE_BPF_USER_FILTER; + + case FUSE_READ | FUSE_PREFILTER: { + const struct fuse_read_in *fri = fa->in_args[0].value; + + bpf_printk("read %llu", fri->offset); + return FUSE_BPF_BACKING; + } + + case FUSE_GETATTR | FUSE_PREFILTER: { + bpf_printk("getattr"); + return FUSE_BPF_BACKING; + } + + case FUSE_SETATTR | FUSE_PREFILTER: { + bpf_printk("setattr"); + return FUSE_BPF_BACKING; + } + + case FUSE_OPENDIR | FUSE_PREFILTER: { + bpf_printk("opendir"); + return FUSE_BPF_BACKING; + } + + case FUSE_READDIR | FUSE_PREFILTER: { + bpf_printk("readdir"); + return FUSE_BPF_BACKING; + } + + case FUSE_FLUSH | FUSE_PREFILTER: { + bpf_printk("Flush"); + return FUSE_BPF_BACKING; + } + + case FUSE_GETXATTR | FUSE_PREFILTER: { + const char *name = fa->in_args[1].value; + + bpf_printk("getxattr %s", name); + return FUSE_BPF_BACKING; + } + + case FUSE_LISTXATTR | FUSE_PREFILTER: { + const char *name = fa->in_args[1].value; + + bpf_printk("listxattr %s", name); + return FUSE_BPF_BACKING; + } + + case FUSE_SETXATTR | FUSE_PREFILTER: { + const char *name = fa->in_args[1].value; + unsigned int size = fa->in_args[2].size; + + bpf_printk("setxattr %s %u", name, size); + return FUSE_BPF_BACKING; + } + + case FUSE_REMOVEXATTR | FUSE_PREFILTER: { + const char *name = fa->in_args[0].value; + + bpf_printk("removexattr %s", name); + return FUSE_BPF_BACKING; + } + + case FUSE_CANONICAL_PATH | FUSE_PREFILTER: { + bpf_printk("canonical_path"); + return FUSE_BPF_BACKING; + } + + case FUSE_STATFS | FUSE_PREFILTER: { + bpf_printk("statfs"); + return 
FUSE_BPF_BACKING; + } + + case FUSE_LSEEK | FUSE_PREFILTER: { + const struct fuse_lseek_in *fli = fa->in_args[0].value; + + bpf_printk("lseek type:%d, offset:%lld", fli->whence, fli->offset); + return FUSE_BPF_BACKING; + } + + default: + bpf_printk("Unknown opcode %d", fa->opcode); + return FUSE_BPF_BACKING; + } +} + SEC("test_hidden") int trace_hidden(struct fuse_bpf_args *fa) { diff --git a/update_virt_prebuilts.sh b/update_virt_prebuilts.sh new file mode 100755 index 000000000000..0402e0c46918 --- /dev/null +++ b/update_virt_prebuilts.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +#set -e + +KERNEL_VERSION="6.6" + +kernel_image_src= +kernel_image_dst= +if [[ "${ARCH}" == "arm64" ]]; then + # https://github.com/GrapheneOS/device_generic_goldfish/blob/14/board/kernel/arm64.mk#L52 + kernel_image_src="Image.gz" + kernel_image_dst="kernel-${KERNEL_VERSION}-gz" +elif [[ "${ARCH}" == "x86_64" ]]; then + kernel_image_src="bzImage" + kernel_image_dst="kernel-${KERNEL_VERSION}" +else + echo "ARCH is undefined or unknown" + exit 1 +fi + +test -d "$ANDROID_BUILD_TOP" || (echo "ANDROID_BUILD_TOP is undefined or missing" && exit 1) + +COMMON_PREBUILT_PATH="$ANDROID_BUILD_TOP/prebuilts/qemu-kernel/${ARCH}/${KERNEL_VERSION}" +GKI_PREBUILT_PATH="$COMMON_PREBUILT_PATH/gki_modules" +VIRT_PREBUILT_PATH="$COMMON_PREBUILT_PATH/goldfish_modules" + +for file in $(find ${COMMON_PREBUILT_PATH} -maxdepth 1 -type f -printf "%f\n"); do + cp "$@" common_dist/$file ${COMMON_PREBUILT_PATH}/$file > /dev/null 2>&1 +done +for file in $(find ${GKI_PREBUILT_PATH} -maxdepth 1 -type f -printf "%f\n"); do + cp "$@" common_dist/$file ${GKI_PREBUILT_PATH}/$file > /dev/null 2>&1 +done +cp "$@" common_dist/${kernel_image_src} ${COMMON_PREBUILT_PATH}/${kernel_image_dst} +#test -d lib && rm -r lib +#bsdtar xvf common_dist/system_dlkm_staging_archive.tar.gz >/dev/null 2>&1 +#rm -r ${COMMON_PREBUILT_PATH}/system_dlkm_staging/modules +#cp -a "$@" lib/modules ${COMMON_PREBUILT_PATH}/system_dlkm_staging/modules 
+for file in $(find ${VIRT_PREBUILT_PATH} -maxdepth 1 -type f -printf "%f\n"); do + cp "$@" virt_dist/$file ${VIRT_PREBUILT_PATH}/$file > /dev/null 2>&1 +done