From 404b2ef3bca4fa5661ebd22bc5377d6d3f541cc6 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 24 Mar 2021 14:53:37 -0700
Subject: [PATCH 1/2] inet: use bigger hash table for IP ID generation

stable inclusion
from stable-v4.19
commit 7f7e23df8509e072593200400a4b094cc44376d2
category: bugfix
issue:
CVE: CVE-2021-45486

Signed-off-by: gaochao

---------------------------------------

commit aa6dd211e4b1dde9d5dc25d699d35f789ae7eeba upstream.

In commit 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
I used a very small hash table that could be abused
by patient attackers to reveal sensitive information.

Switch to a dynamic sizing, depending on RAM size.

Typical big hosts will now use 128x more storage (2 MB)
to get a similar increase in security and reduction
of hash collisions.

As a bonus, use of alloc_large_system_hash() spreads
allocated memory among all NUMA nodes.

Fixes: 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
Reported-by: Amit Klein
Signed-off-by: Eric Dumazet
Cc: Willy Tarreau
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: gaochao
---
 net/ipv4/route.c | 42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 48081e6d50b4..753d4121a019 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,6 +70,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -470,8 +471,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
 }
 
-#define IP_IDENTS_SZ 2048u
-
+/* Hash tables of size 2048..262144 depending on RAM size.
+ * Each bucket uses 8 bytes.
+ */
+static u32 ip_idents_mask __read_mostly;
 static atomic_t *ip_idents __read_mostly;
 static u32 *ip_tstamps __read_mostly;
 
@@ -481,12 +484,16 @@ static u32 *ip_tstamps __read_mostly;
  */
 u32 ip_idents_reserve(u32 hash, int segs)
 {
-        u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
-        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
-        u32 old = READ_ONCE(*p_tstamp);
-        u32 now = (u32)jiffies;
+        u32 bucket, old, now = (u32)jiffies;
+        atomic_t *p_id;
+        u32 *p_tstamp;
         u32 delta = 0;
 
+        bucket = hash & ip_idents_mask;
+        p_tstamp = ip_tstamps + bucket;
+        p_id = ip_idents + bucket;
+        old = READ_ONCE(*p_tstamp);
+
         if (old != now && cmpxchg(p_tstamp, old, now) == old)
                 delta = prandom_u32_max(now - old);
 
@@ -3196,18 +3203,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
 
 int __init ip_rt_init(void)
 {
+        void *idents_hash;
         int cpu;
 
-        ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
-                                  GFP_KERNEL);
-        if (!ip_idents)
-                panic("IP: failed to allocate ip_idents\n");
+        /* For modern hosts, this will use 2 MB of memory */
+        idents_hash = alloc_large_system_hash("IP idents",
+                                              sizeof(*ip_idents) + sizeof(*ip_tstamps),
+                                              0,
+                                              16, /* one bucket per 64 KB */
+                                              HASH_ZERO,
+                                              NULL,
+                                              &ip_idents_mask,
+                                              2048,
+                                              256*1024);
+
+        ip_idents = idents_hash;
 
-        prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+        prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
 
-        ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
-        if (!ip_tstamps)
-                panic("IP: failed to allocate ip_tstamps\n");
+        ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
 
         for_each_possible_cpu(cpu) {
                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
-- 
Gitee
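[Editor's note: the sizing policy above is easy to model. The stand-alone C sketch
below approximates what alloc_large_system_hash() is asked to do with the parameters
in the last hunk: one bucket per 64 KB of RAM (scale 16), clamped between the
low limit 2048 and the high limit 256*1024, and kept a power of two so that
"hash & ip_idents_mask" can replace "hash % IP_IDENTS_SZ". This is a user-space
approximation, not kernel code: the function name and the round-down loop are
invented for illustration, and the real helper additionally zeroes the table
(HASH_ZERO) and spreads it across NUMA nodes. At the top end, 256K buckets of
8 bytes each (an atomic_t plus a u32 timestamp) give the 2 MB figure quoted in
the commit message.

#include <stdint.h>
#include <stdio.h>

/* Model of the bucket-count selection (illustrative name, not kernel code). */
static uint32_t ip_idents_buckets(uint64_t ram_bytes)
{
        uint64_t n = ram_bytes >> 16;   /* one bucket per 64 KB of RAM */

        if (n < 2048)                   /* low_limit passed to the helper */
                n = 2048;
        if (n > 256 * 1024)             /* high_limit passed to the helper */
                n = 256 * 1024;
        while (n & (n - 1))             /* round down to a power of two */
                n &= n - 1;
        return (uint32_t)n;
}

int main(void)
{
        uint64_t ram[] = { 1ULL << 27, 1ULL << 30, 16ULL << 30 };

        for (int i = 0; i < 3; i++) {
                uint32_t n = ip_idents_buckets(ram[i]);

                printf("%6llu MB RAM -> %6u buckets (%4u KB), mask 0x%05x\n",
                       (unsigned long long)(ram[i] >> 20), n,
                       n * 8 / 1024, n - 1);
        }
        return 0;
}

End of editor's note.]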
From 9c18d3800535c025a9cb8f279a0801d1a6463806 Mon Sep 17 00:00:00 2001
From: Ben Skeggs
Date: Tue, 11 Dec 2018 14:50:02 +1000
Subject: [PATCH 2/2] drm/nouveau/mmu: add more general vmm free/node handling
 functions

mainline inclusion
from mainline-v6.2-rc4
commit 729eba3355674f2d9524629b73683ba1d1cd3f10
category: bugfix
issue:
CVE: CVE-2023-0030

Signed-off-by: gaochao

---------------------------------------

Aside from being a nice cleanup, these will allow the upcoming direct
page mapping interfaces to play nicely with normal mappings.

Signed-off-by: Ben Skeggs
Signed-off-by: gaochao
---
 .../gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c    |  21 +--
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 145 ++++++++++++++----
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h |   3 +-
 3 files changed, 118 insertions(+), 51 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 37b201b95f15..6889076097ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -134,23 +134,10 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                         goto fail;
                 }
 
-                if (vma->addr != addr) {
-                        const u64 tail = vma->size + vma->addr - addr;
-                        if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
-                                goto fail;
-                        vma->part = true;
-                        nvkm_vmm_node_insert(vmm, vma);
-                }
-
-                if (vma->size != size) {
-                        const u64 tail = vma->size - size;
-                        struct nvkm_vma *tmp;
-                        if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
-                                nvkm_vmm_unmap_region(vmm, vma);
-                                goto fail;
-                        }
-                        tmp->part = true;
-                        nvkm_vmm_node_insert(vmm, tmp);
+                vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+                if (!vma) {
+                        ret = -ENOMEM;
+                        goto fail;
                 }
         }
         vma->busy = true;
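[Editor's note: the hunk above collapses two open-coded splits into a single call
to the new nvkm_vmm_node_split(). The stand-alone sketch below models that split
on a toy singly linked list: carve [addr, addr + size) out of an existing node,
leaving an optional head remainder in place and an optional tail remainder after
it, both flagged part = true. Struct and function names are simplified stand-ins
for illustration only; the real helper also maintains an rb-tree keyed by address
and re-merges the head remainder if the tail allocation fails.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vma {
        unsigned long long addr, size;
        bool part;              /* piece of a larger split allocation */
        struct vma *next;       /* stands in for vmm->list */
};

/* Shrink v in place and return a new node covering its last 'tail'
 * bytes, linked right after it (cf. nvkm_vma_tail()). */
static struct vma *vma_tail(struct vma *v, unsigned long long tail)
{
        struct vma *new = calloc(1, sizeof(*new));

        if (!new)
                return NULL;
        v->size -= tail;
        new->addr = v->addr + v->size;
        new->size = tail;
        new->next = v->next;
        v->next = new;
        return new;
}

/* Return the node covering exactly [addr, addr + size), or NULL. */
static struct vma *vma_split(struct vma *v,
                             unsigned long long addr, unsigned long long size)
{
        if (v->addr != addr) {          /* keep a head remainder */
                if (!(v = vma_tail(v, v->size + v->addr - addr)))
                        return NULL;
                v->part = true;
        }
        if (v->size != size) {          /* keep a tail remainder */
                struct vma *tmp = vma_tail(v, v->size - size);

                if (!tmp)               /* the real helper re-merges here */
                        return NULL;
                tmp->part = true;
        }
        return v;
}

int main(void)
{
        struct vma head = { .addr = 0x1000, .size = 0x4000 };
        struct vma *mid = vma_split(&head, 0x2000, 0x1000);

        for (struct vma *v = &head; v; v = v->next)
                printf("[%#llx + %#llx) part=%d%s\n", v->addr, v->size,
                       v->part, v == mid ? "  <- requested range" : "");
        return 0;
}

End of editor's note.]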
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 5f8b8b399b97..4b839b5c5589 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -767,6 +767,20 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
         return new;
 }
 
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+        rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+        nvkm_vmm_free_remove(vmm, vma);
+        list_del(&vma->head);
+        kfree(vma);
+}
+
 static void
 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
@@ -795,7 +809,21 @@ nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
         rb_insert_color(&vma->tree, &vmm->free);
 }
 
-void
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+        rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+        nvkm_vmm_node_remove(vmm, vma);
+        list_del(&vma->head);
+        kfree(vma);
+}
+
+static void
 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
         struct rb_node **ptr = &vmm->root.rb_node;
@@ -834,6 +862,78 @@ nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
         return NULL;
 }
 
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :            \
+        list_entry((root)->head.dir, struct nvkm_vma, head))
+
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+                    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+        if (next) {
+                if (vma->size == size) {
+                        vma->size += next->size;
+                        nvkm_vmm_node_delete(vmm, next);
+                        if (prev) {
+                                prev->size += vma->size;
+                                nvkm_vmm_node_delete(vmm, vma);
+                                return prev;
+                        }
+                        return vma;
+                }
+                BUG_ON(prev);
+
+                nvkm_vmm_node_remove(vmm, next);
+                vma->size -= size;
+                next->addr -= size;
+                next->size += size;
+                nvkm_vmm_node_insert(vmm, next);
+                return next;
+        }
+
+        if (prev) {
+                if (vma->size != size) {
+                        nvkm_vmm_node_remove(vmm, vma);
+                        prev->size += size;
+                        vma->addr += size;
+                        vma->size -= size;
+                        nvkm_vmm_node_insert(vmm, vma);
+                } else {
+                        prev->size += vma->size;
+                        nvkm_vmm_node_delete(vmm, vma);
+                }
+                return prev;
+        }
+
+        return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+                    struct nvkm_vma *vma, u64 addr, u64 size)
+{
+        struct nvkm_vma *prev = NULL;
+
+        if (vma->addr != addr) {
+                prev = vma;
+                if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+                        return NULL;
+                vma->part = true;
+                nvkm_vmm_node_insert(vmm, vma);
+        }
+
+        if (vma->size != size) {
+                struct nvkm_vma *tmp;
+                if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+                        nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+                        return NULL;
+                }
+                tmp->part = true;
+                nvkm_vmm_node_insert(vmm, tmp);
+        }
+
+        return vma;
+}
+
 static void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
@@ -954,37 +1054,20 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
         return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
 }
 
-#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL :             \
-        list_entry((root)->head.dir, struct nvkm_vma, head)
-
 void
 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
-        struct nvkm_vma *next;
+        struct nvkm_vma *next = node(vma, next);
+        struct nvkm_vma *prev = NULL;
 
         nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
         nvkm_memory_unref(&vma->memory);
 
-        if (vma->part) {
-                struct nvkm_vma *prev = node(vma, prev);
-                if (!prev->memory) {
-                        prev->size += vma->size;
-                        rb_erase(&vma->tree, &vmm->root);
-                        list_del(&vma->head);
-                        kfree(vma);
-                        vma = prev;
-                }
-        }
-
-        next = node(vma, next);
-        if (next && next->part) {
-                if (!next->memory) {
-                        vma->size += next->size;
-                        rb_erase(&next->tree, &vmm->root);
-                        list_del(&next->head);
-                        kfree(next);
-                }
-        }
+        if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+                prev = NULL;
+        if (!next->part || next->memory)
+                next = NULL;
+        nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
 
 void
@@ -1163,18 +1246,14 @@ nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
         struct nvkm_vma *prev, *next;
 
         if ((prev = node(vma, prev)) && !prev->used) {
-                rb_erase(&prev->tree, &vmm->free);
-                list_del(&prev->head);
                 vma->addr  = prev->addr;
                 vma->size += prev->size;
-                kfree(prev);
+                nvkm_vmm_free_delete(vmm, prev);
         }
 
         if ((next = node(vma, next)) && !next->used) {
-                rb_erase(&next->tree, &vmm->free);
-                list_del(&next->head);
                 vma->size += next->size;
-                kfree(next);
+                nvkm_vmm_free_delete(vmm, next);
         }
 
         nvkm_vmm_free_insert(vmm, vma);
@@ -1250,7 +1329,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
         }
 
         /* Remove VMA from the list of allocated nodes. */
-        rb_erase(&vma->tree, &vmm->root);
+        nvkm_vmm_node_remove(vmm, vma);
 
         /* Merge VMA back into the free list. */
         vma->page = NVKM_VMA_PAGE_NONE;
@@ -1357,7 +1436,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
                         tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
                 if (addr <= tail && tail - addr >= size) {
-                        rb_erase(&this->tree, &vmm->free);
+                        nvkm_vmm_free_remove(vmm, this);
                         vma = this;
                         break;
                 }
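[Editor's note: the heart of this file's change is nvkm_vmm_node_merge(), which
lets the rewritten nvkm_vmm_unmap_region() choose its merge candidates up front
(a neighbour qualifies only if it is part of the same split allocation and no
longer mapped) and hand all three nodes to one helper. The sketch below is a
user-space model of that flow under simplified assumptions: plain prev/next
pointers instead of vmm->list and the rb-tree, and a bare void *memory flag.
Names and structures are illustrative, not the nouveau types.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vma {
        unsigned long long addr, size;
        bool part;               /* piece of a larger split allocation */
        void *memory;            /* non-NULL while a mapping is live */
        struct vma *prev, *next; /* stands in for vmm->list */
};

/* Absorb the adjacent, later node 'victim' into 'keep'. */
static void vma_absorb(struct vma *keep, struct vma *victim)
{
        keep->size += victim->size;
        keep->next = victim->next;
        if (victim->next)
                victim->next->prev = keep;
        free(victim);
}

/* Mirrors the reworked nvkm_vmm_unmap_region(): pick merge candidates
 * first, then coalesce.  Returns the surviving node. */
static struct vma *vma_unmap_region(struct vma *vma)
{
        struct vma *prev = NULL, *next = vma->next;

        vma->memory = NULL;      /* real code also drops tags and refs */

        if (vma->part) {         /* a part node always has a predecessor */
                prev = vma->prev;
                if (prev->memory)
                        prev = NULL;    /* previous piece still mapped */
        }
        if (next && (!next->part || next->memory))
                next = NULL;            /* not ours, or still mapped */

        if (next)
                vma_absorb(vma, next);
        if (prev) {
                vma_absorb(prev, vma);
                return prev;
        }
        return vma;
}

int main(void)
{
        /* Three adjacent pieces of one split mapping; the middle is mapped. */
        struct vma a = { 0x0000, 0x1000, true, NULL, NULL, NULL };
        struct vma *b = calloc(1, sizeof(*b));
        struct vma *c = calloc(1, sizeof(*c));

        *b = (struct vma){ 0x1000, 0x1000, true, "mapped", &a, c };
        *c = (struct vma){ 0x2000, 0x1000, true, NULL, b, NULL };
        a.next = b;

        struct vma *s = vma_unmap_region(b);    /* folds b into a and c */
        printf("[%#llx + %#llx) part=%d\n", s->addr, s->size, s->part);
        return 0;
}

End of editor's note.]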
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 1a3b0a3724ca..6d3f1e33793d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -157,6 +157,8 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                   u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
                   const char *name, struct nvkm_vmm *);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+                                     u64 addr, u64 size);
 int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
                         bool sparse, u8 page, u8 align, u64 size,
                         struct nvkm_vma **pvma);
@@ -165,7 +167,6 @@ void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
 
 struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
-void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
 
 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32, u64,
                   u64, void *, u32, struct lock_class_key *,
-- 
Gitee
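[Editor's note: the header diff completes the picture: nvkm_vmm_node_split() is
exported for uvmm.c, while nvkm_vmm_node_insert() becomes static. The pattern
behind the new helpers is a detach-versus-destroy pairing: *_remove() only
unhooks a node that will be reused or reinserted (the nvkm_vmm_get_locked()
hunk), while *_delete() also unlinks it from the address list and frees it (the
nvkm_vmm_put_region() hunks). The sketch below models that pairing with a
size-sorted singly linked list standing in for the kernel's rb-tree of free
regions; all names here are illustrative, not driver code.

#include <stdio.h>
#include <stdlib.h>

struct region {
        unsigned long long addr, size;
        struct region *next;
};

struct freelist { struct region *head; };

static void free_insert(struct freelist *fl, struct region *r)
{
        struct region **p = &fl->head;

        while (*p && (*p)->size < r->size)      /* keep sorted by size */
                p = &(*p)->next;
        r->next = *p;
        *p = r;
}

static void free_remove(struct freelist *fl, struct region *r)
{
        struct region **p = &fl->head;

        while (*p != r)
                p = &(*p)->next;
        *p = r->next;           /* detach only: r lives on as the allocation */
}

static void free_delete(struct freelist *fl, struct region *r)
{
        free_remove(fl, r);
        free(r);                /* detach and destroy, as when coalescing */
}

int main(void)
{
        struct freelist fl = { NULL };
        struct region *a = calloc(1, sizeof(*a));
        struct region *b = calloc(1, sizeof(*b));

        *a = (struct region){ 0x0000, 0x2000, NULL };
        *b = (struct region){ 0x8000, 0x1000, NULL };
        free_insert(&fl, a);
        free_insert(&fl, b);

        free_remove(&fl, b);    /* "get" path: b becomes the allocation */
        free_delete(&fl, a);    /* "put" coalescing would destroy a */

        printf("list %s, b survives at [%#llx + %#llx)\n",
               fl.head ? "non-empty" : "empty", b->addr, b->size);
        free(b);
        return 0;
}

End of editor's note.]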