ref: 65ca71742cd75844e8720d1e4324bd1d4bff972d
dir: /sys/src/cmd/diff/test/diff-t8.expected/
--- diff-t8.l
+++ diff-t8.r
@@ -1,4 +1,5 @@
-/* $NetBSD: kern_malloc.c,v 1.11 1995/05/01 22:39:11 cgd Exp $ */
+/* $OpenBSD: t8.2,v 1.1 2003/07/17 21:04:04 otto Exp $ */
+/* $NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $ */
/*
* Copyright (c) 1987, 1991, 1993
@@ -33,24 +34,59 @@
#include <sys/param.h>
#include <sys/proc.h>
-#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
+#include <sys/systm.h>
+#include <sys/sysctl.h>
-#include <vm/vm.h>
-#include <vm/vm_kern.h>
+#include <uvm/uvm_extern.h>
+
+static struct vm_map_intrsafe kmem_map_store;
+struct vm_map *kmem_map = NULL;
+
+#ifdef NKMEMCLUSTERS
+#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
+#endif
+
+/*
+ * Default number of pages in kmem_map. We attempt to calculate this
+ * at run-time, but allow it to be either patched or set in the kernel
+ * config file.
+ */
+#ifndef NKMEMPAGES
+#define NKMEMPAGES 0
+#endif
+int nkmempages = NKMEMPAGES;
+
+/*
+ * Defaults for lower- and upper-bounds for the kmem_map page count.
+ * Can be overridden by kernel config options.
+ */
+#ifndef NKMEMPAGES_MIN
+#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
+#endif
+
+#ifndef NKMEMPAGES_MAX
+#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
+#endif
struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
+char buckstring[16 * sizeof("123456,")];
+int buckstring_init = 0;
+#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
char *memname[] = INITKMEMNAMES;
+char *memall = NULL;
+extern struct lock sysctl_kmemlock;
+#endif
#ifdef DIAGNOSTIC
/*
* This structure provides a set of masks to catch unaligned frees.
*/
-long addrmask[] = { 0,
+const long addrmask[] = { 0,
0x00000001, 0x00000003, 0x00000007, 0x0000000f,
0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
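
The addrmask[] table above pairs each bucket index with the low-bit mask of
its alignment: an object from bucket i is 1 << i bytes long and 1 << i
aligned, so any bits caught by addrmask[i] mean a freed pointer lands inside
an object rather than at its start. A minimal user-space sketch of that
check, with a made-up bucket index and addresses (only the masking logic is
taken from the code above):

        #include <stdio.h>

        static const long addrmask[] = { 0,
                0x00000001, 0x00000003, 0x00000007, 0x0000000f,
                0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
        };

        int
        main(void)
        {
                int indx = 5;                   /* pretend 32-byte bucket: 1 << 5 */
                unsigned long good = 0x10040;   /* multiple of 32: an object start */
                unsigned long bad = 0x10044;    /* 4 bytes into the same object */

                printf("good & mask = %#lx\n", good & addrmask[indx]);  /* 0 */
                printf("bad  & mask = %#lx\n", bad & addrmask[indx]);   /* 0x4 */
                return 0;
        }
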
@@ -61,14 +97,14 @@
* The WEIRD_ADDR is used as known text to copy into free objects so
* that modifications after frees can be detected.
*/
-#define WEIRD_ADDR 0xdeadbeef
+#define WEIRD_ADDR ((unsigned) 0xdeadbeef)
#define MAX_COPY 32
/*
* Normally the freelist structure is used only to hold the list pointer
* for free objects. However, when running with diagnostics, the first
* 8 bytes of the structure is unused except for diagnostic information,
- * and the free list pointer is at offst 8 in the structure. Since the
+ * and the free list pointer is at offset 8 in the structure. Since the
* first 8 bytes is the portion of the structure most often modified, this
* helps to detect memory reuse problems and avoid free list corruption.
*/
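
WEIRD_ADDR poisoning works because a freed object should keep the fill
pattern until it is handed out again; any word that changed was written
through a stale pointer. A stand-alone sketch of the poison-then-verify
cycle (everything here except the WEIRD_ADDR value is illustrative, not the
kernel's code):

        #include <stdio.h>
        #include <stdint.h>

        #define WEIRD_ADDR ((uint32_t)0xdeadbeef)

        static void
        poison(uint32_t *lp, size_t nwords)
        {
                size_t n;

                for (n = 0; n < nwords; n++)
                        lp[n] = WEIRD_ADDR;     /* fill freed object with known text */
        }

        static void
        check(uint32_t *lp, size_t nwords)
        {
                size_t n;

                for (n = 0; n < nwords; n++)
                        if (lp[n] != WEIRD_ADDR) {
                                printf("word %zu modified after free (%#x != %#x)\n",
                                    n, (unsigned)lp[n], WEIRD_ADDR);
                                return;
                        }
        }

        int
        main(void)
        {
                uint32_t obj[8];

                poison(obj, 8);
                obj[3] = 7;             /* simulate a use-after-free store */
                check(obj, 8);          /* reports word 3 */
                return 0;
        }
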
@@ -106,12 +142,18 @@
#ifdef KMEMSTATS
register struct kmemstats *ksp = &kmemstats[type];
- if (((unsigned long)type) > M_LAST)
+ if (((unsigned long)type) >= M_LAST)
panic("malloc - bogus type");
#endif
+
+#ifdef MALLOC_DEBUG
+ if (debug_malloc(size, type, flags, (void **)&va))
+ return ((void *) va);
+#endif
+
indx = BUCKETINDX(size);
kbp = &bucket[indx];
- s = splimp();
+ s = splvm();
#ifdef KMEMSTATS
while (ksp->ks_memuse >= ksp->ks_limit) {
if (flags & M_NOWAIT) {
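
The first change in this hunk is an off-by-one fix: kmemstats[] has M_LAST
entries, so M_LAST itself is already one past the end, and the bogus-type
check must use >=. A stand-alone illustration with a made-up table size:

        #include <stdio.h>

        #define M_LAST 4                /* made-up table size */
        static int kmemstats[M_LAST];

        int
        main(void)
        {
                unsigned long type;

                for (type = 3; type <= 4; type++) {
                        if (type >= M_LAST) {   /* '>' would admit type == M_LAST */
                                printf("type %lu: bogus\n", type);
                                continue;
                        }
                        kmemstats[type]++;      /* safe: last valid index is M_LAST - 1 */
                        printf("type %lu: ok\n", type);
                }
                return 0;
        }
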
@@ -130,13 +172,24 @@
if (kbp->kb_next == NULL) {
kbp->kb_last = NULL;
if (size > MAXALLOCSAVE)
- allocsize = roundup(size, CLBYTES);
+ allocsize = round_page(size);
else
allocsize = 1 << indx;
- npg = clrnd(btoc(allocsize));
- va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
- !(flags & M_NOWAIT));
+ npg = btoc(allocsize);
+ va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
+ (vsize_t)ctob(npg),
+ (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
if (va == NULL) {
+ /*
+ * Kmem_malloc() can return NULL, even if it can
+ * wait, if there is no map space available, because
+ * it can't fix that problem. Neither can we,
+ * right now. (We should release pages which
+ * are completely free and which are in buckets
+ * with too many free elements.)
+ */
+ if ((flags & M_NOWAIT) == 0)
+ panic("malloc: out of space in kmem_map");
splx(s);
return ((void *) NULL);
}
@@ -164,7 +217,7 @@
* bucket, don't assume the list is still empty.
*/
savedlist = kbp->kb_next;
- kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
+ kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
for (;;) {
freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
@@ -192,13 +245,22 @@
freep = (struct freelist *)va;
savedtype = (unsigned)freep->type < M_LAST ?
memname[freep->type] : "???";
- if (kbp->kb_next &&
- !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) {
- printf("%s %d of object %p size %d %s %s (invalid addr %p)\n",
+ if (kbp->kb_next) {
+ int rv;
+ vaddr_t addr = (vaddr_t)kbp->kb_next;
+
+ vm_map_lock(kmem_map);
+ rv = uvm_map_checkprot(kmem_map, addr,
+ addr + sizeof(struct freelist), VM_PROT_WRITE);
+ vm_map_unlock(kmem_map);
+
+ if (!rv) {
+ printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n",
"Data modified on freelist: word",
(int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size,
"previous type", savedtype, kbp->kb_next);
kbp->kb_next = NULL;
+ }
}
/* Fill the fields that we've used with WEIRD_ADDR */
@@ -218,7 +280,7 @@
for (lp = (int32_t *)va; lp < end; lp++) {
if (*lp == WEIRD_ADDR)
continue;
- printf("%s %d of object %p size %d %s %s (%p != %p)\n",
+ printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n",
"Data modified on freelist: word", lp - (int32_t *)va,
va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
break;
@@ -270,25 +332,36 @@
register struct kmemstats *ksp = &kmemstats[type];
#endif
+#ifdef MALLOC_DEBUG
+ if (debug_free(addr, type))
+ return;
+#endif
+
+#ifdef DIAGNOSTIC
+ if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
+ panic("free: non-malloced addr %p type %s", addr,
+ memname[type]);
+#endif
+
kup = btokup(addr);
size = 1 << kup->ku_indx;
kbp = &bucket[kup->ku_indx];
- s = splimp();
+ s = splvm();
#ifdef DIAGNOSTIC
/*
* Check for returns of data that do not point to the
* beginning of the allocation.
*/
- if (size > NBPG * CLSIZE)
- alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
+ if (size > PAGE_SIZE)
+ alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
else
alloc = addrmask[kup->ku_indx];
if (((u_long)addr & alloc) != 0)
- panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
+ panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
if (size > MAXALLOCSAVE) {
- kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
+ uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
size = kup->ku_pagecnt << PGSHIFT;
ksp->ks_memuse -= size;
@@ -310,7 +383,8 @@
* it looks free before laboriously searching the freelist.
*/
if (freep->spare0 == WEIRD_ADDR) {
- for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
+ for (cp = kbp->kb_next; cp;
+ cp = ((struct freelist *)cp)->next) {
if (addr != cp)
continue;
printf("multiply freed item %p\n", addr);
@@ -331,11 +405,12 @@
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
kup->ku_freecnt++;
- if (kup->ku_freecnt >= kbp->kb_elmpercl)
+ if (kup->ku_freecnt >= kbp->kb_elmpercl) {
if (kup->ku_freecnt > kbp->kb_elmpercl)
panic("free: multiple frees");
else if (kbp->kb_totalfree > kbp->kb_highwat)
kbp->kb_couldfree++;
+ }
kbp->kb_totalfree++;
ksp->ks_memuse -= size;
if (ksp->ks_memuse + size >= ksp->ks_limit &&
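
The braces added around the kb_elmpercl test are for clarity rather than
behavior: C binds an else to the nearest unbraced if, so the old and new
forms run the same code. A small demonstration of that binding rule (both
functions are hypothetical):

        #include <stdio.h>

        static int
        unbraced(int a, int b)
        {
                if (a)
                        if (b)
                                return 1;
                        else            /* binds to if (b), not if (a) */
                                return 2;
                return 0;
        }

        static int
        braced(int a, int b)
        {
                if (a) {
                        if (b)
                                return 1;
                        else
                                return 2;
                }
                return 0;
        }

        int
        main(void)
        {
                printf("%d %d\n", unbraced(1, 0), braced(1, 0));        /* 2 2 */
                printf("%d %d\n", unbraced(0, 0), braced(0, 0));        /* 0 0 */
                return 0;
        }
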
@@ -353,40 +428,189 @@
}
/*
+ * Compute the number of pages that kmem_map will map, that is,
+ * the size of the kernel malloc arena.
+ */
+void
+kmeminit_nkmempages()
+{
+ int npages;
+
+ if (nkmempages != 0) {
+ /*
+ * It's already been set (by us being here before, or
+ * by patching or kernel config options), bail out now.
+ */
+ return;
+ }
+
+ /*
+ * We use the following (simple) formula:
+ *
+ * - Starting point is physical memory / 4.
+ *
+ * - Clamp it down to NKMEMPAGES_MAX.
+ *
+ * - Round it up to NKMEMPAGES_MIN.
+ */
+ npages = physmem / 4;
+
+ if (npages > NKMEMPAGES_MAX)
+ npages = NKMEMPAGES_MAX;
+
+ if (npages < NKMEMPAGES_MIN)
+ npages = NKMEMPAGES_MIN;
+
+ nkmempages = npages;
+}
+
+/*
* Initialize the kernel memory allocator
*/
+void
kmeminit()
{
- register long indx;
- int npg;
-
-#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
- ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
-#endif
-#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
- ERROR!_kmeminit:_MAXALLOCSAVE_too_big
-#endif
-#if (MAXALLOCSAVE < CLBYTES)
- ERROR!_kmeminit:_MAXALLOCSAVE_too_small
+ vaddr_t base, limit;
+#ifdef KMEMSTATS
+ long indx;
#endif
+#ifdef DIAGNOSTIC
if (sizeof(struct freelist) > (1 << MINBUCKET))
- panic("minbucket too small/struct freelist too big");
+ panic("kmeminit: minbucket too small/struct freelist too big");
+#endif
- npg = VM_KMEM_SIZE/ NBPG;
- kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
- (vm_size_t)(npg * sizeof(struct kmemusage)));
- kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
- (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
+ /*
+ * Compute the number of kmem_map pages, if we have not
+ * done so already.
+ */
+ kmeminit_nkmempages();
+ base = vm_map_min(kernel_map);
+ kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
+ (vsize_t)(nkmempages * PAGE_SIZE), VM_MAP_INTRSAFE, FALSE,
+ &kmem_map_store.vmi_map);
+ kmembase = (char *)base;
+ kmemlimit = (char *)limit;
+ kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
+ (vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
for (indx = 0; indx < MINBUCKET + 16; indx++) {
- if (1 << indx >= CLBYTES)
+ if (1 << indx >= PAGE_SIZE)
bucket[indx].kb_elmpercl = 1;
else
- bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
+ bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
}
for (indx = 0; indx < M_LAST; indx++)
- kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
+ kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
+#endif
+#ifdef MALLOC_DEBUG
+ debug_malloc_init();
+#endif
+}
+
+/*
+ * Return kernel malloc statistics information.
+ */
+int
+sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p)
+ int *name;
+ u_int namelen;
+ void *oldp;
+ size_t *oldlenp;
+ void *newp;
+ size_t newlen;
+ struct proc *p;
+{
+ struct kmembuckets kb;
+ int i, siz;
+
+ if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
+ name[0] != KERN_MALLOC_KMEMNAMES)
+ return (ENOTDIR); /* overloaded */
+
+ switch (name[0]) {
+ case KERN_MALLOC_BUCKETS:
+ /* Initialize the first time */
+ if (buckstring_init == 0) {
+ buckstring_init = 1;
+ bzero(buckstring, sizeof(buckstring));
+ for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
+ snprintf(buckstring + siz,
+ sizeof buckstring - siz,
+ "%d,", (u_int)(1<<i));
+ siz += strlen(buckstring + siz);
+ }
+ /* Remove trailing comma */
+ if (siz)
+ buckstring[siz - 1] = '\0';
+ }
+ return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));
+
+ case KERN_MALLOC_BUCKET:
+ bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
+ kb.kb_next = kb.kb_last = 0;
+ return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
+ case KERN_MALLOC_KMEMSTATS:
+#ifdef KMEMSTATS
+ if ((name[1] < 0) || (name[1] >= M_LAST))
+ return (EINVAL);
+ return (sysctl_rdstruct(oldp, oldlenp, newp,
+ &kmemstats[name[1]], sizeof(struct kmemstats)));
+#else
+ return (EOPNOTSUPP);
#endif
+ case KERN_MALLOC_KMEMNAMES:
+#if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
+ if (memall == NULL) {
+ int totlen;
+
+ i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p);
+ if (i)
+ return (i);
+
+ /* Figure out how large a buffer we need */
+ for (totlen = 0, i = 0; i < M_LAST; i++) {
+ if (memname[i])
+ totlen += strlen(memname[i]);
+ totlen++;
+ }
+ memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK);
+ bzero(memall, totlen + M_LAST);
+ for (siz = 0, i = 0; i < M_LAST; i++) {
+ snprintf(memall + siz,
+ totlen + M_LAST - siz,
+ "%s,", memname[i] ? memname[i] : "");
+ siz += strlen(memall + siz);
+ }
+ /* Remove trailing comma */
+ if (siz)
+ memall[siz - 1] = '\0';
+
+ /* Now, convert all spaces to underscores */
+ for (i = 0; i < totlen; i++)
+ if (memall[i] == ' ')
+ memall[i] = '_';
+ lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p);
+ }
+ return (sysctl_rdstring(oldp, oldlenp, newp, memall));
+#else
+ return (EOPNOTSUPP);
+#endif
+ default:
+ return (EOPNOTSUPP);
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * Round up a size to how much malloc would actually allocate.
+ */
+size_t
+malloc_roundup(size_t sz)
+{
+ if (sz > MAXALLOCSAVE)
+ return round_page(sz);
+
+ return (1 << BUCKETINDX(sz));
}
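
The new kmeminit_nkmempages() and malloc_roundup() encode two simple sizing
rules: the arena gets physmem / 4 pages clamped to the range
[NKMEMPAGES_MIN, NKMEMPAGES_MAX], and a request rounds up to its
power-of-two bucket until it exceeds MAXALLOCSAVE, after which it rounds to
a page boundary. A stand-alone sketch with made-up constants (the real
PAGE_SIZE, MINBUCKET, MAXALLOCSAVE and the min/max defaults come from
machine-dependent headers, and bucketindx() only approximates the
BUCKETINDX macro):

        #include <stdio.h>
        #include <stddef.h>

        #define PAGE_SIZE       4096                    /* illustrative */
        #define MINBUCKET       4                       /* smallest bucket: 16 bytes */
        #define MAXALLOCSAVE    (2 * PAGE_SIZE)         /* illustrative */
        #define NKMEMPAGES_MIN  512                     /* illustrative */
        #define NKMEMPAGES_MAX  8192                    /* illustrative */

        static size_t
        bucketindx(size_t sz)
        {
                size_t indx = MINBUCKET;

                while (((size_t)1 << indx) < sz)        /* smallest bucket >= sz */
                        indx++;
                return indx;
        }

        static size_t
        malloc_roundup(size_t sz)
        {
                if (sz > MAXALLOCSAVE)                  /* big: whole pages */
                        return (sz + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1);
                return (size_t)1 << bucketindx(sz);     /* small: one bucket */
        }

        int
        main(void)
        {
                long physmem = 16384;                   /* pretend 64MB of 4K pages */
                long npages = physmem / 4;

                if (npages > NKMEMPAGES_MAX)
                        npages = NKMEMPAGES_MAX;
                if (npages < NKMEMPAGES_MIN)
                        npages = NKMEMPAGES_MIN;
                printf("nkmempages = %ld\n", npages);                   /* 4096 */
                printf("roundup(100) = %zu\n", malloc_roundup(100));    /* 128 */
                printf("roundup(9000) = %zu\n", malloc_roundup(9000));  /* 12288 */
                return 0;
        }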