
Adding upstream version 1.11.0+dfsg.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-20 07:22:08 +02:00
parent bfd67a6a34
commit 7d6afcab99
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
21 changed files with 1681 additions and 0 deletions

15
AUTHORS Normal file

@@ -0,0 +1,15 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
Chris Waldon <christopher(dot)waldon(dot)dev@gmail.com>
Gleb Sakhnov <gleb.sakhnov@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Scot C Bontrager <scot@indievisible.org>
Steffen Butzer <steffen(dot)butzer@outlook.com>

15
CONTRIBUTORS Normal file

@@ -0,0 +1,15 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Anup Kodlekere <anup.kodlekere@ibm.com>
Chris Waldon <christopher(dot)waldon(dot)dev@gmail.com>
Gleb Sakhnov <gleb.sakhnov@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Scot C Bontrager <scot@indievisible.org>
Steffen Butzer <steffen(dot)butzer@outlook.com>
ZHU Zijia <piggynl@outlook.com>

27
LICENSE Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2017 The Memory Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

27
LICENSE-GO Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

25
LICENSE-MMAP-GO Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

112
Makefile Normal file

@@ -0,0 +1,112 @@
# Copyright 2017 The Memory Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all clean cover cpu editor internalError later mem nuke todo edit build_all_targets
grep=--include=*.go --include=*.l --include=*.y --include=*.yy
ngrep='TODOOK\|parser\.go\|scanner\.go\|.*_string\.go'
all: editor
go vet 2>&1 | grep -v $(ngrep) || true
golint 2>&1 | grep -v $(ngrep) || true
make todo
misspell *.go
maligned || true
unconvert -apply
staticcheck | grep -v 'lexer\.go' || true
grep -n 'FAIL\|PASS' log
date
clean:
go clean
rm -f *~ *.test *.out
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
cpu: clean
go test -run @ -bench . -cpuprofile cpu.out
go tool pprof -lines *.test cpu.out
edit:
@ 1>/dev/null 2>/dev/null gvim -p Makefile go.mod builder.json *.go &
editor:
gofmt -l -s -w *.go
build_all_targets:
GOOS=darwin GOARCH=amd64 go build
GOOS=darwin GOARCH=amd64 staticcheck
GOOS=darwin GOARCH=arm64 go build
GOOS=darwin GOARCH=arm64 staticcheck
GOOS=freebsd GOARCH=386 go build
GOOS=freebsd GOARCH=386 staticcheck
GOOS=freebsd GOARCH=amd64 go build
GOOS=freebsd GOARCH=amd64 staticcheck
GOOS=freebsd GOARCH=arm go build
GOOS=freebsd GOARCH=arm staticcheck
GOOS=freebsd GOARCH=arm64 go build
GOOS=freebsd GOARCH=arm64 staticcheck
GOOS=illumos GOARCH=amd64 go build
GOOS=illumos GOARCH=amd64 staticcheck
GOOS=linux GOARCH=386 go build
GOOS=linux GOARCH=386 staticcheck
GOOS=linux GOARCH=amd64 go build
GOOS=linux GOARCH=amd64 staticcheck
GOOS=linux GOARCH=arm go build
GOOS=linux GOARCH=arm staticcheck
GOOS=linux GOARCH=arm64 go build
GOOS=linux GOARCH=arm64 staticcheck
GOOS=linux GOARCH=loong64 go build
GOOS=linux GOARCH=loong64 staticcheck
GOOS=linux GOARCH=mips go build
GOOS=linux GOARCH=mips staticcheck
GOOS=linux GOARCH=mips64le go build
GOOS=linux GOARCH=mips64le staticcheck
GOOS=linux GOARCH=mipsle go build
GOOS=linux GOARCH=mipsle staticcheck
GOOS=linux GOARCH=ppc64le go build
GOOS=linux GOARCH=ppc64le staticcheck
GOOS=linux GOARCH=riscv64 go build
GOOS=linux GOARCH=riscv64 staticcheck
GOOS=linux GOARCH=s390x go build
GOOS=linux GOARCH=s390x staticcheck
GOOS=netbsd GOARCH=386 go build
GOOS=netbsd GOARCH=386 staticcheck
GOOS=netbsd GOARCH=amd64 go build
GOOS=netbsd GOARCH=amd64 staticcheck
GOOS=netbsd GOARCH=arm go build
GOOS=netbsd GOARCH=arm staticcheck
GOOS=openbsd GOARCH=386 go build
GOOS=openbsd GOARCH=386 staticcheck
GOOS=openbsd GOARCH=amd64 go build
GOOS=openbsd GOARCH=amd64 staticcheck
GOOS=openbsd GOARCH=arm64 go build
GOOS=openbsd GOARCH=arm64 staticcheck
GOOS=windows GOARCH=386 go build
GOOS=windows GOARCH=386 staticcheck
GOOS=windows GOARCH=amd64 go build
GOOS=windows GOARCH=amd64 staticcheck
GOOS=windows GOARCH=arm64 go build
GOOS=windows GOARCH=arm64 staticcheck
internalError:
egrep -ho '"internal error.*"' *.go | sort | cat -n
later:
@grep -n $(grep) LATER * || true
@grep -n $(grep) MAYBE * || true
mem: clean
go test -run @ -bench . -memprofile mem.out -memprofilerate 1 -timeout 24h
go tool pprof -lines -web -alloc_space *.test mem.out
nuke: clean
go clean -i
todo:
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * | grep -v $(ngrep) || true
@grep -nr $(grep) TODO * | grep -v $(ngrep) || true
@grep -nr $(grep) BUG * | grep -v $(ngrep) || true
@grep -nr $(grep) [^[:alpha:]]println * | grep -v $(ngrep) || true

13
README.md Normal file

@@ -0,0 +1,13 @@
![logo-png](logo.png)
Package memory implements a memory allocator.
## Build status
available at https://modern-c.appspot.com/-/builder/?importpath=modernc.org%2fmemory
## Installation

    $ go get modernc.org/memory
[![Go Reference](https://pkg.go.dev/badge/modernc.org/memory.0.svg)](https://pkg.go.dev/modernc.org/memory)
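A minimal usage sketch (all calls shown are defined in memory.go in this commit; error handling abbreviated):

```go
package main

import (
	"fmt"

	"modernc.org/memory"
)

func main() {
	var alloc memory.Allocator // the zero value is ready for use
	defer alloc.Close()        // releases all OS resources held by alloc

	b, err := alloc.Malloc(64) // uninitialized, mmap-backed []byte
	if err != nil {
		panic(err)
	}
	b[0] = 42
	fmt.Println(memory.UsableSize(&b[0]) >= 64) // true: the real slot size
	if err := alloc.Free(b); err != nil {
		panic(err)
	}
}
```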

775
all_test.go Normal file

@@ -0,0 +1,775 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package memory // import "modernc.org/memory"
import (
"bytes"
"fmt"
"math"
"os"
"path"
"runtime"
"strings"
"testing"
"unsafe"
"modernc.org/mathutil"
)
func caller(s string, va ...interface{}) {
if s == "" {
s = strings.Repeat("%v ", len(va))
}
_, fn, fl, _ := runtime.Caller(2)
fmt.Fprintf(os.Stderr, "# caller: %s:%d: ", path.Base(fn), fl)
fmt.Fprintf(os.Stderr, s, va...)
fmt.Fprintln(os.Stderr)
_, fn, fl, _ = runtime.Caller(1)
fmt.Fprintf(os.Stderr, "# \tcallee: %s:%d: ", path.Base(fn), fl)
fmt.Fprintln(os.Stderr)
os.Stderr.Sync()
}
func dbg(s string, va ...interface{}) {
if s == "" {
s = strings.Repeat("%v ", len(va))
}
_, fn, fl, _ := runtime.Caller(1)
fmt.Fprintf(os.Stderr, "# dbg %s:%d: ", path.Base(fn), fl)
fmt.Fprintf(os.Stderr, s, va...)
fmt.Fprintln(os.Stderr)
os.Stderr.Sync()
}
func TODO(...interface{}) string { //TODOOK
_, fn, fl, _ := runtime.Caller(1)
return fmt.Sprintf("# TODO: %s:%d:\n", path.Base(fn), fl) //TODOOK
}
func use(...interface{}) {}
func init() {
use(caller, dbg, TODO) //TODOOK
}
// ============================================================================
const quota = 128 << 20
var (
max = 2 * osPageSize
bigMax = 2 * pageSize
)
type block struct {
p uintptr
size int
}
func test1u(t *testing.T, max int) {
var alloc Allocator
defer alloc.Close()
rem := quota
var a []block
srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
// Allocate
for rem > 0 {
size := srng.Next()%max + 1
rem -= size
p, err := alloc.UintptrMalloc(size)
if err != nil {
t.Fatal(err)
}
a = append(a, block{p, size})
for i := 0; i < size; i++ {
*(*byte)(unsafe.Pointer(p + uintptr(i))) = byte(vrng.Next())
}
}
if counters {
t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
}
srng.Seek(0)
vrng.Seek(0)
// Verify
for i, b := range a {
if g, e := b.size, srng.Next()%max+1; g != e {
t.Fatal(i, g, e)
}
if a, b := b.size, UintptrUsableSize(b.p); a > b {
t.Fatal(i, a, b)
}
for j := 0; j < b.size; j++ {
g := *(*byte)(unsafe.Pointer(b.p + uintptr(j)))
if e := byte(vrng.Next()); g != e {
t.Fatalf("%v,%v %#x: %#02x %#02x", i, j, b.p+uintptr(j), g, e)
}
*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
}
}
// Shuffle
for i := range a {
j := srng.Next() % len(a)
a[i], a[j] = a[j], a[i]
}
// Free
for _, b := range a {
if err := alloc.UintptrFree(b.p); err != nil {
t.Fatal(err)
}
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func Test1USmall(t *testing.T) { test1u(t, max) }
func Test1UBig(t *testing.T) { test1u(t, bigMax) }
func test2u(t *testing.T, max int) {
var alloc Allocator
defer alloc.Close()
rem := quota
var a []block
srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
// Allocate
for rem > 0 {
size := srng.Next()%max + 1
rem -= size
p, err := alloc.UintptrMalloc(size)
if err != nil {
t.Fatal(err)
}
a = append(a, block{p, size})
for i := 0; i < size; i++ {
*(*byte)(unsafe.Pointer(p + uintptr(i))) = byte(vrng.Next())
}
}
if counters {
t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
}
srng.Seek(0)
vrng.Seek(0)
// Verify & free
for i, b := range a {
if g, e := b.size, srng.Next()%max+1; g != e {
t.Fatal(i, g, e)
}
if a, b := b.size, UintptrUsableSize(b.p); a > b {
t.Fatal(i, a, b)
}
for j := 0; j < b.size; j++ {
g := *(*byte)(unsafe.Pointer(b.p + uintptr(j)))
if e := byte(vrng.Next()); g != e {
t.Fatalf("%v,%v %#x: %#02x %#02x", i, j, b.p+uintptr(j), g, e)
}
*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
}
if err := alloc.UintptrFree(b.p); err != nil {
t.Fatal(err)
}
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func Test2USmall(t *testing.T) { test2u(t, max) }
func Test2UBig(t *testing.T) { test2u(t, bigMax) }
func test3u(t *testing.T, max int) {
var alloc Allocator
defer alloc.Close()
rem := quota
m := map[block][]byte{}
srng, err := mathutil.NewFC32(1, max, true)
if err != nil {
t.Fatal(err)
}
vrng, err := mathutil.NewFC32(1, max, true)
if err != nil {
t.Fatal(err)
}
for rem > 0 {
switch srng.Next() % 3 {
case 0, 1: // 2/3 allocate
size := srng.Next()
rem -= size
p, err := alloc.UintptrMalloc(size)
if err != nil {
t.Fatal(err)
}
b := make([]byte, size)
for i := range b {
b[i] = byte(vrng.Next())
*(*byte)(unsafe.Pointer(p + uintptr(i))) = b[i]
}
m[block{p, size}] = append([]byte(nil), b...)
default: // 1/3 free
for b, v := range m {
for i, v := range v {
if *(*byte)(unsafe.Pointer(b.p + uintptr(i))) != v {
t.Fatal("corrupted heap")
}
}
if a, b := b.size, UintptrUsableSize(b.p); a > b {
t.Fatal(a, b)
}
for j := 0; j < b.size; j++ {
*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
}
rem += b.size
alloc.UintptrFree(b.p)
delete(m, b)
break
}
}
}
if counters {
t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
}
for b, v := range m {
for i, v := range v {
if *(*byte)(unsafe.Pointer(b.p + uintptr(i))) != v {
t.Fatal("corrupted heap")
}
}
if a, b := b.size, UintptrUsableSize(b.p); a > b {
t.Fatal(a, b)
}
for j := 0; j < b.size; j++ {
*(*byte)(unsafe.Pointer(b.p + uintptr(j))) = 0
}
alloc.UintptrFree(b.p)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func Test3USmall(t *testing.T) { test3u(t, max) }
func Test3UBig(t *testing.T) { test3u(t, bigMax) }
func TestUFree(t *testing.T) {
var alloc Allocator
defer alloc.Close()
p, err := alloc.UintptrMalloc(1)
if err != nil {
t.Fatal(err)
}
if err := alloc.UintptrFree(p); err != nil {
t.Fatal(err)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func TestUMalloc(t *testing.T) {
var alloc Allocator
defer alloc.Close()
p, err := alloc.UintptrMalloc(maxSlotSize)
if err != nil {
t.Fatal(err)
}
pg := (*page)(unsafe.Pointer(p &^ uintptr(osPageMask)))
if 1<<pg.log > maxSlotSize {
t.Fatal(1<<pg.log, maxSlotSize)
}
if err := alloc.UintptrFree(p); err != nil {
t.Fatal(err)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func test1(t *testing.T, max int) {
var alloc Allocator
defer alloc.Close()
rem := quota
var a [][]byte
srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
// Allocate
for rem > 0 {
size := srng.Next()%max + 1
rem -= size
b, err := alloc.Malloc(size)
if err != nil {
t.Fatal(err)
}
a = append(a, b)
for i := range b {
b[i] = byte(vrng.Next())
}
}
if counters {
t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
}
srng.Seek(0)
vrng.Seek(0)
// Verify
for i, b := range a {
if g, e := len(b), srng.Next()%max+1; g != e {
t.Fatal(i, g, e)
}
if a, b := len(b), UsableSize(&b[0]); a > b {
t.Fatal(i, a, b)
}
for i, g := range b {
if e := byte(vrng.Next()); g != e {
t.Fatalf("%v %p: %#02x %#02x", i, &b[i], g, e)
}
b[i] = 0
}
}
// Shuffle
for i := range a {
j := srng.Next() % len(a)
a[i], a[j] = a[j], a[i]
}
// Free
for _, b := range a {
if err := alloc.Free(b); err != nil {
t.Fatal(err)
}
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func Test1Small(t *testing.T) { test1(t, max) }
func Test1Big(t *testing.T) { test1(t, bigMax) }
func test2(t *testing.T, max int) {
var alloc Allocator
defer alloc.Close()
rem := quota
var a [][]byte
srng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
vrng, err := mathutil.NewFC32(0, math.MaxInt32, true)
if err != nil {
t.Fatal(err)
}
// Allocate
for rem > 0 {
size := srng.Next()%max + 1
rem -= size
b, err := alloc.Malloc(size)
if err != nil {
t.Fatal(err)
}
a = append(a, b)
for i := range b {
b[i] = byte(vrng.Next())
}
}
if counters {
t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
}
srng.Seek(0)
vrng.Seek(0)
// Verify & free
for i, b := range a {
if g, e := len(b), srng.Next()%max+1; g != e {
t.Fatal(i, g, e)
}
if a, b := len(b), UsableSize(&b[0]); a > b {
t.Fatal(i, a, b)
}
for i, g := range b {
if e := byte(vrng.Next()); g != e {
t.Fatalf("%v %p: %#02x %#02x", i, &b[i], g, e)
}
b[i] = 0
}
if err := alloc.Free(b); err != nil {
t.Fatal(err)
}
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func Test2Small(t *testing.T) { test2(t, max) }
func Test2Big(t *testing.T) { test2(t, bigMax) }
func test3(t *testing.T, max int) {
var alloc Allocator
defer alloc.Close()
rem := quota
m := map[*[]byte][]byte{}
srng, err := mathutil.NewFC32(1, max, true)
if err != nil {
t.Fatal(err)
}
vrng, err := mathutil.NewFC32(1, max, true)
if err != nil {
t.Fatal(err)
}
for rem > 0 {
switch srng.Next() % 3 {
case 0, 1: // 2/3 allocate
size := srng.Next()
rem -= size
b, err := alloc.Malloc(size)
if err != nil {
t.Fatal(err)
}
for i := range b {
b[i] = byte(vrng.Next())
}
m[&b] = append([]byte(nil), b...)
default: // 1/3 free
for k, v := range m {
b := *k
if !bytes.Equal(b, v) {
t.Fatal("corrupted heap")
}
if a, b := len(b), UsableSize(&b[0]); a > b {
t.Fatal(a, b)
}
for i := range b {
b[i] = 0
}
rem += len(b)
alloc.Free(b)
delete(m, k)
break
}
}
}
if counters {
t.Logf("allocs %v, mmaps %v, bytes %v, overhead %v (%.2f%%).", alloc.Allocs, alloc.Mmaps, alloc.Bytes, alloc.Bytes-quota, 100*float64(alloc.Bytes-quota)/quota)
}
for k, v := range m {
b := *k
if !bytes.Equal(b, v) {
t.Fatal("corrupted heap")
}
if a, b := len(b), UsableSize(&b[0]); a > b {
t.Fatal(a, b)
}
for i := range b {
b[i] = 0
}
alloc.Free(b)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func Test3Small(t *testing.T) { test3(t, max) }
func Test3Big(t *testing.T) { test3(t, bigMax) }
func TestFree(t *testing.T) {
var alloc Allocator
defer alloc.Close()
b, err := alloc.Malloc(1)
if err != nil {
t.Fatal(err)
}
if err := alloc.Free(b[:0]); err != nil {
t.Fatal(err)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func TestMalloc(t *testing.T) {
var alloc Allocator
defer alloc.Close()
b, err := alloc.Malloc(maxSlotSize)
if err != nil {
t.Fatal(err)
}
p := (*page)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) &^ uintptr(osPageMask)))
if 1<<p.log > maxSlotSize {
t.Fatal(1<<p.log, maxSlotSize)
}
if err := alloc.Free(b[:0]); err != nil {
t.Fatal(err)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
t.Fatalf("%+v", alloc)
}
}
func benchmarkFree(b *testing.B, size int) {
var alloc Allocator
defer alloc.Close()
a := make([][]byte, b.N)
for i := range a {
p, err := alloc.Malloc(size)
if err != nil {
b.Fatal(err)
}
a[i] = p
}
b.ResetTimer()
for _, b := range a {
alloc.Free(b)
}
b.StopTimer()
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
b.Fatalf("%+v", alloc)
}
}
func BenchmarkFree16(b *testing.B) { benchmarkFree(b, 1<<4) }
func BenchmarkFree32(b *testing.B) { benchmarkFree(b, 1<<5) }
func BenchmarkFree64(b *testing.B) { benchmarkFree(b, 1<<6) }
func benchmarkCalloc(b *testing.B, size int) {
var alloc Allocator
defer alloc.Close()
a := make([][]byte, b.N)
b.ResetTimer()
for i := range a {
p, err := alloc.Calloc(size)
if err != nil {
b.Fatal(err)
}
a[i] = p
}
b.StopTimer()
for _, b := range a {
alloc.Free(b)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
b.Fatalf("%+v", alloc)
}
}
func BenchmarkCalloc16(b *testing.B) { benchmarkCalloc(b, 1<<4) }
func BenchmarkCalloc32(b *testing.B) { benchmarkCalloc(b, 1<<5) }
func BenchmarkCalloc64(b *testing.B) { benchmarkCalloc(b, 1<<6) }
func benchmarkGoCalloc(b *testing.B, size int) {
a := make([][]byte, b.N)
b.ResetTimer()
for i := range a {
a[i] = make([]byte, size)
}
b.StopTimer()
use(a)
}
func BenchmarkGoCalloc16(b *testing.B) { benchmarkGoCalloc(b, 1<<4) }
func BenchmarkGoCalloc32(b *testing.B) { benchmarkGoCalloc(b, 1<<5) }
func BenchmarkGoCalloc64(b *testing.B) { benchmarkGoCalloc(b, 1<<6) }
func benchmarkMalloc(b *testing.B, size int) {
var alloc Allocator
defer alloc.Close()
a := make([][]byte, b.N)
b.ResetTimer()
for i := range a {
p, err := alloc.Malloc(size)
if err != nil {
b.Fatal(err)
}
a[i] = p
}
b.StopTimer()
for _, b := range a {
alloc.Free(b)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
b.Fatalf("%+v", alloc)
}
}
func BenchmarkMalloc16(b *testing.B) { benchmarkMalloc(b, 1<<4) }
func BenchmarkMalloc32(b *testing.B) { benchmarkMalloc(b, 1<<5) }
func BenchmarkMalloc64(b *testing.B) { benchmarkMalloc(b, 1<<6) }
func benchmarkUintptrFree(b *testing.B, size int) {
var alloc Allocator
defer alloc.Close()
a := make([]uintptr, b.N)
for i := range a {
p, err := alloc.UintptrMalloc(size)
if err != nil {
b.Fatal(err)
}
a[i] = p
}
b.ResetTimer()
for _, p := range a {
alloc.UintptrFree(p)
}
b.StopTimer()
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
b.Fatalf("%+v", alloc)
}
}
func BenchmarkUintptrFree16(b *testing.B) { benchmarkUintptrFree(b, 1<<4) }
func BenchmarkUintptrFree32(b *testing.B) { benchmarkUintptrFree(b, 1<<5) }
func BenchmarkUintptrFree64(b *testing.B) { benchmarkUintptrFree(b, 1<<6) }
func benchmarkUintptrCalloc(b *testing.B, size int) {
var alloc Allocator
defer alloc.Close()
a := make([]uintptr, b.N)
b.ResetTimer()
for i := range a {
p, err := alloc.UintptrCalloc(size)
if err != nil {
b.Fatal(err)
}
a[i] = p
}
b.StopTimer()
for _, p := range a {
alloc.UintptrFree(p)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
b.Fatalf("%+v", alloc)
}
}
func BenchmarkUintptrCalloc16(b *testing.B) { benchmarkUintptrCalloc(b, 1<<4) }
func BenchmarkUintptrCalloc32(b *testing.B) { benchmarkUintptrCalloc(b, 1<<5) }
func BenchmarkUintptrCalloc64(b *testing.B) { benchmarkUintptrCalloc(b, 1<<6) }
func benchmarkUintptrMalloc(b *testing.B, size int) {
var alloc Allocator
defer alloc.Close()
a := make([]uintptr, b.N)
b.ResetTimer()
for i := range a {
p, err := alloc.UintptrMalloc(size)
if err != nil {
b.Fatal(err)
}
a[i] = p
}
b.StopTimer()
for _, p := range a {
alloc.UintptrFree(p)
}
if alloc.Allocs != 0 || alloc.Mmaps != 0 || alloc.Bytes != 0 || len(alloc.regs) != 0 {
b.Fatalf("%+v", alloc)
}
}
func BenchmarkUintptrMalloc16(b *testing.B) { benchmarkUintptrMalloc(b, 1<<4) }
func BenchmarkUintptrMalloc32(b *testing.B) { benchmarkUintptrMalloc(b, 1<<5) }
func BenchmarkUintptrMalloc64(b *testing.B) { benchmarkUintptrMalloc(b, 1<<6) }

6
builder.json Normal file

@@ -0,0 +1,6 @@
{
"autogen": "<none>",
"autoupdate": ".",
"autotag": ".",
"test": "."
}

10
counters.go Normal file

@@ -0,0 +1,10 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build memory.counters
// +build memory.counters
package memory // import "modernc.org/memory"
const counters = true
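// counters is true only when building with -tags memory.counters; memory.go
// guards every update of the exported Allocator statistics (Allocs, Bytes,
// Mmaps) with it.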

12
go.mod Normal file

@@ -0,0 +1,12 @@
module modernc.org/memory
// +heroku goVersion go1.14
go 1.23.0
require (
golang.org/x/sys v0.31.0
modernc.org/mathutil v1.7.1
)
require github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect

6
go.sum Normal file

@@ -0,0 +1,6 @@
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=

0
internal/autogen/.keep Normal file

469
memory.go Normal file

@@ -0,0 +1,469 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package memory implements a memory allocator.
//
// # Build status
//
// available at https://modern-c.appspot.com/-/builder/?importpath=modernc.org%2fmemory
//
// # Changelog
//
// 2017-10-03 Added alternative, unsafe.Pointer-based API.
//
// # Benchmarks
//
// jnml@3900x:~/src/modernc.org/memory$ date ; go version ; go test -run @ -bench . -benchmem |& tee log
// Mon Sep 25 16:02:02 CEST 2023
// go version go1.21.1 linux/amd64
// goos: linux
// goarch: amd64
// pkg: modernc.org/memory
// cpu: AMD Ryzen 9 3900X 12-Core Processor
// BenchmarkFree16-24 123506772 9.802 ns/op 0 B/op 0 allocs/op
// BenchmarkFree32-24 73853230 15.08 ns/op 0 B/op 0 allocs/op
// BenchmarkFree64-24 43070334 25.15 ns/op 0 B/op 0 allocs/op
// BenchmarkCalloc16-24 59353304 18.92 ns/op 0 B/op 0 allocs/op
// BenchmarkCalloc32-24 39415004 29.00 ns/op 0 B/op 0 allocs/op
// BenchmarkCalloc64-24 35825725 32.02 ns/op 0 B/op 0 allocs/op
// BenchmarkGoCalloc16-24 38274313 26.99 ns/op 16 B/op 1 allocs/op
// BenchmarkGoCalloc32-24 44590477 33.06 ns/op 32 B/op 1 allocs/op
// BenchmarkGoCalloc64-24 44233016 37.20 ns/op 64 B/op 1 allocs/op
// BenchmarkMalloc16-24 145736911 7.720 ns/op 0 B/op 0 allocs/op
// BenchmarkMalloc32-24 128898334 7.887 ns/op 0 B/op 0 allocs/op
// BenchmarkMalloc64-24 149569483 7.994 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrFree16-24 117043012 9.205 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrFree32-24 77399617 14.20 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrFree64-24 48770785 25.04 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrCalloc16-24 79257636 15.44 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrCalloc32-24 49644562 23.62 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrCalloc64-24 39854710 28.22 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrMalloc16-24 252987727 4.525 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrMalloc32-24 241423840 4.433 ns/op 0 B/op 0 allocs/op
// BenchmarkUintptrMalloc64-24 256450324 4.669 ns/op 0 B/op 0 allocs/op
// PASS
// ok modernc.org/memory 93.178s
// jnml@3900x:~/src/modernc.org/memory$
package memory // import "modernc.org/memory"
import (
"fmt"
"math/bits"
"os"
"unsafe"
)
const (
headerSize = unsafe.Sizeof(page{})
mallocAllign = 2 * unsafe.Sizeof(uintptr(0))
maxSlotSize = 1 << maxSlotSizeLog
maxSlotSizeLog = pageSizeLog - 2
pageAvail = pageSize - headerSize
pageMask = pageSize - 1
pageSize = 1 << pageSizeLog
)
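// With pageSizeLog == 16 (see mmap_unix.go and mmap_windows.go) a page is
// 64 KiB and maxSlotSize is 16 KiB: requests up to maxSlotSize are served
// from shared pages split into power-of-two slots, larger requests get a
// dedicated page of their own.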
func init() {
if unsafe.Sizeof(page{})%mallocAllign != 0 {
panic("internal error")
}
}
// if n%m != 0 { n += m-n%m }. m must be a power of 2.
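// e.g. roundup(5, 8) == 8, roundup(16, 8) == 16.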
func roundup(n, m int) int { return (n + m - 1) &^ (m - 1) }
type node struct {
prev, next uintptr // *node
}
type page struct {
brk int
log uint
size int
used int
}
// Allocator allocates and frees memory. Its zero value is ready for use. The
// exported counters are updated only when build tag memory.counters is
// present.
type Allocator struct {
Allocs int // # of allocs.
Bytes int // Asked from OS.
cap [64]int
lists [64]uintptr // *node
Mmaps int // Asked from OS.
pages [64]uintptr // *page
regs map[uintptr]struct{} // map[*page]struct{}
}
func (a *Allocator) mmap(size int) (uintptr /* *page */, error) {
p, size, err := mmap(size)
if err != nil {
return 0, err
}
//TODO(jnml) The returned size may now be nearly twice as large as we asked
//for. Use that extra capacity. For that we need to move the respective
//Allocator.cap item into the page struct so the page cap becomes dynamic.
//
// Related: This is a consequence of fixing the bigsort.test failures on
// linux/s390x, see: https://gitlab.com/cznic/sqlite/-/issues/207
if counters {
a.Mmaps++
a.Bytes += size
}
if a.regs == nil {
a.regs = map[uintptr]struct{}{}
}
(*page)(unsafe.Pointer(p)).size = size
a.regs[p] = struct{}{}
return p, nil
}
func (a *Allocator) newPage(size int) (uintptr /* *page */, error) {
size += int(headerSize)
p, err := a.mmap(size)
if err != nil {
return 0, err
}
(*page)(unsafe.Pointer(p)).log = 0
return p, nil
}
func (a *Allocator) newSharedPage(log uint) (uintptr /* *page */, error) {
if a.cap[log] == 0 {
a.cap[log] = int(pageAvail) / (1 << log)
}
size := int(headerSize) + a.cap[log]<<log
p, err := a.mmap(size)
if err != nil {
return 0, err
}
a.pages[log] = p
(*page)(unsafe.Pointer(p)).log = log
return p, nil
}
func (a *Allocator) unmap(p uintptr /* *page */) error {
delete(a.regs, p)
if counters {
a.Mmaps--
}
return unmap(p, (*page)(unsafe.Pointer(p)).size)
}
// UintptrCalloc is like Calloc except it returns an uintptr.
func (a *Allocator) UintptrCalloc(size int) (r uintptr, err error) {
if trace {
defer func() {
fmt.Fprintf(os.Stderr, "Calloc(%#x) %#x, %v\n", size, r, err)
}()
}
if r, err = a.UintptrMalloc(size); r == 0 || err != nil {
return 0, err
}
b := ((*rawmem)(unsafe.Pointer(r)))[:size:size]
for i := range b {
b[i] = 0
}
return r, nil
}
// UintptrFree is like Free except its argument is an uintptr, which must have
// been acquired from UintptrCalloc or UintptrMalloc or UintptrRealloc.
func (a *Allocator) UintptrFree(p uintptr) (err error) {
if trace {
defer func() {
fmt.Fprintf(os.Stderr, "Free(%#x) %v\n", p, err)
}()
}
if p == 0 {
return nil
}
if counters {
a.Allocs--
}
pg := p &^ uintptr(pageMask)
log := (*page)(unsafe.Pointer(pg)).log
if log == 0 {
if counters {
a.Bytes -= (*page)(unsafe.Pointer(pg)).size
}
return a.unmap(pg)
}
(*node)(unsafe.Pointer(p)).prev = 0
(*node)(unsafe.Pointer(p)).next = a.lists[log]
if next := (*node)(unsafe.Pointer(p)).next; next != 0 {
(*node)(unsafe.Pointer(next)).prev = p
}
a.lists[log] = p
(*page)(unsafe.Pointer(pg)).used--
if (*page)(unsafe.Pointer(pg)).used != 0 {
return nil
}
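// The last used slot on this page was freed: unlink all of the page's free
// slots from the free list, then return the whole page to the OS.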
for i := 0; i < (*page)(unsafe.Pointer(pg)).brk; i++ {
n := pg + headerSize + uintptr(i)<<log
next := (*node)(unsafe.Pointer(n)).next
prev := (*node)(unsafe.Pointer(n)).prev
switch {
case prev == 0:
a.lists[log] = next
if next != 0 {
(*node)(unsafe.Pointer(next)).prev = 0
}
case next == 0:
(*node)(unsafe.Pointer(prev)).next = 0
default:
(*node)(unsafe.Pointer(prev)).next = next
(*node)(unsafe.Pointer(next)).prev = prev
}
}
if a.pages[log] == pg {
a.pages[log] = 0
}
if counters {
a.Bytes -= (*page)(unsafe.Pointer(pg)).size
}
return a.unmap(pg)
}
// UintptrMalloc is like Malloc except it returns an uintptr.
func (a *Allocator) UintptrMalloc(size int) (r uintptr, err error) {
if trace {
defer func() {
fmt.Fprintf(os.Stderr, "Malloc(%#x) %#x, %v\n", size, r, err)
}()
}
if size < 0 {
panic("invalid malloc size")
}
if size == 0 {
return 0, nil
}
if counters {
a.Allocs++
}
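// Size class: round size up to a multiple of mallocAllign, then take the
// ceiling of log2 of the result; bits.Len(x-1) == ceil(log2(x)) for x >= 1.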
log := uint(bits.Len(uint((size+int(mallocAllign)-1)&^int(mallocAllign-1) - 1)))
if log > maxSlotSizeLog {
p, err := a.newPage(size)
if err != nil {
return 0, err
}
return p + headerSize, nil
}
if a.lists[log] == 0 && a.pages[log] == 0 {
if _, err := a.newSharedPage(log); err != nil {
return 0, err
}
}
if p := a.pages[log]; p != 0 {
(*page)(unsafe.Pointer(p)).used++
(*page)(unsafe.Pointer(p)).brk++
if (*page)(unsafe.Pointer(p)).brk == a.cap[log] {
a.pages[log] = 0
}
return p + headerSize + uintptr((*page)(unsafe.Pointer(p)).brk-1)<<log, nil
}
n := a.lists[log]
p := n &^ uintptr(pageMask)
a.lists[log] = (*node)(unsafe.Pointer(n)).next
if next := (*node)(unsafe.Pointer(n)).next; next != 0 {
(*node)(unsafe.Pointer(next)).prev = 0
}
(*page)(unsafe.Pointer(p)).used++
return n, nil
}
// UintptrRealloc is like Realloc except its first argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func (a *Allocator) UintptrRealloc(p uintptr, size int) (r uintptr, err error) {
if trace {
defer func() {
fmt.Fprintf(os.Stderr, "UnsafeRealloc(%#x, %#x) %#x, %v\n", p, size, r, err)
}()
}
switch {
case p == 0:
return a.UintptrMalloc(size)
case size == 0 && p != 0:
return 0, a.UintptrFree(p)
}
us := UintptrUsableSize(p)
if us >= size {
return p, nil
}
if r, err = a.UintptrMalloc(size); err != nil {
return 0, err
}
if us < size {
size = us
}
copy((*rawmem)(unsafe.Pointer(r))[:size:size], (*rawmem)(unsafe.Pointer(p))[:size:size])
return r, a.UintptrFree(p)
}
// UintptrUsableSize is like UsableSize except its argument is an uintptr,
// which must have been returned from UintptrCalloc, UintptrMalloc or
// UintptrRealloc.
func UintptrUsableSize(p uintptr) (r int) {
if trace {
defer func() {
fmt.Fprintf(os.Stderr, "UsableSize(%#x) %#x\n", p, r)
}()
}
if p == 0 {
return 0
}
return usableSize(p)
}
func usableSize(p uintptr) (r int) {
pg := p &^ uintptr(pageMask)
if log := (*page)(unsafe.Pointer(pg)).log; log != 0 {
return 1 << log
}
return (*page)(unsafe.Pointer(pg)).size - int(headerSize)
}
// Calloc is like Malloc except the allocated memory is zeroed.
func (a *Allocator) Calloc(size int) (r []byte, err error) {
p, err := a.UintptrCalloc(size)
if err != nil {
return nil, err
}
return (*rawmem)(unsafe.Pointer(p))[:size:usableSize(p)], nil
}
// Close releases all OS resources used by a and sets it to its zero value.
//
// It's not necessary to Close the Allocator when exiting a process.
func (a *Allocator) Close() (err error) {
for p := range a.regs {
if e := a.unmap(p); e != nil && err == nil {
err = e
}
}
*a = Allocator{}
return err
}
// Free deallocates memory (as in C.free). The argument of Free must have been
// acquired from Calloc or Malloc or Realloc.
func (a *Allocator) Free(b []byte) (err error) {
if b = b[:cap(b)]; len(b) == 0 {
return nil
}
return a.UintptrFree(uintptr(unsafe.Pointer(&b[0])))
}
// Malloc allocates size bytes and returns a byte slice of the allocated
// memory. The memory is not initialized. Malloc panics for size < 0 and
// returns (nil, nil) for zero size.
//
// It's ok to reslice the returned slice but the result of appending to it
// cannot be passed to Free or Realloc as it may refer to a different backing
// array afterwards.
func (a *Allocator) Malloc(size int) (r []byte, err error) {
p, err := a.UintptrMalloc(size)
if p == 0 || err != nil {
return nil, err
}
return (*rawmem)(unsafe.Pointer(p))[:size:usableSize(p)], nil
}
// Realloc changes the size of the backing array of b to size bytes or returns
// an error, if any. The contents will be unchanged in the range from the
// start of the region up to the minimum of the old and new sizes. If the
// new size is larger than the old size, the added memory will not be
// initialized. If b's backing array is of zero size, then the call is
// equivalent to Malloc(size), for all values of size; if size is equal to
// zero, and b's backing array is not of zero size, then the call is equivalent
// to Free(b). Unless b's backing array is of zero size, it must have been
// returned by an earlier call to Malloc, Calloc or Realloc. If the area
// pointed to was moved, a Free(b) is done.
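//
// For example, a.Realloc(b, 2*len(b)) keeps the same backing array when the
// current slot is already large enough (see UsableSize); otherwise it
// allocates a new block, copies the old contents and frees the old one.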
func (a *Allocator) Realloc(b []byte, size int) (r []byte, err error) {
var p uintptr
if b = b[:cap(b)]; len(b) != 0 {
p = uintptr(unsafe.Pointer(&b[0]))
}
if p, err = a.UintptrRealloc(p, size); p == 0 || err != nil {
return nil, err
}
return (*rawmem)(unsafe.Pointer(p))[:size:usableSize(p)], nil
}
// UsableSize reports the size of the memory block allocated at p, which must
// point to the first byte of a slice returned from Calloc, Malloc or Realloc.
// The allocated memory block size can be larger than the size originally
// requested from Calloc, Malloc or Realloc.
func UsableSize(p *byte) (r int) { return UintptrUsableSize(uintptr(unsafe.Pointer(p))) }
// UnsafeCalloc is like Calloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeCalloc(size int) (r unsafe.Pointer, err error) {
p, err := a.UintptrCalloc(size)
if err != nil {
return nil, err
}
return unsafe.Pointer(p), nil
}
// UnsafeFree is like Free except its argument is an unsafe.Pointer, which must
// have been acquired from UnsafeCalloc or UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeFree(p unsafe.Pointer) (err error) { return a.UintptrFree(uintptr(p)) }
// UnsafeMalloc is like Malloc except it returns an unsafe.Pointer.
func (a *Allocator) UnsafeMalloc(size int) (r unsafe.Pointer, err error) {
p, err := a.UintptrMalloc(size)
if err != nil {
return nil, err
}
return unsafe.Pointer(p), nil
}
// UnsafeRealloc is like Realloc except its first argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func (a *Allocator) UnsafeRealloc(p unsafe.Pointer, size int) (r unsafe.Pointer, err error) {
q, err := a.UintptrRealloc(uintptr(p), size)
if err != nil {
return nil, err
}
return unsafe.Pointer(q), nil
}
// UnsafeUsableSize is like UsableSize except its argument is an
// unsafe.Pointer, which must have been returned from UnsafeCalloc,
// UnsafeMalloc or UnsafeRealloc.
func UnsafeUsableSize(p unsafe.Pointer) (r int) { return UintptrUsableSize(uintptr(p)) }

10
memory32.go Normal file

@@ -0,0 +1,10 @@
// Copyright 2018 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || arm || armbe || mips || mipsle || ppc || s390 || s390x || sparc
// +build 386 arm armbe mips mipsle ppc s390 s390x sparc
package memory // import "modernc.org/memory"
type rawmem [1<<31 - 2]byte

10
memory64.go Normal file

@@ -0,0 +1,10 @@
// Copyright 2018 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 || amd64p32 || arm64 || arm64be || mips64 || mips64le || mips64p32 || mips64p32le || ppc64 || ppc64le || sparc64 || riscv64 || loong64
// +build amd64 amd64p32 arm64 arm64be mips64 mips64le mips64p32 mips64p32le ppc64 ppc64le sparc64 riscv64 loong64
package memory // import "modernc.org/memory"
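// rawmem is never allocated; it only serves as the pointee type for turning
// a raw pointer into a []byte of the required length and capacity.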
type rawmem [1<<50 - 1]byte

70
mmap_unix.go Normal file

@@ -0,0 +1,70 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE-MMAP-GO file.
//go:build unix
// Modifications (c) 2017 The Memory Authors.
package memory // import "modernc.org/memory"
import (
"golang.org/x/sys/unix"
"os"
"unsafe"
)
const pageSizeLog = 16
var (
osPageMask = osPageSize - 1
osPageSize = os.Getpagesize()
)
func unmap(addr uintptr, size int) error {
return unix.MunmapPtr(unsafe.Pointer(addr), uintptr(size))
}
// pageSize aligned.
func mmap(size int) (uintptr, int, error) {
size = roundup(size, osPageSize)
// Ask for more so we can align the result at a pageSize boundary
n := size + pageSize
up, err := unix.MmapPtr(-1, 0, nil, uintptr(n), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON)
if err != nil {
return 0, 0, err
}
p := uintptr(up)
if p&uintptr(osPageMask) != 0 {
panic("internal error")
}
mod := int(p) & pageMask
if mod != 0 { // Return the extra part before pageSize aligned block
m := pageSize - mod
if err := unmap(p, m); err != nil {
unmap(p, n) // Do not leak the first mmap
return 0, 0, err
}
n -= m
p += uintptr(m)
}
if p&uintptr(pageMask) != 0 {
panic("internal error")
}
if n > size { // Return the extra part after pageSize aligned block
if err := unmap(p+uintptr(size), n-size); err != nil {
// Do not error when the kernel rejects the extra part after, just return the
// unexpectedly enlarged size.
//
// Fixes the bigsort.test failures on linux/s390x, see: https://gitlab.com/cznic/sqlite/-/issues/207
size = n
}
}
return p, size, nil
}

49
mmap_windows.go Normal file

@@ -0,0 +1,49 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package memory // import "modernc.org/memory"
import (
syscall "golang.org/x/sys/windows"
"os"
)
const (
_MEM_COMMIT = 0x1000
_MEM_RESERVE = 0x2000
_MEM_DECOMMIT = 0x4000
_MEM_RELEASE = 0x8000
_PAGE_READWRITE = 0x0004
_PAGE_NOACCESS = 0x0001
)
const pageSizeLog = 16
var (
modkernel32 = syscall.NewLazySystemDLL("kernel32.dll")
osPageMask = osPageSize - 1
osPageSize = os.Getpagesize()
procVirtualAlloc = modkernel32.NewProc("VirtualAlloc")
procVirtualFree = modkernel32.NewProc("VirtualFree")
)
// pageSize aligned.
func mmap(size int) (uintptr, int, error) {
size = roundup(size, pageSize)
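// VirtualAlloc reserves address space at the system allocation granularity
// (64 KiB on current Windows versions), which matches pageSize here, so no
// alignment trimming is needed, unlike mmap_unix.go.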
addr, _, err := procVirtualAlloc.Call(0, uintptr(size), _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)
if err.(syscall.Errno) != 0 || addr == 0 {
return addr, size, err
}
return addr, size, nil
}
func unmap(addr uintptr, size int) error {
r, _, err := procVirtualFree.Call(addr, 0, _MEM_RELEASE)
if r == 0 {
return err
}
return nil
}

10
nocounters.go Normal file

@@ -0,0 +1,10 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !memory.counters
// +build !memory.counters
package memory // import "modernc.org/memory"
const counters = false

10
trace_disabled.go Normal file

@@ -0,0 +1,10 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !memory.trace
// +build !memory.trace
package memory // import "modernc.org/memory"
const trace = false

10
trace_enabled.go Normal file

@@ -0,0 +1,10 @@
// Copyright 2017 The Memory Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build memory.trace
// +build memory.trace
package memory // import "modernc.org/memory"
const trace = true