Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
399 changes: 362 additions & 37 deletions compiler/map.go

Large diffs are not rendered by default.

25 changes: 25 additions & 0 deletions compiler/symbol.go
Original file line number Diff line number Diff line change
Expand Up @@ -190,6 +190,31 @@ func (c *compilerContext) getFunction(fn *ssa.Function) (llvm.Type, llvm.Value)
case "runtime.stringFromRunes":
llvmFn.AddAttributeAtIndex(1, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
llvmFn.AddAttributeAtIndex(1, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("readonly"), 0))
case "runtime.hashmapSet":
// The key (param 2) and value (param 3) pointers are only read via
// memcpy/hash/equal and are never captured. The indirect calls
// through m.keyHash and m.keyEqual function pointers prevent LLVM's
// functionattrs pass from inferring this automatically.
llvmFn.AddAttributeAtIndex(2, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
llvmFn.AddAttributeAtIndex(3, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
case "runtime.hashmapGet":
// The key (param 2) is read-only and never captured.
// The value (param 3) is written to (receives the result) but never captured.
llvmFn.AddAttributeAtIndex(2, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
llvmFn.AddAttributeAtIndex(3, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
case "runtime.hashmapDelete":
// The key (param 2) is read-only and never captured.
llvmFn.AddAttributeAtIndex(2, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
case "runtime.hashmapGenericSet":
// Same as the runtime.hashmapSet case above: the key (param 2) and
// value (param 3) are not captured.
llvmFn.AddAttributeAtIndex(2, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
llvmFn.AddAttributeAtIndex(3, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
case "runtime.hashmapGenericGet":
llvmFn.AddAttributeAtIndex(2, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
llvmFn.AddAttributeAtIndex(3, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
case "runtime.hashmapGenericDelete":
llvmFn.AddAttributeAtIndex(2, c.ctx.CreateEnumAttribute(llvm.AttributeKindID("nocapture"), 0))
case "runtime.trackPointer":
// This function is necessary for tracking pointers on the stack in a
// portable way (see gc_stack_portable.go). Indicate to the optimizer
Expand Down
124 changes: 114 additions & 10 deletions src/internal/reflectlite/value.go
Original file line number Diff line number Diff line change
Expand Up @@ -1082,36 +1082,107 @@ func (v Value) MapKeys() []Value {
keys := make([]Value, 0, v.Len())

it := hashmapNewIterator()
k := New(v.typecode.Key())
e := New(v.typecode.Elem())

// Keys are stored as interface{} only for types that still use the
// legacy interface path (e.g., unsafe.Pointer). For those, we need
// to allocate an interface-sized buffer for hashmapNext (which writes
// m.keySize bytes), then unpack the interface to get the actual value.
keyType := v.typecode.key()
keyTypeIsEmptyInterface := keyType.Kind() == Interface && keyType.NumMethod() == 0
shouldUnpackInterface := !keyTypeIsEmptyInterface && keyType.Kind() != String && !keyType.isBinary()
Comment thread
jakebailey marked this conversation as resolved.
shouldUnpackInterface := keyType.Kind() != String && !keyType.isBinary() && !hashmapKeyUsesGenericPath(keyType)

k := newMapKeyAlloc(keyType, shouldUnpackInterface)
for hashmapNext(v.pointer(), it, k.value, e.value) {
if shouldUnpackInterface {
intf := *(*interface{})(k.value)
v := ValueOf(intf)
keys = append(keys, v)
keys = append(keys, ValueOf(intf))
} else {
keys = append(keys, k.Elem())
}
k = New(v.typecode.Key())
k = newMapKeyAlloc(keyType, shouldUnpackInterface)
}

return keys
}

// newMapKeyAlloc returns a freshly allocated Value that hashmapNext can
// write a key into. When interfaceStored is true, the map keeps its keys
// boxed as interface{} values (which may be wider than the declared key
// type), so the buffer is sized for an interface header rather than the
// key type itself to avoid hashmapNext writing past the allocation.
func newMapKeyAlloc(keyType *RawType, interfaceStored bool) Value {
	var size uintptr
	if interfaceStored {
		var empty interface{}
		size = unsafe.Sizeof(empty)
	} else {
		size = keyType.Size()
	}
	return Value{
		typecode: pointerTo(keyType),
		value:    alloc(size, nil),
		flags:    valueFlagExported,
	}
}

//go:linkname hashmapStringGet runtime.hashmapStringGet
func hashmapStringGet(m unsafe.Pointer, key string, value unsafe.Pointer, valueSize uintptr) bool

//go:linkname hashmapBinaryGet runtime.hashmapBinaryGet
func hashmapBinaryGet(m unsafe.Pointer, key, value unsafe.Pointer, valueSize uintptr) bool

//go:linkname hashmapGenericGet runtime.hashmapGenericGet
func hashmapGenericGet(m unsafe.Pointer, key, value unsafe.Pointer, valueSize uintptr) bool

//go:linkname hashmapInterfaceGet runtime.hashmapInterfaceGet
func hashmapInterfaceGet(m unsafe.Pointer, key interface{}, value unsafe.Pointer, valueSize uintptr) bool

// hashmapKeyUsesGenericPath reports whether the given map key type uses the
// compiler-generated hash/equal path (storing keys at their actual type) as
// opposed to the legacy interface path (storing keys as interface{}).
// This must match the compiler's hashmapCanGenerateHashEqual predicate.
func hashmapKeyUsesGenericPath(t *RawType) bool {
	switch t.Kind() {
	case Array:
		// An array key qualifies iff its element type does.
		return hashmapKeyUsesGenericPath(t.Elem().(*RawType))
	case Struct:
		// A struct key qualifies only when every field does.
		n := t.NumField()
		for i := 0; i < n; i++ {
			if !hashmapKeyUsesGenericPath(t.Field(i).Type.(*RawType)) {
				return false
			}
		}
		return true
	case Bool, Int, Int8, Int16, Int32, Int64,
		Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
		Float32, Float64, Complex64, Complex128,
		String, Chan, Ptr, Interface:
		return true
	}
	// Anything else (e.g. unsafe.Pointer) stays on the legacy path.
	return false
}

// genericKeyPtr returns a pointer to key data suitable for passing to the
// hashmapGeneric* functions. When the map's key type is an interface,
// special handling is needed: if the key Value already holds an interface
// (e.g. from MapKeys iteration), its memory already contains the
// {typecode, data} pair the hashmap expects, so we use it directly.
// If the key is a concrete type being assigned to an interface-keyed map,
// we compose the interface first.
func genericKeyPtr(vkey *RawType, key Value) unsafe.Pointer {
if vkey.Kind() == Interface {
if key.Kind() == Interface {
// Key is already an interface value stored indirectly;
// key.value points to {typecode, data}.
return key.value
}
// Concrete value being used as an interface key.
intf := composeInterface(unsafe.Pointer(key.typecode), key.value)
return unsafe.Pointer(&intf)
Comment thread
jakebailey marked this conversation as resolved.
}
if key.isIndirect() || key.typecode.Size() > unsafe.Sizeof(uintptr(0)) {
return key.value
}
return unsafe.Pointer(&key.value)
}

func (v Value) MapIndex(key Value) Value {
if v.Kind() != Map {
panic(&ValueError{Method: "MapIndex", Kind: v.Kind()})
Expand Down Expand Up @@ -1145,7 +1216,17 @@ func (v Value) MapIndex(key Value) Value {
return Value{}
}
return elem.Elem()
} else if hashmapKeyUsesGenericPath(vkey) {
// Compiler-generated hash/equal path: keys are stored at their
// actual type. Use hashmapGenericGet which dispatches through the
// map's own keyHash/keyEqual function pointers.
keyptr := genericKeyPtr(vkey, key)
if ok := hashmapGenericGet(v.pointer(), keyptr, elem.value, elemType.Size()); !ok {
Comment thread
jakebailey marked this conversation as resolved.
return Value{}
}
return elem.Elem()
} else {
// Legacy interface path: keys are stored as interface{}.
if ok := hashmapInterfaceGet(v.pointer(), key.Interface(), elem.value, elemType.Size()); !ok {
return Value{}
}
Expand Down Expand Up @@ -1206,7 +1287,7 @@ func (v Value) SetIterValue(iter *MapIter) {
}

func (it *MapIter) Next() bool {
it.key = New(it.m.typecode.Key())
it.key = newMapKeyAlloc(it.m.typecode.key(), it.unpackKeyInterface)
it.val = New(it.m.typecode.Elem())

it.valid = hashmapNext(it.m.pointer(), it.it, it.key.value, it.val.value)
Expand All @@ -1218,10 +1299,10 @@ func (iter *MapIter) Reset(v Value) {
panic(&ValueError{Method: "MapRange", Kind: v.Kind()})
}

// Keys are stored as interface{} only for types that still use the
// legacy interface path.
keyType := v.typecode.key()

keyTypeIsEmptyInterface := keyType.Kind() == Interface && keyType.NumMethod() == 0
shouldUnpackInterface := !keyTypeIsEmptyInterface && keyType.Kind() != String && !keyType.isBinary()
shouldUnpackInterface := keyType.Kind() != String && !keyType.isBinary() && !hashmapKeyUsesGenericPath(keyType)

*iter = MapIter{
m: v,
Expand Down Expand Up @@ -1969,6 +2050,9 @@ func hashmapStringSet(m unsafe.Pointer, key string, value unsafe.Pointer)
//go:linkname hashmapBinarySet runtime.hashmapBinarySet
func hashmapBinarySet(m unsafe.Pointer, key, value unsafe.Pointer)

//go:linkname hashmapGenericSet runtime.hashmapGenericSet
func hashmapGenericSet(m unsafe.Pointer, key, value unsafe.Pointer)

//go:linkname hashmapInterfaceSet runtime.hashmapInterfaceSet
func hashmapInterfaceSet(m unsafe.Pointer, key interface{}, value unsafe.Pointer)

Expand All @@ -1978,6 +2062,9 @@ func hashmapStringDelete(m unsafe.Pointer, key string)
//go:linkname hashmapBinaryDelete runtime.hashmapBinaryDelete
func hashmapBinaryDelete(m unsafe.Pointer, key unsafe.Pointer)

//go:linkname hashmapGenericDelete runtime.hashmapGenericDelete
func hashmapGenericDelete(m unsafe.Pointer, key unsafe.Pointer)

//go:linkname hashmapInterfaceDelete runtime.hashmapInterfaceDelete
func hashmapInterfaceDelete(m unsafe.Pointer, key interface{})

Expand Down Expand Up @@ -2042,7 +2129,24 @@ func (v Value) SetMapIndex(key, elem Value) {
}
hashmapBinarySet(v.pointer(), keyptr, elemptr)
}
} else if hashmapKeyUsesGenericPath(vkey) {
// Compiler-generated hash/equal path.
keyptr := genericKeyPtr(vkey, key)

if del {
hashmapGenericDelete(v.pointer(), keyptr)
} else {
var elemptr unsafe.Pointer
if elem.isIndirect() || elem.typecode.Size() > unsafe.Sizeof(uintptr(0)) {
elemptr = elem.value
} else {
elemptr = unsafe.Pointer(&elem.value)
}

hashmapGenericSet(v.pointer(), keyptr, elemptr)
Comment thread
jakebailey marked this conversation as resolved.
}
} else {
// Legacy interface path.
if del {
hashmapInterfaceDelete(v.pointer(), key.Interface())
} else {
Expand Down
70 changes: 66 additions & 4 deletions src/runtime/hashmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,12 @@ type hashmapBucket struct {
// allocated but as they're of variable size they can't be shown here.
}

// hashmapBucketHeaderSize is the offset in bytes from the start of a bucket to
// the first key, aligned to 8 bytes. This ensures that keys requiring 8-byte
// alignment (float64, complex128, uint64 on strict-alignment architectures
// like MIPS) are properly aligned in the bucket.
// The expression rounds unsafe.Sizeof(hashmapBucket{}) up to the next
// multiple of 8: add 7, then clear the low three bits with &^ 7.
const hashmapBucketHeaderSize = (unsafe.Sizeof(hashmapBucket{}) + 7) &^ 7

type hashmapIterator struct {
buckets unsafe.Pointer // pointer to array of hashapBuckets
numBuckets uintptr // length of buckets array
Expand Down Expand Up @@ -66,7 +72,7 @@ func hashmapMake(keySize, valueSize uintptr, sizeHint uintptr, alg uint8) *hashm
bucketBits++
}

bucketBufSize := unsafe.Sizeof(hashmapBucket{}) + keySize*8 + valueSize*8
bucketBufSize := hashmapBucketHeaderSize + keySize*8 + valueSize*8
buckets := alloc(bucketBufSize*(1<<bucketBits), nil)

keyHash := hashmapKeyHashAlg(tinygo.HashmapAlgorithm(alg))
Expand Down Expand Up @@ -172,7 +178,7 @@ func hashmapLen(m *hashmap) int {

// hashmapBucketSize returns the total size in bytes of one bucket:
// the aligned header followed by 8 key slots and 8 value slots.
//
//go:inline
func hashmapBucketSize(m *hashmap) uintptr {
	return hashmapBucketHeaderSize + 8*uintptr(m.keySize) + 8*uintptr(m.valueSize)
}

//go:inline
Expand All @@ -191,14 +197,14 @@ func hashmapBucketAddrForHash(m *hashmap, hash uint32) *hashmapBucket {

// hashmapSlotKey returns a pointer to the key stored in the given slot of
// the bucket. Keys are laid out contiguously right after the (8-byte
// aligned) bucket header.
//
//go:inline
func hashmapSlotKey(m *hashmap, bucket *hashmapBucket, slot uint8) unsafe.Pointer {
	offset := hashmapBucketHeaderSize + uintptr(slot)*uintptr(m.keySize)
	return unsafe.Add(unsafe.Pointer(bucket), offset)
}

// hashmapSlotValue returns a pointer to the value stored in the given slot
// of the bucket. Values follow the full array of 8 key slots.
//
//go:inline
func hashmapSlotValue(m *hashmap, bucket *hashmapBucket, slot uint8) unsafe.Pointer {
	offset := hashmapBucketHeaderSize + 8*uintptr(m.keySize) + uintptr(slot)*uintptr(m.valueSize)
	return unsafe.Add(unsafe.Pointer(bucket), offset)
}
Expand Down Expand Up @@ -491,6 +497,62 @@ func hashmapBinaryDelete(m *hashmap, key unsafe.Pointer) {
hashmapDelete(m, key, hash)
}

// Hashmap with compiler-generated key hash/equal functions.
// Unlike the binary path (which uses hash32/memequal), these use the
// type-specific keyHash and keyEqual function pointers stored in the hashmap
// struct. This is used for composite key types (e.g. structs containing
// strings) where the compiler generates specialized hash/equal functions.

// hashmapGenericSet inserts or updates a key/value pair using the map's
// type-specific keyHash function pointer. Assigning into a nil map panics,
// matching the language semantics.
func hashmapGenericSet(m *hashmap, key, value unsafe.Pointer) {
	if m == nil {
		nilMapPanic()
	}
	hashmapSet(m, key, value, m.keyHash(key, m.keySize, m.seed))
}

// hashmapGenericGet looks up key using the map's type-specific keyHash
// function pointer and copies the stored value (valueSize bytes) into
// value. It reports whether the key was present. Reading from a nil map
// yields the zero value, matching the language semantics.
func hashmapGenericGet(m *hashmap, key, value unsafe.Pointer, valueSize uintptr) bool {
	if m == nil {
		// Zero the destination: a nil-map read returns the zero value.
		// (valueSize is already uintptr; no conversion needed.)
		memzero(value, valueSize)
		return false
	}
	hash := m.keyHash(key, m.keySize, m.seed)
	return hashmapGet(m, key, value, valueSize, hash)
}

// hashmapGenericDelete removes key (if present) using the map's
// type-specific keyHash function pointer. Deleting from a nil map is a
// no-op, matching the language semantics.
func hashmapGenericDelete(m *hashmap, key unsafe.Pointer) {
	if m == nil {
		return
	}
	hashmapDelete(m, key, m.keyHash(key, m.keySize, m.seed))
}

// hashmapMakeGeneric creates a new hashmap with compiler-provided hash and
// equal functions. This avoids the interface/reflection path for composite
// key types like structs containing strings.
func hashmapMakeGeneric(keySize, valueSize uintptr, sizeHint uintptr,
	keyHash func(key unsafe.Pointer, size, seed uintptr) uint32,
	keyEqual func(x, y unsafe.Pointer, n uintptr) bool) *hashmap {
	// Pick the smallest bucket count that keeps sizeHint entries under the
	// load factor.
	var bucketBits uint8
	for hashmapHasSpaceToGrow(bucketBits) && hashmapOverLoadFactor(sizeHint, bucketBits) {
		bucketBits++
	}

	// Each bucket is an aligned header followed by 8 keys and 8 values.
	bucketBufSize := hashmapBucketHeaderSize + 8*keySize + 8*valueSize
	return &hashmap{
		buckets:    alloc(bucketBufSize*(1<<bucketBits), nil),
		seed:       uintptr(fastrand()),
		keySize:    keySize,
		valueSize:  valueSize,
		bucketBits: bucketBits,
		keyHash:    keyHash,
		keyEqual:   keyEqual,
	}
}

// Hashmap with string keys (a common case).

func hashmapStringEqual(x, y unsafe.Pointer, n uintptr) bool {
Expand Down
21 changes: 21 additions & 0 deletions testdata/map.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package main

import (
"reflect"
"sort"
"unsafe"
)
Expand Down Expand Up @@ -130,6 +131,8 @@ func main() {

mapgrow()

reflectMapIterfaceKey()

interfacerehash()
}

Expand Down Expand Up @@ -308,3 +311,21 @@ func interfacerehash() {
println("no interface lookup failures")
}
}

// Test for issue #3794: reflect MapIter.Key() should return a value with
// interface kind for map[interface{}] keys, not the underlying concrete kind.
func reflectMapIterfaceKey() {
m := make(map[interface{}]int)
m[1] = 2
m["hello"] = 3
rv := reflect.ValueOf(m)
iter := rv.MapRange()
for iter.Next() {
k := iter.Key()
if k.Kind() != reflect.Interface {
println("FAIL #3794: expected interface kind, got", k.Kind().String())
return
}
}
println("reflect map interface key ok")
}
1 change: 1 addition & 0 deletions testdata/map.txt
Original file line number Diff line number Diff line change
Expand Up @@ -80,4 +80,5 @@ tested growing of a map
2
2
done
reflect map interface key ok
no interface lookup failures
Loading