diff --git a/.gitignore b/.gitignore
index 6cbe0f52..91e26c35 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
/seccompagent
+/seccompshell
*.swp
diff --git a/Makefile b/Makefile
index 9e71de81..29e602e2 100644
--- a/Makefile
+++ b/Makefile
@@ -5,10 +5,17 @@ IMAGE_TAG=$(shell ./tools/image-tag)
IMAGE_BRANCH_TAG=$(shell ./tools/image-tag branch)
CONTAINER_REPO ?= quay.io/kinvolk/seccompagent
+.PHONY: all
+all: seccompagent seccompshell
+
.PHONY: seccompagent
seccompagent:
$(GO_BUILD) -o seccompagent ./cmd/seccompagent
+.PHONY: seccompshell
+seccompshell:
+ $(GO_BUILD) -tags seccomp -o seccompshell ./cmd/seccompshell
+
.PHONY: container-build
container-build:
docker build -t $(CONTAINER_REPO):$(IMAGE_TAG) -f Dockerfile .
@@ -28,3 +35,9 @@ vendor:
.PHONY: test
test:
go test -test.v ./...
+
+.PHONY: local-containerd-install
+local-containerd-install:
+ docker build -t local-seccomp-agent .
+ docker save --output local-seccomp-agent.tar local-seccomp-agent
+ sudo ctr --address /run/customcontainerd/containerd.sock --namespace k8s.io images import local-seccomp-agent.tar
diff --git a/cmd/seccompagent/seccompagent.go b/cmd/seccompagent/seccompagent.go
index 288fed1d..b516addc 100644
--- a/cmd/seccompagent/seccompagent.go
+++ b/cmd/seccompagent/seccompagent.go
@@ -15,6 +15,7 @@ import (
"github.com/kinvolk/seccompagent/pkg/handlers"
"github.com/kinvolk/seccompagent/pkg/kuberesolver"
"github.com/kinvolk/seccompagent/pkg/nsenter"
+ "github.com/kinvolk/seccompagent/pkg/opa"
"github.com/kinvolk/seccompagent/pkg/registry"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -30,7 +31,7 @@ var (
func init() {
flag.StringVar(&socketFile, "socketfile", "/run/seccomp-agent.socket", "Socket file")
- flag.StringVar(&resolverParam, "resolver", "", "Container resolver to use [none, demo-basic, kubernetes]")
+ flag.StringVar(&resolverParam, "resolver", "", "Container resolver to use [none, demo-basic, kubernetes, opa]")
flag.StringVar(&logflags, "log", "info", "log level [trace,debug,info,warn,error,fatal,color,nocolor,json]")
}
@@ -69,39 +70,41 @@ func main() {
// Kubernetes API, find the pod, and allow or deny a syscall depending
// on the pod specifications (e.g. namespace, annotations,
// serviceAccount).
- resolver = func(state *specs.ContainerProcessState) *registry.Registry {
- r := registry.New()
+ resolver = func(state *specs.ContainerProcessState) registry.Filter {
+ f := registry.NewSimpleFilter()
// Example:
// / # mount -t proc proc root
// / # ls /root/self/cmdline
// /root/self/cmdline
allowedFilesystems := map[string]struct{}{"proc": struct{}{}}
- r.Add("mount", handlers.Mount(allowedFilesystems))
+ f.AddHandler("mount", handlers.Mount(allowedFilesystems))
// Example:
// # chmod 777 /
// chmod: /: Bad message
- r.Add("chmod", handlers.Error(unix.EBADMSG))
+ f.AddHandler("chmod", handlers.Error(unix.EBADMSG))
// Example:
// # mkdir /abc
// # ls -d /abc*
// /abc-pid-3528098
if state != nil {
- r.Add("mkdir", handlers.MkdirWithSuffix(fmt.Sprintf("-pid-%d", state.State.Pid)))
+ f.AddHandler("mkdir", handlers.MkdirWithSuffix(fmt.Sprintf("-pid-%d", state.State.Pid)))
}
- return r
+ return f
}
case "kubernetes":
- kubeResolverFunc := func(podCtx *kuberesolver.PodContext, metadata map[string]string) *registry.Registry {
+ kubeResolverFunc := func(podCtx *kuberesolver.PodContext, metadata map[string]string) registry.Filter {
log.WithFields(log.Fields{
"pod": podCtx,
"metadata": metadata,
}).Debug("New container")
- r := registry.New()
+ f := registry.NewSimpleFilter()
+
+ f.AddHandler("chmod", handlers.ErrorSeq())
if v, ok := metadata["MKDIR_TMPL"]; ok {
tmpl, err := template.New("mkdirTmpl").Parse(v)
@@ -109,7 +112,7 @@ func main() {
var suffix strings.Builder
err = tmpl.Execute(&suffix, podCtx)
if err == nil {
- r.Add("mkdir", handlers.MkdirWithSuffix(suffix.String()))
+ f.AddHandler("mkdir", handlers.MkdirWithSuffix(suffix.String()))
}
}
}
@@ -118,7 +121,7 @@ func main() {
d, ok := metadata["EXEC_DURATION"]
if ok {
duration, _ := time.ParseDuration(d)
- r.Add("execve", handlers.ExecCondition(fileName, duration))
+ f.AddHandler("execve", handlers.ExecCondition(fileName, duration))
}
}
@@ -126,7 +129,7 @@ func main() {
d, ok := metadata["SIDECARS_DELAY"]
if ok {
duration, _ := time.ParseDuration(d)
- r.Add("execve", handlers.ExecSidecars(podCtx, sidecars, duration))
+ f.AddHandler("execve", handlers.ExecSidecars(podCtx, sidecars, duration))
}
}
@@ -138,15 +141,32 @@ func main() {
allowedFilesystems["sysfs"] = struct{}{}
}
if len(allowedFilesystems) > 0 {
- r.Add("mount", handlers.Mount(allowedFilesystems))
+ f.AddHandler("mount", handlers.Mount(allowedFilesystems))
}
- return r
+ return f
+ }
+ var err error
+ resolver, err = kuberesolver.KubeResolver(kubeResolverFunc)
+ if err != nil {
+ panic(err)
+ }
+ case "opa":
+ kubeResolverFunc := func(podCtx *kuberesolver.PodContext, metadata map[string]string) registry.Filter {
+ log.WithFields(log.Fields{
+ "pod": podCtx,
+ "metadata": metadata,
+ }).Debug("New container")
+
+ f := opa.NewOpaFilter(podCtx)
+
+ return f
}
var err error
resolver, err = kuberesolver.KubeResolver(kubeResolverFunc)
if err != nil {
panic(err)
}
+
default:
panic(errors.New("invalid container resolver"))
}
diff --git a/cmd/seccompshell/seccompshell.go b/cmd/seccompshell/seccompshell.go
new file mode 100644
index 00000000..3f9757a8
--- /dev/null
+++ b/cmd/seccompshell/seccompshell.go
@@ -0,0 +1,129 @@
+// +build linux,cgo
+
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/seccomp"
+ "github.com/opencontainers/runc/libcontainer/specconv"
+ "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+var (
+ paramSeccompFile string
+ socketFile string
+ paramMetadata string
+ paramID string
+ logflags string
+)
+
+func init() {
+ flag.StringVar(&paramSeccompFile, "seccomp-policy", "/var/lib/kubelet/seccomp/default.json", "Seccomp Policy file")
+ flag.StringVar(&socketFile, "socketfile", "/run/seccomp-agent.socket", "Socket file")
+ flag.StringVar(&paramMetadata, "metadata", "", "Metadata to send to the seccomp agent")
+ flag.StringVar(&paramID, "id", "", "Container ID to send to the seccomp agent")
+ flag.StringVar(&logflags, "log", "info", "log level [trace,debug,info,warn,error,fatal,color,nocolor,json]")
+}
+
+func sendContainerProcessState(listenerPath string, state *specs.ContainerProcessState, fds ...int) error {
+ conn, err := net.Dial("unix", listenerPath)
+ if err != nil {
+ return fmt.Errorf("cannot connect to %q: %v\n", listenerPath, err)
+ }
+
+ socket, err := conn.(*net.UnixConn).File()
+ if err != nil {
+ return fmt.Errorf("cannot get socket: %v\n", err)
+ }
+ defer socket.Close()
+
+ b, err := json.Marshal(state)
+ if err != nil {
+ return fmt.Errorf("cannot marshall seccomp state: %v\n", err)
+ }
+
+ err = utils.SendFds(socket, b, fds...)
+ if err != nil {
+ return fmt.Errorf("cannot send seccomp fd to %s: %v\n", listenerPath, err)
+ }
+
+ return nil
+}
+
+func main() {
+ flag.Parse()
+ for _, v := range strings.Split(logflags, ",") {
+ if v == "json" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else if v == "color" {
+ log.SetFormatter(&log.TextFormatter{ForceColors: true})
+ } else if v == "nocolor" {
+ log.SetFormatter(&log.TextFormatter{DisableColors: true})
+ } else if lvl, err := log.ParseLevel(v); err == nil {
+ log.SetLevel(lvl)
+ } else {
+ fmt.Fprintf(os.Stderr, "Invalid log level: %s\n", err.Error())
+ flag.Usage()
+ os.Exit(1)
+ }
+ }
+ if flag.NArg() == 0 {
+ panic(errors.New("invalid command"))
+ }
+
+ buf, err := ioutil.ReadFile(paramSeccompFile)
+ if err != nil {
+ panic(fmt.Errorf("cannot read file %q: %s", paramSeccompFile, err))
+ }
+
+ seccompConfigOCI := &specs.LinuxSeccomp{}
+ json.Unmarshal(buf, seccompConfigOCI)
+
+ seccompConfig, err := specconv.SetupSeccomp(seccompConfigOCI)
+ if err != nil {
+ panic(fmt.Errorf("cannot convert seccomp policy from OCI format to libcontainer format: %s", err))
+ }
+
+ if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
+ panic(fmt.Errorf("cannot set nonewprivileges: %s", err))
+ }
+
+ containerProcessState := &specs.ContainerProcessState{
+ Version: specs.Version,
+ FdIndexes: map[specs.FdIndexKey]int{specs.SeccompFdIndexKey: 0},
+ Pid: os.Getpid(),
+ Metadata: paramMetadata,
+ State: specs.State{
+ Version: specs.Version,
+ ID: paramID,
+ Status: specs.StateRunning,
+ Pid: os.Getpid(),
+ Bundle: "",
+ Annotations: map[string]string{},
+ },
+ }
+ seccompFd, err := seccomp.InitSeccomp(seccompConfig)
+ if err != nil || seccompFd == -1 {
+ panic(fmt.Errorf("cannot init seccomp: %s", err))
+ }
+
+ if err := sendContainerProcessState(socketFile,
+ containerProcessState, int(seccompFd)); err != nil {
+ panic(fmt.Errorf("cannot send message to seccomp agent: %s", err))
+ }
+
+ if err := unix.Exec(flag.Arg(0), flag.Args()[1:], os.Environ()); err != nil {
+ panic(fmt.Errorf("cannot exec command: %s", err))
+ }
+}
diff --git a/deploy/seccompagent.yaml b/deploy/seccompagent.yaml
index f17262a2..35bd71d6 100644
--- a/deploy/seccompagent.yaml
+++ b/deploy/seccompagent.yaml
@@ -1,45 +1,98 @@
apiVersion: v1
+kind: Namespace
+metadata:
+ name: seccomp-agent
+---
+apiVersion: v1
kind: ServiceAccount
metadata:
- name: seccompagent
- namespace: kube-system
+ name: seccomp-agent
+ namespace: seccomp-agent
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
- name: seccompagent
+ name: seccomp-agent
subjects:
- kind: ServiceAccount
- name: seccompagent
- namespace: kube-system
+ name: seccomp-agent
+ namespace: seccomp-agent
roleRef:
kind: ClusterRole
name: view
apiGroup: rbac.authorization.k8s.io
---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: seccomp-agent-rego
+ namespace: seccomp-agent
+data:
+ policies.rego: |
+ package syscall.authz
+
+ action[{"passthrough": passthrough, "reason": reason}] {
+ input.syscall = "execve"
+
+ passthrough := true
+ reason := "execve are always accepted"
+ }
+ action[{"handler": handler, "reason": reason, "suffix": suffix}] {
+ input.syscall = "mkdir"
+ input.pod.namespace = "default"
+ input.pod.name = "mynotifypod"
+ startswith(input.arg0, "foo")
+
+ handler := "mkdir"
+ reason := "directories can start with foo"
+ suffix := "-{{.Namespace}}-{{.Pod}}-{{.Container}}"
+ }
+ action[{"handler": handler, "reason": reason, "suffix": suffix}] {
+ input.syscall = "mkdir"
+ input.pod.namespace = "default"
+ input.pod.name = "mynotifypod"
+ endswith(input.arg0, "bar")
+
+ handler := "mkdir"
+ reason := "directories can end with bar"
+ suffix := "-{{.Namespace}}-{{.Pod}}-{{.Container}}"
+ }
+ action[{"handler": handler, "reason": reason}] {
+ input.syscall = "mount"
+ allowedfs := {"proc", "tmpfs"}
+ allowedfs[input.arg2]
+ input.pod.namespace = "default"
+ input.pod.name = "mynotifypod"
+
+ handler := "mount"
+ reason := "mounting specific filesystems is allowed"
+ }
+---
apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: seccompagent
- namespace: kube-system
+ name: seccomp-agent
+ namespace: seccomp-agent
labels:
- k8s-app: seccompagent
+ k8s-app: seccomp-agent
spec:
selector:
matchLabels:
- k8s-app: seccompagent
+ k8s-app: seccomp-agent
template:
metadata:
labels:
- k8s-app: seccompagent
+ k8s-app: seccomp-agent
spec:
- serviceAccount: seccompagent
+ serviceAccount: seccomp-agent
hostPID: true
containers:
- - name: seccompagent
- image: quay.io/kinvolk/seccompagent:latest
- command: [ "/bin/seccompagent", "-resolver=kubernetes", "-log=trace" ]
- imagePullPolicy: Always
+ - name: seccomp-agent
+ #image: quay.io/kinvolk/seccompagent:alban_opa
+ image: local-seccomp-agent
+ #imagePullPolicy: Always
+ imagePullPolicy: Never
+ command: [ "/bin/seccompagent", "-resolver=opa", "-log=trace" ]
env:
- name: NODE_NAME
valueFrom:
@@ -60,6 +113,8 @@ spec:
mountPath: /host/seccomp
- name: run
mountPath: /run
+ - name: seccomp-agent-rego
+ mountPath: /etc/seccomp-agent
tolerations:
- effect: NoSchedule
operator: Exists
@@ -72,3 +127,6 @@ spec:
- name: run
hostPath:
path: /run
+ - name: seccomp-agent-rego
+ configMap:
+ name: seccomp-agent-rego
diff --git a/go.mod b/go.mod
index e68ad79a..d2130e6d 100644
--- a/go.mod
+++ b/go.mod
@@ -3,12 +3,13 @@ module github.com/kinvolk/seccompagent
go 1.15
require (
- github.com/opencontainers/runtime-spec v1.0.2
+ github.com/open-policy-agent/opa v0.26.0
+ github.com/opencontainers/runc v0.0.0-00010101000000-000000000000
+ github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d
github.com/seccomp/libseccomp-golang v0.9.1
github.com/sirupsen/logrus v1.7.0
- golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
- golang.org/x/sys v0.0.0-20201101102859-da207088b7d1
+ golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
k8s.io/api v0.17.4
k8s.io/apimachinery v0.17.4
@@ -16,6 +17,8 @@ require (
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f // indirect
)
-replace github.com/opencontainers/runtime-spec => github.com/kinvolk/runtime-spec v1.0.2-0.20201110202115-2755fc508653
+replace github.com/opencontainers/runtime-spec => github.com/kinvolk/runtime-spec v1.0.2-0.20210309175439-58798e75e980
replace github.com/seccomp/libseccomp-golang => github.com/kinvolk/libseccomp-golang v0.9.2-0.20201113182948-883917843313
+
+replace github.com/opencontainers/runc => github.com/kinvolk/runc v0.1.1-0.20201126131201-5a620a897292
diff --git a/go.sum b/go.sum
index 6565faa2..ab4588c9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,66 +1,235 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kinvolk/libseccomp-golang v0.9.2-0.20201113182948-883917843313 h1:Yksjjb26OqF2mRph6uc7HUJA3p/UqbehaxfV9g1wx2k=
github.com/kinvolk/libseccomp-golang v0.9.2-0.20201113182948-883917843313/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/kinvolk/runc v0.1.1-0.20201126131201-5a620a897292 h1:4QpAU3NWf4YLOlRbxym5m3s1gERUoioSRPpwpp/0uGI=
+github.com/kinvolk/runc v0.1.1-0.20201126131201-5a620a897292/go.mod h1:R9zp0d+SHUqekKDZrlxUoZbn3OyXxxSTZ7ZAiJL06Cw=
github.com/kinvolk/runtime-spec v1.0.2-0.20201110202115-2755fc508653 h1:amSDd4i3F4aNzi9TmDRDjW31ZQ6T9LN1lXVOazoTjQ8=
github.com/kinvolk/runtime-spec v1.0.2-0.20201110202115-2755fc508653/go.mod h1:x0jDMgm6GEAbohE2lugQZrRwSe07FpeoUJm9jP2a5Sk=
+github.com/kinvolk/runtime-spec v1.0.2-0.20210309175439-58798e75e980 h1:d0LC/se/28Agl/3NeFZSpV1BA3ZXDVTLzp3RDrqTNM0=
+github.com/kinvolk/runtime-spec v1.0.2-0.20210309175439-58798e75e980/go.mod h1:x0jDMgm6GEAbohE2lugQZrRwSe07FpeoUJm9jP2a5Sk=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -68,84 +237,324 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/open-policy-agent/opa v0.26.0 h1:FI0woFdGA73reU8OzSMzgHLFK+XeDMxKIlBpvvpRqDQ=
+github.com/open-policy-agent/opa v0.26.0/go.mod h1:iGThTRECCfKQKICueOZkXUi0opN7BR3qiAnIrNHCmlI=
+github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/wasmerio/go-ext-wasm v0.3.1 h1:G95XP3fE2FszQSwIU+fHPBYzD0Csmd2ef33snQXNA5Q=
+github.com/wasmerio/go-ext-wasm v0.3.1/go.mod h1:VGyarTzasuS7k5KhSIGpM3tciSZlkP31Mp9VJTHMMeI=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b h1:vVRagRXf67ESqAb72hG2C/ZwI8NtJF2u2V76EsuOHGY=
+github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200927032502-5d4f70055728 h1:5wtQIAulKU5AbLQOkjxl32UufnIOqgBX72pS0AV14H0=
+golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201101102859-da207088b7d1 h1:a/mKvvZr9Jcc8oKfcmgzyp7OwF73JPWsQLvH1z2Kxck=
-golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf h1:kt3wY1Lu5MJAnKTfoMR52Cu4gwvna4VTzNOiT8tY73s=
+golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20201009032223-96877f285f7e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A=
k8s.io/api v0.17.4 h1:HbwOhDapkguO8lTAE8OX3hdF2qp8GtpC9CW/MQATXXo=
k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA=
@@ -168,3 +577,4 @@ k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go
index 0bf614b5..c46a44f8 100644
--- a/pkg/agent/agent.go
+++ b/pkg/agent/agent.go
@@ -14,7 +14,59 @@ import (
"github.com/kinvolk/seccompagent/pkg/registry"
)
-func receiveNewSeccompFile(resolver registry.ResolverFunc, sockfd int) (*registry.Registry, *os.File, error) {
+func closeStateFds(recvFds []int) { // best-effort close of every fd received over SCM_RIGHTS
+	// If performance becomes an issue, we can fallback to the new syscall closerange().
+	for _, fd := range recvFds {
+		// Close the fd value, not the slice index; ignore the return code.
+		unix.Close(fd)
+	}
+}
+
+// parseStateFds returns the seccomp-fd and closes the rest of the fds in recvFds.
+// In case of error, all recvFds are closed.
+// stateFds is assumed to be formatted as specs.ContainerProcessState.Fds and
+// recvFds the corresponding list of received fds in the same SCM_RIGHTS message.
+func parseStateFds(stateFds []string, recvFds []int) (uintptr, error) {
+	// Let's find the index in stateFds of the seccomp-fd.
+	idx := -1
+	err := false // set when specs.SeccompFdName occurs more than once in stateFds
+
+	for i, name := range stateFds {
+		if name == specs.SeccompFdName && idx == -1 {
+			idx = i
+			continue
+		}
+
+		// We found the seccompFdName two times. Error out!
+		if name == specs.SeccompFdName && idx != -1 {
+			err = true
+		}
+	}
+
+	if idx == -1 || err {
+		closeStateFds(recvFds)
+		return 0, fmt.Errorf("seccomp fd not found or malformed containerProcessState.Fds")
+	}
+
+	if idx >= len(recvFds) || idx < 0 { // idx < 0 is unreachable here (checked above); kept as a defensive bound
+		closeStateFds(recvFds)
+		return 0, fmt.Errorf("seccomp fd index out of range")
+	}
+
+	fd := uintptr(recvFds[idx])
+
+	for i := range recvFds { // close every received fd except the seccomp fd handed back to the caller
+		if i == idx {
+			continue
+		}
+
+		unix.Close(recvFds[i])
+	}
+
+	return fd, nil
+}
+
+func receiveNewSeccompFile(resolver registry.ResolverFunc, sockfd int) (registry.Filter, error) {
MaxNameLen := 4096
oobSpace := unix.CmsgSpace(4)
stateBuf := make([]byte, 4096)
@@ -24,10 +76,10 @@ func receiveNewSeccompFile(resolver registry.ResolverFunc, sockfd int) (*registr
n, oobn, _, _, err := unix.Recvmsg(sockfd, stateBuf, oob, 0)
if err != nil {
- return nil, nil, err
+ return nil, err
}
if n >= MaxNameLen || oobn != oobSpace {
- return nil, nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+ return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
}
// Truncate.
@@ -37,30 +89,27 @@ func receiveNewSeccompFile(resolver registry.ResolverFunc, sockfd int) (*registr
containerProcessState := &specs.ContainerProcessState{}
err = json.Unmarshal(stateBuf, containerProcessState)
if err != nil {
- return nil, nil, fmt.Errorf("cannot parse OCI state: %v\n", err)
- }
- seccompFdIndex, ok := containerProcessState.FdIndexes["seccompFd"]
- if !ok || seccompFdIndex < 0 {
- return nil, nil, fmt.Errorf("recvfd: didn't receive seccomp fd")
+ return nil, fmt.Errorf("cannot parse OCI state: %v\n", err)
}
scms, err := unix.ParseSocketControlMessage(oob)
if err != nil {
- return nil, nil, err
+ return nil, err
}
if len(scms) != 1 {
- return nil, nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+ return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
}
scm := scms[0]
fds, err := unix.ParseUnixRights(&scm)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- if seccompFdIndex >= len(fds) {
- return nil, nil, fmt.Errorf("recvfd: number of fds is %d and seccompFdIndex is %d", len(fds), seccompFdIndex)
+
+ fd, err := parseStateFds(containerProcessState.Fds, fds)
+ if err != nil {
+ return nil, err
}
- fd := uintptr(fds[seccompFdIndex])
log.WithFields(log.Fields{
"fd": fd,
@@ -70,28 +119,31 @@ func receiveNewSeccompFile(resolver registry.ResolverFunc, sockfd int) (*registr
"annotations": containerProcessState.State.Annotations,
}).Debug("New seccomp fd received on socket")
- for i := 0; i < len(fds); i++ {
- if i != seccompFdIndex {
- unix.Close(fds[i])
- }
- }
-
- var reg *registry.Registry
+ var filter registry.Filter
if resolver != nil {
- reg = resolver(containerProcessState)
+ filter = resolver(containerProcessState)
+ } else {
+ filter = registry.NewSimpleFilter()
}
+ filter.SetSeccompFile(os.NewFile(fd, fmt.Sprintf("seccomp:[%s]", containerProcessState.State.ID)))
- return reg, os.NewFile(fd, "seccomp-fd"), nil
+ return filter, nil
}
// notifHandler handles seccomp notifications and responses
-func notifHandler(reg *registry.Registry, seccompFile *os.File) {
+func notifHandler(filter registry.Filter) {
+ seccompFile := filter.SeccompFile()
+ if seccompFile == nil {
+ panic("SeccompFile not set")
+ }
+
fd := libseccomp.ScmpFd(seccompFile.Fd())
defer func() {
log.WithFields(log.Fields{
"fd": fd,
}).Debug("Closing seccomp fd")
seccompFile.Close()
+ seccompFile = nil
}()
for {
@@ -140,10 +192,10 @@ func notifHandler(reg *registry.Registry, seccompFile *os.File) {
Flags: libseccomp.NotifRespFlagContinue,
}
- if reg != nil {
- handler, ok := reg.SyscallHandler[syscallName]
+ if filter != nil {
+ handler, ok := filter.LookupHandler(syscallName)
if ok {
- result := handler(fd, req)
+ result := handler(filter, req)
if result.Intr {
log.WithFields(log.Fields{
"fd": fd,
@@ -202,7 +254,7 @@ func StartAgent(socketFile string, resolver registry.ResolverFunc) error {
return fmt.Errorf("cannot get socket: %v\n", err)
}
- reg, newSeccompFile, err := receiveNewSeccompFile(resolver, int(socket.Fd()))
+ reg, err := receiveNewSeccompFile(resolver, int(socket.Fd()))
if err != nil {
log.WithFields(log.Fields{
"socket": socketFile,
@@ -211,7 +263,7 @@ func StartAgent(socketFile string, resolver registry.ResolverFunc) error {
}
socket.Close()
- go notifHandler(reg, newSeccompFile)
+ go notifHandler(reg)
}
}
diff --git a/pkg/handlers/error.go b/pkg/handlers/error.go
index 7cdb2dac..3a355ca5 100644
--- a/pkg/handlers/error.go
+++ b/pkg/handlers/error.go
@@ -4,10 +4,35 @@ import (
"github.com/kinvolk/seccompagent/pkg/registry"
libseccomp "github.com/seccomp/libseccomp-golang"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ keyErrorSeq = "ErrorSeq"
)
func Error(err error) registry.HandlerFunc {
- return func(fd libseccomp.ScmpFd, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+ return func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
return registry.HandlerResultErrno(err)
}
}
+
+func ErrorSeq() registry.HandlerFunc { // ErrorSeq returns a handler that always fails the syscall, cycling the returned errno through 1..4 across calls
+	return func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+		var seq *int
+		i := filter.Value(keyErrorSeq) // per-filter counter persisted in the filter's key/value store
+		if i == nil {
+			newVal := 0
+			filter.SetValue(keyErrorSeq, &newVal)
+			seq = &newVal
+		} else {
+			seq = i.(*int) // NOTE(review): panics if a non-*int is ever stored under keyErrorSeq — confirm the key stays private to this file
+		}
+
+		*seq += 1
+		if *seq >= 5 { // wrap back to 1 so the errno stays in 1..4
+			*seq = 1
+		}
+		return registry.HandlerResultErrno(unix.Errno(*seq))
+	}
+}
diff --git a/pkg/handlers/exec.go b/pkg/handlers/exec.go
index 0abc762b..9169fc18 100644
--- a/pkg/handlers/exec.go
+++ b/pkg/handlers/exec.go
@@ -15,7 +15,7 @@ import (
)
func ExecCondition(filePattern string, duration time.Duration) registry.HandlerFunc {
- return func(fd libseccomp.ScmpFd, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+ return func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
// This handlers does not change the behaviour but just delay the return
result = registry.HandlerResult{Flags: libseccomp.NotifRespFlagContinue}
@@ -25,23 +25,23 @@ func ExecCondition(filePattern string, duration time.Duration) registry.HandlerF
}
defer memFile.Close()
- if err := libseccomp.NotifIDValid(fd, req.ID); err != nil {
+ if err := filter.NotifIDValid(req); err != nil {
return registry.HandlerResult{Intr: true}
}
fileName, err := readarg.ReadString(memFile, int64(req.Data.Args[0]))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot read argument")
return
}
if fileName == filePattern {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"filename": fileName,
"file-pattern": filePattern,
@@ -50,7 +50,7 @@ func ExecCondition(filePattern string, duration time.Duration) registry.HandlerF
time.Sleep(duration)
} else {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"filename": fileName,
"file-pattern": filePattern,
@@ -66,7 +66,7 @@ func ExecSidecars(podCtx *kuberesolver.PodContext, sidecarsList string, duration
sidecars[sidecar] = struct{}{}
}
- return func(fd libseccomp.ScmpFd, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+ return func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
// This handlers does not change the behaviour but just delay the return
result = registry.HandlerResult{Flags: libseccomp.NotifRespFlagContinue}
@@ -82,7 +82,7 @@ func ExecSidecars(podCtx *kuberesolver.PodContext, sidecarsList string, duration
// Sidecars can go on
if _, ok := sidecars[podCtx.Container]; ok {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"container": podCtx.Container,
}).Debug("Execve: found sidecar")
@@ -95,7 +95,7 @@ func ExecSidecars(podCtx *kuberesolver.PodContext, sidecarsList string, duration
err := unix.Stat(fmt.Sprintf("/proc/%d", req.Pid), &stat)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"container": podCtx.Container,
}).Error("Execve: cannot read procfs")
@@ -105,7 +105,7 @@ func ExecSidecars(podCtx *kuberesolver.PodContext, sidecarsList string, duration
diff := ctime.Add(duration).Sub(time.Now())
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"container": podCtx.Container,
"ctime": ctime.String(),
diff --git a/pkg/handlers/mkdir.go b/pkg/handlers/mkdir.go
index 18cbc915..f2483019 100644
--- a/pkg/handlers/mkdir.go
+++ b/pkg/handlers/mkdir.go
@@ -37,23 +37,23 @@ func runMkdirInNamespaces(param []byte) string {
}
func MkdirWithSuffix(suffix string) registry.HandlerFunc {
- return func(fd libseccomp.ScmpFd, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+ return func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
memFile, err := readarg.OpenMem(req.Pid)
if err != nil {
return registry.HandlerResult{Flags: libseccomp.NotifRespFlagContinue}
}
defer memFile.Close()
- if err := libseccomp.NotifIDValid(fd, req.ID); err != nil {
+ if err := filter.NotifIDValid(req); err != nil {
return registry.HandlerResultIntr()
}
fileName, err := readarg.ReadString(memFile, int64(req.Data.Args[0]))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot read argument")
return registry.HandlerResultErrno(unix.EFAULT)
}
@@ -67,9 +67,9 @@ func MkdirWithSuffix(suffix string) registry.HandlerFunc {
mntns, err := nsenter.OpenNamespace(req.Pid, "mnt")
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot open namespace")
return registry.HandlerResultErrno(unix.EPERM)
}
@@ -78,9 +78,9 @@ func MkdirWithSuffix(suffix string) registry.HandlerFunc {
root, err := nsenter.OpenRoot(req.Pid)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot open root")
return registry.HandlerResultErrno(unix.EPERM)
}
@@ -89,19 +89,19 @@ func MkdirWithSuffix(suffix string) registry.HandlerFunc {
cwd, err := nsenter.OpenCwd(req.Pid)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot open cwd")
return registry.HandlerResultErrno(unix.EPERM)
}
defer cwd.Close()
- if err := libseccomp.NotifIDValid(fd, req.ID); err != nil {
+ if err := filter.NotifIDValid(req); err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "req": req,
- "err": err,
+ "filter": filter.Name(),
+ "req": req,
+ "err": err,
}).Debug("Notification no longer valid")
return registry.HandlerResultIntr()
}
@@ -109,7 +109,7 @@ func MkdirWithSuffix(suffix string) registry.HandlerFunc {
output, err := nsenter.Run(root, cwd, mntns, nil, nil, params)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"output": output,
"err": err,
@@ -119,7 +119,7 @@ func MkdirWithSuffix(suffix string) registry.HandlerFunc {
errno, err := strconv.Atoi(string(output))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"output": output,
"err": err,
diff --git a/pkg/handlers/mount.go b/pkg/handlers/mount.go
index e2529ad7..e4025b8b 100644
--- a/pkg/handlers/mount.go
+++ b/pkg/handlers/mount.go
@@ -40,50 +40,50 @@ func runMountInNamespaces(param []byte) string {
}
func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
- return func(fd libseccomp.ScmpFd, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+ return func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
memFile, err := readarg.OpenMem(req.Pid)
if err != nil {
return registry.HandlerResultErrno(unix.EPERM)
}
defer memFile.Close()
- if err := libseccomp.NotifIDValid(fd, req.ID); err != nil {
+ if err := filter.NotifIDValid(req); err != nil {
return registry.HandlerResultIntr()
}
source, err := readarg.ReadString(memFile, int64(req.Data.Args[0]))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "arg": 0,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "arg": 0,
+ "err": err,
}).Error("Cannot read argument")
return registry.HandlerResultErrno(unix.EFAULT)
}
dest, err := readarg.ReadString(memFile, int64(req.Data.Args[1]))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "arg": 1,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "arg": 1,
+ "err": err,
}).Error("Cannot read argument")
return registry.HandlerResultErrno(unix.EFAULT)
}
filesystem, err := readarg.ReadString(memFile, int64(req.Data.Args[2]))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "arg": 2,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "arg": 2,
+ "err": err,
}).Error("Cannot read argument")
return registry.HandlerResultErrno(unix.EFAULT)
}
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"source": source,
"dest": dest,
@@ -106,10 +106,10 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
mntns, err := nsenter.OpenNamespace(req.Pid, "mnt")
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "kind": "mnt",
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "kind": "mnt",
+ "err": err,
}).Error("Cannot open namespace")
return registry.HandlerResultErrno(unix.EPERM)
}
@@ -118,10 +118,10 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
netns, err := nsenter.OpenNamespace(req.Pid, "net")
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "kind": "net",
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "kind": "net",
+ "err": err,
}).Error("Cannot open namespace")
return registry.HandlerResultErrno(unix.EPERM)
}
@@ -130,10 +130,10 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
pidns, err := nsenter.OpenNamespace(req.Pid, "pid")
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "kind": "pid",
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "kind": "pid",
+ "err": err,
}).Error("Cannot open namespace")
return registry.HandlerResultErrno(unix.EPERM)
}
@@ -142,9 +142,9 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
root, err := nsenter.OpenRoot(req.Pid)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot open root")
return registry.HandlerResultErrno(unix.EPERM)
}
@@ -153,19 +153,19 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
cwd, err := nsenter.OpenCwd(req.Pid)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "pid": req.Pid,
- "err": err,
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "err": err,
}).Error("Cannot open cwd")
return registry.HandlerResultErrno(unix.EPERM)
}
defer cwd.Close()
- if err := libseccomp.NotifIDValid(fd, req.ID); err != nil {
+ if err := filter.NotifIDValid(req); err != nil {
log.WithFields(log.Fields{
- "fd": fd,
- "req": req,
- "err": err,
+ "filter": filter.Name(),
+ "req": req,
+ "err": err,
}).Debug("Notification no longer valid")
return registry.HandlerResultIntr()
}
@@ -173,7 +173,7 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
output, err := nsenter.Run(root, cwd, mntns, netns, pidns, params)
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"output": output,
"err": err,
@@ -183,7 +183,7 @@ func Mount(allowedFilesystems map[string]struct{}) registry.HandlerFunc {
errno, err := strconv.Atoi(string(output))
if err != nil {
log.WithFields(log.Fields{
- "fd": fd,
+ "filter": filter.Name(),
"pid": req.Pid,
"output": output,
"err": err,
diff --git a/pkg/kuberesolver/kuberesolver.go b/pkg/kuberesolver/kuberesolver.go
index 15ab1b09..9f3016de 100644
--- a/pkg/kuberesolver/kuberesolver.go
+++ b/pkg/kuberesolver/kuberesolver.go
@@ -1,7 +1,7 @@
package kuberesolver
import (
- "errors"
+ "fmt"
"os"
"strings"
@@ -44,7 +44,7 @@ type PodContext struct {
Pid1 int
}
-type KubeResolverFunc func(pod *PodContext, metadata map[string]string) *registry.Registry
+type KubeResolverFunc func(pod *PodContext, metadata map[string]string) registry.Filter
func parseKV(metadata string) map[string]string {
vars := map[string]string{}
@@ -88,10 +88,10 @@ func KubeResolver(f KubeResolverFunc) (registry.ResolverFunc, error) {
nodeName := os.Getenv("NODE_NAME")
k8sClient, err := k8s.NewK8sClient(nodeName)
if err != nil {
- return nil, errors.New("cannot create kubernetes client")
+ return nil, fmt.Errorf("cannot create kubernetes client: %v", err)
}
- return func(state *specs.ContainerProcessState) *registry.Registry {
+ return func(state *specs.ContainerProcessState) registry.Filter {
vars := parseKV(state.Metadata)
podCtx := readAnnotations(state.State.Annotations)
diff --git a/pkg/opa/eval.go b/pkg/opa/eval.go
new file mode 100644
index 00000000..10d191cc
--- /dev/null
+++ b/pkg/opa/eval.go
@@ -0,0 +1,217 @@
+package opa
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "text/template"
+
+ "github.com/kinvolk/seccompagent/pkg/handlers"
+ "github.com/kinvolk/seccompagent/pkg/kuberesolver"
+ "github.com/kinvolk/seccompagent/pkg/registry"
+ "github.com/open-policy-agent/opa/rego"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+func eval(filter registry.Filter, req *libseccomp.ScmpNotifReq, // evaluates the OPA policy for one syscall notification and maps the first returned action to a handler result
+	policy string, podCtx *kuberesolver.PodContext,
+	syscallName string, args [6]string) (result registry.HandlerResult) {
+
+	result = registry.HandlerResult{Flags: libseccomp.NotifRespFlagContinue}
+
+	input := map[string]interface{}{ // policy input document: syscall name, stringified args, pod identity
+		"syscall": syscallName,
+		"arg0":    args[0],
+		"arg1":    args[1],
+		"arg2":    args[2],
+		"arg3":    args[3],
+		"arg4":    args[4],
+		"arg5":    args[5],
+		"pod": map[string]interface{}{
+			"namespace": podCtx.Namespace,
+			"name":      podCtx.Pod,
+			"container": podCtx.Container,
+		},
+	}
+
+	ctx := context.TODO()
+	opaActionsQuery, err := rego.New( // NOTE(review): the query is re-prepared on every notification; preparing once per policy would be cheaper
+		rego.Query("actions = data.syscall.authz.action"),
+		rego.Module("example.rego", policy),
+	).PrepareForEval(ctx)
+	if err != nil {
+		log.WithFields(log.Fields{
+			"arg":   args,
+			"input": input,
+			"rego":  policy,
+			"err":   err,
+		}).Error("Cannot prepare rego for allow evaluation")
+		result = registry.HandlerResultErrno(unix.ENOSYS)
+		return
+	}
+
+	results, err := opaActionsQuery.Eval(ctx, rego.EvalInput(input))
+	if err != nil {
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"input":   input,
+			"err":     err,
+		}).Error("OPA evaluation error")
+
+		result = registry.HandlerResultErrno(unix.ENOSYS)
+		return
+	}
+
+	if len(results) == 0 { // undefined result: the policy produced no bindings at all
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"input":   input,
+			"err":     err,
+		}).Error("OPA undefined result")
+
+		result = registry.HandlerResultErrno(unix.ENOSYS)
+		return
+	}
+
+	actions, ok := results[0].Bindings["actions"].([]interface{})
+	if !ok {
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"results": results,
+			"actions": fmt.Sprintf("actions=%T", results[0].Bindings["actions"]),
+			"input":   input,
+			"err":     err,
+		}).Error("OPA unexpected result type")
+
+		result = registry.HandlerResultErrno(unix.ENOSYS)
+		return
+	}
+	if len(actions) == 0 {
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"results": results,
+			"input":   input,
+			"err":     err,
+		}).Error("OPA no actions found")
+
+		result = registry.HandlerResultErrno(unix.ENOSYS)
+		return
+	}
+	action, ok := actions[0].(map[string]interface{}) // only the first action is honored; any further actions are ignored
+	if !ok {
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"results": results,
+			"input":   input,
+			"err":     err,
+		}).Error("OPA unexpected action type found")
+
+		result = registry.HandlerResultErrno(unix.ENOSYS)
+		return
+	}
+
+	// Handle result/decision.
+	// fmt.Printf("%+v", results) => [{Expressions:[true] Bindings:map[x:true]}]
+	if passthrough, ok := action["passthrough"]; ok && passthrough.(bool) { // NOTE(review): this type assertion panics if "passthrough" is not a bool — consider a checked assertion
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"input":   input,
+		}).Trace("OPA action: passthrough")
+
+		result = registry.HandlerResultContinue()
+		return
+	}
+
+	if errno, ok := action["errno"]; ok && errno != "" {
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"input":   input,
+			"errno":   errno,
+		}).Trace("OPA action: errno")
+
+		result = registry.HandlerResultErrno(unix.EPERM) // NOTE(review): the policy's errno value is only logged; EPERM is always returned — confirm intended
+		return
+	}
+
+	if handler, ok := action["handler"]; ok && handler != "" {
+		log.WithFields(log.Fields{
+			"syscall": syscallName,
+			"args":    args,
+			"rego":    policy,
+			"input":   input,
+			"handler": handler,
+		}).Trace("OPA action: handler")
+
+		switch handler {
+		case "mkdir":
+			var suffix strings.Builder
+			if tmplStr, ok := action["suffix"].(string); ok { // the suffix is a text/template rendered against the pod context
+				tmpl, err := template.New("mkdirTmpl").Parse(tmplStr)
+				if err == nil {
+					err = tmpl.Execute(&suffix, podCtx)
+					if err == nil {
+						result = handlers.MkdirWithSuffix(suffix.String())(filter, req)
+					} else {
+						log.WithFields(log.Fields{
+							"syscall": syscallName,
+							"args":    args,
+							"rego":    policy,
+							"input":   input,
+							"handler": handler,
+							"tmpl":    tmplStr,
+							"err":     err,
+						}).Error("OPA: cannot execute template for mkdir suffix")
+
+						result = registry.HandlerResultErrno(unix.ENOSYS)
+					}
+				} else {
+					log.WithFields(log.Fields{
+						"syscall": syscallName,
+						"args":    args,
+						"rego":    policy,
+						"input":   input,
+						"handler": handler,
+						"tmpl":    tmplStr,
+						"err":     err,
+					}).Error("OPA: cannot parse template for mkdir suffix")
+					result = registry.HandlerResultErrno(unix.ENOSYS)
+				}
+			} else {
+				log.WithFields(log.Fields{
+					"syscall": syscallName,
+					"args":    args,
+					"rego":    policy,
+					"input":   input,
+					"handler": handler,
+				}).Error("OPA: mkdir suffix not found")
+				result = registry.HandlerResultErrno(unix.ENOSYS)
+			}
+		case "mount":
+			allowedFilesystems := map[string]struct{}{args[2]: struct{}{}} // allow exactly the fstype the tracee asked for; Mount re-validates the request
+			result = handlers.Mount(allowedFilesystems)(filter, req)
+		default:
+			result = registry.HandlerResultErrno(unix.ENOSYS)
+		}
+		return
+	}
+
+	result = registry.HandlerResultErrno(unix.ENOSYS)
+
+	return
+}
diff --git a/pkg/opa/registry.go b/pkg/opa/registry.go
new file mode 100644
index 00000000..64328c8d
--- /dev/null
+++ b/pkg/opa/registry.go
@@ -0,0 +1,125 @@
+package opa
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/kinvolk/seccompagent/pkg/kuberesolver"
+ "github.com/kinvolk/seccompagent/pkg/readarg"
+ "github.com/kinvolk/seccompagent/pkg/registry"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+type OpaFilter struct {
+ seccompFile *os.File
+ podCtx *kuberesolver.PodContext
+}
+
+func NewOpaFilter(podCtx *kuberesolver.PodContext) *OpaFilter {
+ return &OpaFilter{
+ podCtx: podCtx,
+ }
+}
+
+func (f *OpaFilter) Name() (name string) {
+ if f.seccompFile != nil {
+ name = f.seccompFile.Name()
+ }
+ return
+}
+
+func (f *OpaFilter) ShortName() (name string) {
+ if f.seccompFile != nil {
+ name = fmt.Sprintf("%d", f.seccompFile.Fd())
+ }
+ return
+}
+
+func (f *OpaFilter) SetSeccompFile(file *os.File) {
+ f.seccompFile = file
+}
+
+func (f *OpaFilter) SeccompFile() *os.File {
+ return f.seccompFile
+}
+
+func (f *OpaFilter) AddHandler(syscallName string, h registry.HandlerFunc) {
+}
+
+var syscallArgs = map[string][6]bool{
+	"mkdir": {true, false, false, false, false, false},
+	"mount": {true, true, true, false, false, false},
+}
+
+func (f *OpaFilter) LookupHandler(syscallName string) (h registry.HandlerFunc, ok bool) {
+ handler := func(filter registry.Filter, req *libseccomp.ScmpNotifReq) (result registry.HandlerResult) {
+ result = registry.HandlerResult{Flags: libseccomp.NotifRespFlagContinue}
+
+ memFile, err := readarg.OpenMem(req.Pid)
+ if err != nil {
+ return
+ }
+ defer memFile.Close()
+
+ if err := filter.NotifIDValid(req); err != nil {
+ return registry.HandlerResult{Intr: true}
+ }
+
+ var args [6]string
+ if argsSet, ok := syscallArgs[syscallName]; ok {
+ for i := 0; i < 6; i++ {
+ if !argsSet[i] {
+ continue
+ }
+
+ args[i], err = readarg.ReadString(memFile, int64(req.Data.Args[i]))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "filter": filter.Name(),
+ "pid": req.Pid,
+ "i": i,
+ "err": err,
+ }).Error("Cannot read argument")
+ return
+ }
+ }
+ }
+
+ content, err := ioutil.ReadFile("/etc/seccomp-agent/policies.rego")
+ if err != nil {
+ log.WithFields(log.Fields{
+ "err": err,
+ }).Error("Cannot read policies.rego")
+ result = registry.HandlerResultErrno(unix.ENOSYS)
+ return
+ }
+ policy := string(content)
+
+ result = eval(filter, req, policy, f.podCtx, syscallName, args)
+
+ log.WithFields(log.Fields{
+ "podCtx": f.podCtx,
+ "syscall": syscallName,
+ "args": args,
+ "result": result,
+ }).Trace("Result from OPA query")
+
+ return
+ }
+ return handler, true
+}
+
+func (f *OpaFilter) NotifIDValid(req *libseccomp.ScmpNotifReq) error {
+ return libseccomp.NotifIDValid(libseccomp.ScmpFd(f.seccompFile.Fd()), req.ID)
+
+}
+
+func (f *OpaFilter) SetValue(key string, val interface{}) {
+}
+
+func (f *OpaFilter) Value(key string) interface{} {
+ return nil
+}
diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go
index 33c80f64..87becde7 100644
--- a/pkg/registry/registry.go
+++ b/pkg/registry/registry.go
@@ -1,6 +1,9 @@
package registry
import (
+ "fmt"
+ "os"
+
specs "github.com/opencontainers/runtime-spec/specs-go"
libseccomp "github.com/seccomp/libseccomp-golang"
"golang.org/x/sys/unix"
@@ -13,7 +16,8 @@ type HandlerResult struct {
Flags uint32
}
-type HandlerFunc func(libseccomp.ScmpFd, *libseccomp.ScmpNotifReq) HandlerResult
+type ResolverFunc func(state *specs.ContainerProcessState) Filter
+type HandlerFunc func(Filter, *libseccomp.ScmpNotifReq) HandlerResult
// Helper functions for handlers
func HandlerResultIntr() HandlerResult {
@@ -40,20 +44,75 @@ func HandlerResultSuccess() HandlerResult {
return HandlerResult{}
}
-// Registry
+// Filter contains a set of handlers for a specific seccomp filter
+type Filter interface {
+ Name() string
+ ShortName() string
+
+ SetSeccompFile(file *os.File)
+ SeccompFile() *os.File
+
+ NotifIDValid(*libseccomp.ScmpNotifReq) error
+
+ AddHandler(string, HandlerFunc)
+ LookupHandler(string) (HandlerFunc, bool)
+
+ SetValue(key string, val interface{})
+ Value(key string) interface{}
+}
+
+type SimpleFilter struct {
+ seccompFile *os.File
+ syscallHandler map[string]HandlerFunc
+ values map[string]interface{}
+}
-type Registry struct {
- SyscallHandler map[string]HandlerFunc
+func NewSimpleFilter() *SimpleFilter {
+ return &SimpleFilter{
+ syscallHandler: map[string]HandlerFunc{},
+ values: map[string]interface{}{},
+ }
}
-type ResolverFunc func(state *specs.ContainerProcessState) *Registry
+func (f *SimpleFilter) Name() (name string) {
+ if f.seccompFile != nil {
+ name = f.seccompFile.Name()
+ }
+ return
+}
-func New() *Registry {
- return &Registry{
- SyscallHandler: map[string]HandlerFunc{},
+func (f *SimpleFilter) ShortName() (name string) {
+ if f.seccompFile != nil {
+ name = fmt.Sprintf("%d", f.seccompFile.Fd())
}
+ return
+}
+
+func (f *SimpleFilter) SetSeccompFile(file *os.File) {
+ f.seccompFile = file
+}
+
+func (f *SimpleFilter) SeccompFile() *os.File {
+ return f.seccompFile
+}
+
+func (f *SimpleFilter) AddHandler(syscallName string, h HandlerFunc) {
+ f.syscallHandler[syscallName] = h
+}
+func (f *SimpleFilter) LookupHandler(syscallName string) (h HandlerFunc, ok bool) {
+ h, ok = f.syscallHandler[syscallName]
+ return
+}
+
+func (f *SimpleFilter) NotifIDValid(req *libseccomp.ScmpNotifReq) error {
+ return libseccomp.NotifIDValid(libseccomp.ScmpFd(f.seccompFile.Fd()), req.ID)
+
+}
+
+func (f *SimpleFilter) SetValue(key string, val interface{}) {
+ f.values[key] = val
}
-func (r *Registry) Add(name string, f HandlerFunc) {
- r.SyscallHandler[name] = f
+func (f *SimpleFilter) Value(key string) interface{} {
+ return f.values[key]
}
diff --git a/vendor/github.com/OneOfOne/xxhash/.gitignore b/vendor/github.com/OneOfOne/xxhash/.gitignore
new file mode 100644
index 00000000..f4faa7f8
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/.gitignore
@@ -0,0 +1,4 @@
+*.txt
+*.pprof
+cmap2/
+cache/
diff --git a/vendor/github.com/OneOfOne/xxhash/.travis.yml b/vendor/github.com/OneOfOne/xxhash/.travis.yml
new file mode 100644
index 00000000..1c6dc55b
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+sudo: false
+
+go:
+ - "1.10"
+ - "1.11"
+ - "1.12"
+ - master
+
+script:
+ - go test -tags safe ./...
+ - go test ./...
+ -
diff --git a/vendor/github.com/OneOfOne/xxhash/LICENSE b/vendor/github.com/OneOfOne/xxhash/LICENSE
new file mode 100644
index 00000000..9e30b4f3
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/LICENSE
@@ -0,0 +1,187 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
diff --git a/vendor/github.com/OneOfOne/xxhash/README.md b/vendor/github.com/OneOfOne/xxhash/README.md
new file mode 100644
index 00000000..8eea28c3
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/README.md
@@ -0,0 +1,74 @@
+# xxhash [](https://godoc.org/github.com/OneOfOne/xxhash) [](https://travis-ci.org/OneOfOne/xxhash) [](https://gocover.io/github.com/OneOfOne/xxhash)
+
+This is a native Go implementation of the excellent [xxhash](https://github.com/Cyan4973/xxHash)* algorithm, an extremely fast non-cryptographic Hash algorithm, working at speeds close to RAM limits.
+
+* The C implementation is ([Copyright](https://github.com/Cyan4973/xxHash/blob/master/LICENSE) (c) 2012-2014, Yann Collet)
+
+## Install
+
+ go get github.com/OneOfOne/xxhash
+
+## Features
+
+* On Go 1.7+ the pure go version is faster than CGO for all inputs.
+* Supports ChecksumString{32,64} xxhash{32,64}.WriteString, which uses no copies when it can, falls back to copy on appengine.
+* The native version falls back to a less optimized version on appengine due to the lack of unsafe.
+* Almost as fast as the mostly pure assembly version written by the brilliant [cespare](https://github.com/cespare/xxhash), while also supporting seeds.
+* To manually toggle the appengine version build with `-tags safe`.
+
+## Benchmark
+
+### Core i7-4790 @ 3.60GHz, Linux 4.12.6-1-ARCH (64bit), Go tip (+ff90f4af66 2017-08-19)
+
+```bash
+➤ go test -bench '64' -count 5 -tags cespare | benchstat /dev/stdin
+name time/op
+
+# https://github.com/cespare/xxhash
+XXSum64Cespare/Func-8 160ns ± 2%
+XXSum64Cespare/Struct-8 173ns ± 1%
+XXSum64ShortCespare/Func-8 6.78ns ± 1%
+XXSum64ShortCespare/Struct-8 19.6ns ± 2%
+
+# this package (default mode, using unsafe)
+XXSum64/Func-8 170ns ± 1%
+XXSum64/Struct-8 182ns ± 1%
+XXSum64Short/Func-8 13.5ns ± 3%
+XXSum64Short/Struct-8 20.4ns ± 0%
+
+# this package (appengine, *not* using unsafe)
+XXSum64/Func-8 241ns ± 5%
+XXSum64/Struct-8 243ns ± 6%
+XXSum64Short/Func-8 15.2ns ± 2%
+XXSum64Short/Struct-8 23.7ns ± 5%
+
+CRC64ISO-8 1.23µs ± 1%
+CRC64ISOString-8 2.71µs ± 4%
+CRC64ISOShort-8 22.2ns ± 3%
+
+Fnv64-8 2.34µs ± 1%
+Fnv64Short-8 74.7ns ± 8%
+```
+
+## Usage
+
+```go
+ h := xxhash.New64()
+ // r, err := os.Open("......")
+ // defer f.Close()
+ r := strings.NewReader(F)
+ io.Copy(h, r)
+ fmt.Println("xxhash.Backend:", xxhash.Backend)
+ fmt.Println("File checksum:", h.Sum64())
+```
+
+[playground ](https://play.golang.org/p/wHKBwfu6CPV)
+
+## TODO
+
+* Rewrite the 32bit version to be more optimized.
+* General cleanup as the Go inliner gets smarter.
+
+## License
+
+This project is released under the Apache v2. license. See [LICENSE](LICENSE) for more details.
diff --git a/vendor/github.com/OneOfOne/xxhash/go.mod b/vendor/github.com/OneOfOne/xxhash/go.mod
new file mode 100644
index 00000000..c6da85e0
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/go.mod
@@ -0,0 +1,3 @@
+module github.com/OneOfOne/xxhash
+
+go 1.11
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash.go b/vendor/github.com/OneOfOne/xxhash/xxhash.go
new file mode 100644
index 00000000..af2496b7
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/xxhash.go
@@ -0,0 +1,294 @@
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash"
+)
+
+const (
+ prime32x1 uint32 = 2654435761
+ prime32x2 uint32 = 2246822519
+ prime32x3 uint32 = 3266489917
+ prime32x4 uint32 = 668265263
+ prime32x5 uint32 = 374761393
+
+ prime64x1 uint64 = 11400714785074694791
+ prime64x2 uint64 = 14029467366897019727
+ prime64x3 uint64 = 1609587929392839161
+ prime64x4 uint64 = 9650029242287828579
+ prime64x5 uint64 = 2870177450012600261
+
+ maxInt32 int32 = (1<<31 - 1)
+
+ // precomputed zero Vs for seed 0
+ zero64x1 = 0x60ea27eeadc0b5d6
+ zero64x2 = 0xc2b2ae3d27d4eb4f
+ zero64x3 = 0x0
+ zero64x4 = 0x61c8864e7a143579
+)
+
+const (
+ magic32 = "xxh\x07"
+ magic64 = "xxh\x08"
+ marshaled32Size = len(magic32) + 4*7 + 16
+ marshaled64Size = len(magic64) + 8*6 + 32 + 1
+)
+
+func NewHash32() hash.Hash { return New32() }
+func NewHash64() hash.Hash { return New64() }
+
+// Checksum32 returns the checksum of the input data with the seed set to 0.
+func Checksum32(in []byte) uint32 {
+ return Checksum32S(in, 0)
+}
+
+// ChecksumString32 returns the checksum of the input data, without creating a copy, with the seed set to 0.
+func ChecksumString32(s string) uint32 {
+ return ChecksumString32S(s, 0)
+}
+
+type XXHash32 struct {
+ mem [16]byte
+ ln, memIdx int32
+ v1, v2, v3, v4 uint32
+ seed uint32
+}
+
+// Size returns the number of bytes Sum will return.
+func (xx *XXHash32) Size() int {
+ return 4
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (xx *XXHash32) BlockSize() int {
+ return 16
+}
+
+// NewS32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the specific seed.
+func NewS32(seed uint32) (xx *XXHash32) {
+ xx = &XXHash32{
+ seed: seed,
+ }
+ xx.Reset()
+ return
+}
+
+// New32 creates a new hash.Hash32 computing the 32bit xxHash checksum starting with the seed set to 0.
+func New32() *XXHash32 {
+ return NewS32(0)
+}
+
+func (xx *XXHash32) Reset() {
+ xx.v1 = xx.seed + prime32x1 + prime32x2
+ xx.v2 = xx.seed + prime32x2
+ xx.v3 = xx.seed
+ xx.v4 = xx.seed - prime32x1
+ xx.ln, xx.memIdx = 0, 0
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xx *XXHash32) Sum(in []byte) []byte {
+ s := xx.Sum32()
+ return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (xx *XXHash32) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaled32Size)
+ b = append(b, magic32...)
+ b = appendUint32(b, xx.v1)
+ b = appendUint32(b, xx.v2)
+ b = appendUint32(b, xx.v3)
+ b = appendUint32(b, xx.v4)
+ b = appendUint32(b, xx.seed)
+ b = appendInt32(b, xx.ln)
+ b = appendInt32(b, xx.memIdx)
+ b = append(b, xx.mem[:]...)
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (xx *XXHash32) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic32) || string(b[:len(magic32)]) != magic32 {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaled32Size {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic32):]
+ b, xx.v1 = consumeUint32(b)
+ b, xx.v2 = consumeUint32(b)
+ b, xx.v3 = consumeUint32(b)
+ b, xx.v4 = consumeUint32(b)
+ b, xx.seed = consumeUint32(b)
+ b, xx.ln = consumeInt32(b)
+ b, xx.memIdx = consumeInt32(b)
+ copy(xx.mem[:], b)
+ return nil
+}
+
+// Checksum64 an alias for Checksum64S(in, 0)
+func Checksum64(in []byte) uint64 {
+ return Checksum64S(in, 0)
+}
+
+// ChecksumString64 returns the checksum of the input data, without creating a copy, with the seed set to 0.
+func ChecksumString64(s string) uint64 {
+ return ChecksumString64S(s, 0)
+}
+
+type XXHash64 struct {
+ v1, v2, v3, v4 uint64
+ seed uint64
+ ln uint64
+ mem [32]byte
+ memIdx int8
+}
+
+// Size returns the number of bytes Sum will return.
+func (xx *XXHash64) Size() int {
+ return 8
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (xx *XXHash64) BlockSize() int {
+ return 32
+}
+
+// NewS64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the specific seed.
+func NewS64(seed uint64) (xx *XXHash64) {
+ xx = &XXHash64{
+ seed: seed,
+ }
+ xx.Reset()
+ return
+}
+
+// New64 creates a new hash.Hash64 computing the 64bit xxHash checksum starting with the seed set to 0x0.
+func New64() *XXHash64 {
+ return NewS64(0)
+}
+
+func (xx *XXHash64) Reset() {
+ xx.ln, xx.memIdx = 0, 0
+ xx.v1, xx.v2, xx.v3, xx.v4 = resetVs64(xx.seed)
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xx *XXHash64) Sum(in []byte) []byte {
+ s := xx.Sum64()
+ return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (xx *XXHash64) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaled64Size)
+ b = append(b, magic64...)
+ b = appendUint64(b, xx.v1)
+ b = appendUint64(b, xx.v2)
+ b = appendUint64(b, xx.v3)
+ b = appendUint64(b, xx.v4)
+ b = appendUint64(b, xx.seed)
+ b = appendUint64(b, xx.ln)
+ b = append(b, byte(xx.memIdx))
+ b = append(b, xx.mem[:]...)
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (xx *XXHash64) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic64) || string(b[:len(magic64)]) != magic64 {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaled64Size {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic64):]
+ b, xx.v1 = consumeUint64(b)
+ b, xx.v2 = consumeUint64(b)
+ b, xx.v3 = consumeUint64(b)
+ b, xx.v4 = consumeUint64(b)
+ b, xx.seed = consumeUint64(b)
+ b, xx.ln = consumeUint64(b)
+ xx.memIdx = int8(b[0])
+ b = b[1:]
+ copy(xx.mem[:], b)
+ return nil
+}
+
+func appendInt32(b []byte, x int32) []byte { return appendUint32(b, uint32(x)) }
+
+func appendUint32(b []byte, x uint32) []byte {
+ var a [4]byte
+ binary.LittleEndian.PutUint32(a[:], x)
+ return append(b, a[:]...)
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeInt32(b []byte) ([]byte, int32) { bn, x := consumeUint32(b); return bn, int32(x) }
+func consumeUint32(b []byte) ([]byte, uint32) { x := u32(b); return b[4:], x }
+func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b); return b[8:], x }
+
+// force the compiler to use ROTL instructions
+
+func rotl32_1(x uint32) uint32 { return (x << 1) | (x >> (32 - 1)) }
+func rotl32_7(x uint32) uint32 { return (x << 7) | (x >> (32 - 7)) }
+func rotl32_11(x uint32) uint32 { return (x << 11) | (x >> (32 - 11)) }
+func rotl32_12(x uint32) uint32 { return (x << 12) | (x >> (32 - 12)) }
+func rotl32_13(x uint32) uint32 { return (x << 13) | (x >> (32 - 13)) }
+func rotl32_17(x uint32) uint32 { return (x << 17) | (x >> (32 - 17)) }
+func rotl32_18(x uint32) uint32 { return (x << 18) | (x >> (32 - 18)) }
+
+func rotl64_1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
+func rotl64_7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
+func rotl64_11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
+func rotl64_12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
+func rotl64_18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
+func rotl64_23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
+func rotl64_27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
+func rotl64_31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
+
+func mix64(h uint64) uint64 {
+ h ^= h >> 33
+ h *= prime64x2
+ h ^= h >> 29
+ h *= prime64x3
+ h ^= h >> 32
+ return h
+}
+
+func resetVs64(seed uint64) (v1, v2, v3, v4 uint64) {
+ if seed == 0 {
+ return zero64x1, zero64x2, zero64x3, zero64x4
+ }
+ return (seed + prime64x1 + prime64x2), (seed + prime64x2), (seed), (seed - prime64x1)
+}
+
+// borrowed from cespare
+func round64(h, v uint64) uint64 {
+ h += v * prime64x2
+ h = rotl64_31(h)
+ h *= prime64x1
+ return h
+}
+
+func mergeRound64(h, v uint64) uint64 {
+ v = round64(0, v)
+ h ^= v
+ h = h*prime64x1 + prime64x4
+ return h
+}
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go b/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
new file mode 100644
index 00000000..ae48e0c5
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/xxhash_go17.go
@@ -0,0 +1,161 @@
+package xxhash
+
+func u32(in []byte) uint32 {
+ return uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+}
+
+func u64(in []byte) uint64 {
+ return uint64(in[0]) | uint64(in[1])<<8 | uint64(in[2])<<16 | uint64(in[3])<<24 | uint64(in[4])<<32 | uint64(in[5])<<40 | uint64(in[6])<<48 | uint64(in[7])<<56
+}
+
+// Checksum32S returns the checksum of the input bytes with the specific seed.
+func Checksum32S(in []byte, seed uint32) (h uint32) {
+ var i int
+
+ if len(in) > 15 {
+ var (
+ v1 = seed + prime32x1 + prime32x2
+ v2 = seed + prime32x2
+ v3 = seed + 0
+ v4 = seed - prime32x1
+ )
+ for ; i < len(in)-15; i += 16 {
+ in := in[i : i+16 : len(in)]
+ v1 += u32(in[0:4:len(in)]) * prime32x2
+ v1 = rotl32_13(v1) * prime32x1
+
+ v2 += u32(in[4:8:len(in)]) * prime32x2
+ v2 = rotl32_13(v2) * prime32x1
+
+ v3 += u32(in[8:12:len(in)]) * prime32x2
+ v3 = rotl32_13(v3) * prime32x1
+
+ v4 += u32(in[12:16:len(in)]) * prime32x2
+ v4 = rotl32_13(v4) * prime32x1
+ }
+
+ h = rotl32_1(v1) + rotl32_7(v2) + rotl32_12(v3) + rotl32_18(v4)
+
+ } else {
+ h = seed + prime32x5
+ }
+
+ h += uint32(len(in))
+ for ; i <= len(in)-4; i += 4 {
+ in := in[i : i+4 : len(in)]
+ h += u32(in[0:4:len(in)]) * prime32x3
+ h = rotl32_17(h) * prime32x4
+ }
+
+ for ; i < len(in); i++ {
+ h += uint32(in[i]) * prime32x5
+ h = rotl32_11(h) * prime32x1
+ }
+
+ h ^= h >> 15
+ h *= prime32x2
+ h ^= h >> 13
+ h *= prime32x3
+ h ^= h >> 16
+
+ return
+}
+
+func (xx *XXHash32) Write(in []byte) (n int, err error) {
+ i, ml := 0, int(xx.memIdx)
+ n = len(in)
+ xx.ln += int32(n)
+
+ if d := 16 - ml; ml > 0 && ml+len(in) > 16 {
+ xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[:d]))
+ ml, in = 16, in[d:len(in):len(in)]
+ } else if ml+len(in) < 16 {
+ xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in))
+ return
+ }
+
+ if ml > 0 {
+ i += 16 - ml
+ xx.memIdx += int32(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
+ in := xx.mem[:16:len(xx.mem)]
+
+ xx.v1 += u32(in[0:4:len(in)]) * prime32x2
+ xx.v1 = rotl32_13(xx.v1) * prime32x1
+
+ xx.v2 += u32(in[4:8:len(in)]) * prime32x2
+ xx.v2 = rotl32_13(xx.v2) * prime32x1
+
+ xx.v3 += u32(in[8:12:len(in)]) * prime32x2
+ xx.v3 = rotl32_13(xx.v3) * prime32x1
+
+ xx.v4 += u32(in[12:16:len(in)]) * prime32x2
+ xx.v4 = rotl32_13(xx.v4) * prime32x1
+
+ xx.memIdx = 0
+ }
+
+ for ; i <= len(in)-16; i += 16 {
+ in := in[i : i+16 : len(in)]
+ xx.v1 += u32(in[0:4:len(in)]) * prime32x2
+ xx.v1 = rotl32_13(xx.v1) * prime32x1
+
+ xx.v2 += u32(in[4:8:len(in)]) * prime32x2
+ xx.v2 = rotl32_13(xx.v2) * prime32x1
+
+ xx.v3 += u32(in[8:12:len(in)]) * prime32x2
+ xx.v3 = rotl32_13(xx.v3) * prime32x1
+
+ xx.v4 += u32(in[12:16:len(in)]) * prime32x2
+ xx.v4 = rotl32_13(xx.v4) * prime32x1
+ }
+
+ if len(in)-i != 0 {
+ xx.memIdx += int32(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
+ }
+
+ return
+}
+
+func (xx *XXHash32) Sum32() (h uint32) {
+ var i int32
+ if xx.ln > 15 {
+ h = rotl32_1(xx.v1) + rotl32_7(xx.v2) + rotl32_12(xx.v3) + rotl32_18(xx.v4)
+ } else {
+ h = xx.seed + prime32x5
+ }
+
+ h += uint32(xx.ln)
+
+ if xx.memIdx > 0 {
+ for ; i < xx.memIdx-3; i += 4 {
+ in := xx.mem[i : i+4 : len(xx.mem)]
+ h += u32(in[0:4:len(in)]) * prime32x3
+ h = rotl32_17(h) * prime32x4
+ }
+
+ for ; i < xx.memIdx; i++ {
+ h += uint32(xx.mem[i]) * prime32x5
+ h = rotl32_11(h) * prime32x1
+ }
+ }
+ h ^= h >> 15
+ h *= prime32x2
+ h ^= h >> 13
+ h *= prime32x3
+ h ^= h >> 16
+
+ return
+}
+
+// Checksum64S returns the 64bit xxhash checksum for a single input
+func Checksum64S(in []byte, seed uint64) uint64 {
+ if len(in) == 0 && seed == 0 {
+ return 0xef46db3751d8e999
+ }
+
+ if len(in) > 31 {
+ return checksum64(in, seed)
+ }
+
+ return checksum64Short(in, seed)
+}
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
new file mode 100644
index 00000000..e92ec29e
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/xxhash_safe.go
@@ -0,0 +1,183 @@
+// +build appengine safe ppc64le ppc64be mipsle mips s390x
+
+package xxhash
+
+// Backend identifies the xxhash backend compiled into this build
+// (the pure-Go bounds-checked implementation).
+const Backend = "GoSafe"
+
+// ChecksumString32S returns the 32-bit checksum of s with the given seed.
+// This safe backend copies s into a []byte before hashing.
+func ChecksumString32S(s string, seed uint32) uint32 {
+	return Checksum32S([]byte(s), seed)
+}
+
+// WriteString adds s to the running 32-bit hash state. This safe backend
+// copies s into a []byte; the empty string is a no-op.
+func (xx *XXHash32) WriteString(s string) (int, error) {
+	if len(s) == 0 {
+		return 0, nil
+	}
+	return xx.Write([]byte(s))
+}
+
+// ChecksumString64S returns the 64-bit checksum of s with the given seed.
+// This safe backend copies s into a []byte before hashing.
+func ChecksumString64S(s string, seed uint64) uint64 {
+	return Checksum64S([]byte(s), seed)
+}
+
+// WriteString adds s to the running 64-bit hash state. This safe backend
+// copies s into a []byte; the empty string is a no-op.
+func (xx *XXHash64) WriteString(s string) (int, error) {
+	if len(s) == 0 {
+		return 0, nil
+	}
+	return xx.Write([]byte(s))
+}
+
+// checksum64 is the one-shot 64-bit hash for inputs of 32+ bytes: it runs
+// four 8-byte lanes over every full 32-byte stripe, merges the lanes, then
+// folds the remaining tail with 8-, 4- and 1-byte steps.
+// The three-index slices (a[i:j:len(a)]) cap capacity, presumably to help
+// the compiler elide bounds checks — TODO confirm.
+func checksum64(in []byte, seed uint64) (h uint64) {
+	var (
+		v1, v2, v3, v4 = resetVs64(seed)
+
+		i int
+	)
+
+	// One round per lane for every full 32-byte stripe.
+	for ; i < len(in)-31; i += 32 {
+		in := in[i : i+32 : len(in)]
+		v1 = round64(v1, u64(in[0:8:len(in)]))
+		v2 = round64(v2, u64(in[8:16:len(in)]))
+		v3 = round64(v3, u64(in[16:24:len(in)]))
+		v4 = round64(v4, u64(in[24:32:len(in)]))
+	}
+
+	// Fold and merge the four lane accumulators.
+	h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
+
+	h = mergeRound64(h, v1)
+	h = mergeRound64(h, v2)
+	h = mergeRound64(h, v3)
+	h = mergeRound64(h, v4)
+
+	h += uint64(len(in))
+
+	// Tail: remaining 8-byte words, then a 4-byte word, then single bytes.
+	for ; i < len(in)-7; i += 8 {
+		h ^= round64(0, u64(in[i:len(in):len(in)]))
+		h = rotl64_27(h)*prime64x1 + prime64x4
+	}
+
+	for ; i < len(in)-3; i += 4 {
+		h ^= uint64(u32(in[i:len(in):len(in)])) * prime64x1
+		h = rotl64_23(h)*prime64x2 + prime64x3
+	}
+
+	for ; i < len(in); i++ {
+		h ^= uint64(in[i]) * prime64x5
+		h = rotl64_11(h) * prime64x1
+	}
+
+	return mix64(h)
+}
+
+// checksum64Short is the one-shot 64-bit hash path for short inputs
+// (called for len(in) <= 31): no lane accumulators, just the tail folding
+// of 8-, 4- and 1-byte steps followed by the final mix.
+func checksum64Short(in []byte, seed uint64) uint64 {
+	var (
+		h = seed + prime64x5 + uint64(len(in))
+		i int
+	)
+
+	for ; i < len(in)-7; i += 8 {
+		k := u64(in[i : i+8 : len(in)])
+		h ^= round64(0, k)
+		h = rotl64_27(h)*prime64x1 + prime64x4
+	}
+
+	for ; i < len(in)-3; i += 4 {
+		h ^= uint64(u32(in[i:i+4:len(in)])) * prime64x1
+		h = rotl64_23(h)*prime64x2 + prime64x3
+	}
+
+	for ; i < len(in); i++ {
+		h ^= uint64(in[i]) * prime64x5
+		h = rotl64_11(h) * prime64x1
+	}
+
+	return mix64(h)
+}
+
+// Write adds in to the running 64-bit hash state, buffering partial
+// 32-byte blocks in xx.mem and consuming full blocks as they complete.
+// It always returns len(in) and a nil error.
+func (xx *XXHash64) Write(in []byte) (n int, err error) {
+	var (
+		ml = int(xx.memIdx) // bytes already buffered
+		d  = 32 - ml        // bytes needed to fill the buffer
+	)
+
+	n = len(in)
+	xx.ln += uint64(n)
+
+	// Not enough for a full block yet: just buffer and return.
+	if ml+len(in) < 32 {
+		xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in))
+		return
+	}
+
+	i, v1, v2, v3, v4 := 0, xx.v1, xx.v2, xx.v3, xx.v4
+	// NOTE(review): when ml > 0 and ml+len(in) == 32 exactly, this branch
+	// is skipped and the full block stays buffered until Sum64; the unsafe
+	// backend uses "> 31" here — verify both backends agree at that boundary.
+	if ml > 0 && ml+len(in) > 32 {
+		// Top up the buffer to a full 32-byte block and consume it.
+		xx.memIdx += int8(copy(xx.mem[xx.memIdx:len(xx.mem):len(xx.mem)], in[:d:len(in)]))
+		in = in[d:len(in):len(in)]
+
+		in := xx.mem[0:32:len(xx.mem)]
+
+		v1 = round64(v1, u64(in[0:8:len(in)]))
+		v2 = round64(v2, u64(in[8:16:len(in)]))
+		v3 = round64(v3, u64(in[16:24:len(in)]))
+		v4 = round64(v4, u64(in[24:32:len(in)]))
+
+		xx.memIdx = 0
+	}
+
+	// Consume the remaining full 32-byte stripes directly from in.
+	for ; i < len(in)-31; i += 32 {
+		in := in[i : i+32 : len(in)]
+		v1 = round64(v1, u64(in[0:8:len(in)]))
+		v2 = round64(v2, u64(in[8:16:len(in)]))
+		v3 = round64(v3, u64(in[16:24:len(in)]))
+		v4 = round64(v4, u64(in[24:32:len(in)]))
+	}
+
+	// Buffer whatever is left over for the next Write/Sum64.
+	if len(in)-i != 0 {
+		xx.memIdx += int8(copy(xx.mem[xx.memIdx:], in[i:len(in):len(in)]))
+	}
+
+	xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
+
+	return
+}
+
+// Sum64 returns the 64-bit digest of all data written so far. It only
+// reads the accumulated state, so Write may be called again afterwards.
+func (xx *XXHash64) Sum64() (h uint64) {
+	var i int
+	// With 32 or more bytes hashed, fold and merge the four lane
+	// accumulators; otherwise start from the seeded prime.
+	if xx.ln > 31 {
+		v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
+		h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
+
+		h = mergeRound64(h, v1)
+		h = mergeRound64(h, v2)
+		h = mergeRound64(h, v3)
+		h = mergeRound64(h, v4)
+	} else {
+		h = xx.seed + prime64x5
+	}
+
+	h += uint64(xx.ln)
+	// Fold in any buffered tail bytes: 8-byte words, then a 4-byte word,
+	// then single bytes.
+	if xx.memIdx > 0 {
+		in := xx.mem[:xx.memIdx]
+		for ; i < int(xx.memIdx)-7; i += 8 {
+			in := in[i : i+8 : len(in)]
+			k := u64(in[0:8:len(in)])
+			k *= prime64x2
+			k = rotl64_31(k)
+			k *= prime64x1
+			h ^= k
+			h = rotl64_27(h)*prime64x1 + prime64x4
+		}
+
+		for ; i < int(xx.memIdx)-3; i += 4 {
+			in := in[i : i+4 : len(in)]
+			h ^= uint64(u32(in[0:4:len(in)])) * prime64x1
+			h = rotl64_23(h)*prime64x2 + prime64x3
+		}
+
+		for ; i < int(xx.memIdx); i++ {
+			h ^= uint64(in[i]) * prime64x5
+			h = rotl64_11(h) * prime64x1
+		}
+	}
+
+	return mix64(h)
+}
diff --git a/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go b/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
new file mode 100644
index 00000000..1e2b5e8f
--- /dev/null
+++ b/vendor/github.com/OneOfOne/xxhash/xxhash_unsafe.go
@@ -0,0 +1,240 @@
+// +build !safe
+// +build !appengine
+// +build !ppc64le
+// +build !mipsle
+// +build !ppc64be
+// +build !mips
+// +build !s390x
+
+package xxhash
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Backend identifies the xxhash backend compiled into this build
+// (the unsafe-pointer implementation used on little-endian platforms).
+const Backend = "GoUnsafe"
+
+// ChecksumString32S returns the checksum of the input data, without creating a copy, with the specific seed.
+// The string's bytes are viewed in place via reflect.StringHeader/unsafe.
+func ChecksumString32S(s string, seed uint32) uint32 {
+	if len(s) == 0 {
+		return Checksum32S(nil, seed)
+	}
+	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return Checksum32S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
+}
+
+// WriteString adds s to the running 32-bit hash state without copying,
+// viewing the string's bytes in place. The empty string is a no-op.
+func (xx *XXHash32) WriteString(s string) (int, error) {
+	if len(s) == 0 {
+		return 0, nil
+	}
+
+	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
+}
+
+// ChecksumString64S returns the checksum of the input data, without creating a copy, with the specific seed.
+// The string's bytes are viewed in place via reflect.StringHeader/unsafe.
+func ChecksumString64S(s string, seed uint64) uint64 {
+	if len(s) == 0 {
+		return Checksum64S(nil, seed)
+	}
+
+	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return Checksum64S((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)], seed)
+}
+
+// WriteString adds s to the running 64-bit hash state without copying,
+// viewing the string's bytes in place. The empty string is a no-op.
+func (xx *XXHash64) WriteString(s string) (int, error) {
+	if len(s) == 0 {
+		return 0, nil
+	}
+	ss := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return xx.Write((*[maxInt32]byte)(unsafe.Pointer(ss.Data))[:len(s):len(s)])
+}
+
+// checksum64 is the one-shot 64-bit hash for inputs of 32+ bytes. It
+// reinterprets the input as uint64 words via unsafe pointers instead of
+// byte-by-byte decoding; nocheckptr disables the checkptr instrumentation
+// that would reject the unaligned casts.
+//go:nocheckptr
+func checksum64(in []byte, seed uint64) uint64 {
+	var (
+		wordsLen = len(in) >> 3
+		words    = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
+
+		v1, v2, v3, v4 = resetVs64(seed)
+
+		h uint64
+		i int
+	)
+
+	// One round per lane for every group of four 8-byte words (32 bytes).
+	for ; i < len(words)-3; i += 4 {
+		words := (*[4]uint64)(unsafe.Pointer(&words[i]))
+
+		v1 = round64(v1, words[0])
+		v2 = round64(v2, words[1])
+		v3 = round64(v3, words[2])
+		v4 = round64(v4, words[3])
+	}
+
+	// Fold and merge the four lane accumulators.
+	h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
+
+	h = mergeRound64(h, v1)
+	h = mergeRound64(h, v2)
+	h = mergeRound64(h, v3)
+	h = mergeRound64(h, v4)
+
+	h += uint64(len(in))
+
+	// Remaining whole 8-byte words.
+	for _, k := range words[i:] {
+		h ^= round64(0, k)
+		h = rotl64_27(h)*prime64x1 + prime64x4
+	}
+
+	// A final 4-byte word, if present.
+	if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
+		words := (*[1]uint32)(unsafe.Pointer(&in[0]))
+		h ^= uint64(words[0]) * prime64x1
+		h = rotl64_23(h)*prime64x2 + prime64x3
+
+		in = in[4:len(in):len(in)]
+	}
+
+	// Trailing single bytes.
+	for _, b := range in {
+		h ^= uint64(b) * prime64x5
+		h = rotl64_11(h) * prime64x1
+	}
+
+	return mix64(h)
+}
+
+// checksum64Short is the one-shot 64-bit hash path for short inputs
+// (called for len(in) <= 31): it folds 8-byte words, then a 4-byte word,
+// then single bytes, using unsafe word reinterpretation like checksum64.
+//go:nocheckptr
+func checksum64Short(in []byte, seed uint64) uint64 {
+	var (
+		h = seed + prime64x5 + uint64(len(in))
+		i int
+	)
+
+	if len(in) > 7 {
+		var (
+			wordsLen = len(in) >> 3
+			words    = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
+		)
+
+		for i := range words {
+			h ^= round64(0, words[i])
+			h = rotl64_27(h)*prime64x1 + prime64x4
+		}
+
+		// Advance past the bytes consumed as whole words.
+		i = wordsLen << 3
+	}
+
+	if in = in[i:len(in):len(in)]; len(in) > 3 {
+		words := (*[1]uint32)(unsafe.Pointer(&in[0]))
+		h ^= uint64(words[0]) * prime64x1
+		h = rotl64_23(h)*prime64x2 + prime64x3
+
+		in = in[4:len(in):len(in)]
+	}
+
+	for _, b := range in {
+		h ^= uint64(b) * prime64x5
+		h = rotl64_11(h) * prime64x1
+	}
+
+	return mix64(h)
+}
+
+// Write adds in to the running 64-bit hash state, buffering partial
+// 32-byte blocks in xx.mem and consuming full blocks (via unsafe word
+// reinterpretation) as they complete. It always returns len(in) and a
+// nil error.
+func (xx *XXHash64) Write(in []byte) (n int, err error) {
+	mem, idx := xx.mem[:], int(xx.memIdx)
+
+	xx.ln, n = xx.ln+uint64(len(in)), len(in)
+
+	// Not enough for a full block yet: just buffer and return.
+	if idx+len(in) < 32 {
+		xx.memIdx += int8(copy(mem[idx:len(mem):len(mem)], in))
+		return
+	}
+
+	var (
+		v1, v2, v3, v4 = xx.v1, xx.v2, xx.v3, xx.v4
+
+		i int
+	)
+
+	// Top up the buffer to a full 32-byte block and consume it; the copy
+	// is truncated to the buffer's free space even though all of in is
+	// offered.
+	if d := 32 - int(idx); d > 0 && int(idx)+len(in) > 31 {
+		copy(mem[idx:len(mem):len(mem)], in[:len(in):len(in)])
+
+		words := (*[4]uint64)(unsafe.Pointer(&mem[0]))
+
+		v1 = round64(v1, words[0])
+		v2 = round64(v2, words[1])
+		v3 = round64(v3, words[2])
+		v4 = round64(v4, words[3])
+
+		// If in is fully consumed, skip straight to storing the lanes.
+		if in, xx.memIdx = in[d:len(in):len(in)], 0; len(in) == 0 {
+			goto RET
+		}
+	}
+
+	// Consume the remaining full 32-byte stripes directly from in.
+	for ; i < len(in)-31; i += 32 {
+		words := (*[4]uint64)(unsafe.Pointer(&in[i]))
+
+		v1 = round64(v1, words[0])
+		v2 = round64(v2, words[1])
+		v3 = round64(v3, words[2])
+		v4 = round64(v4, words[3])
+	}
+
+	// Buffer whatever is left over for the next Write/Sum64.
+	if len(in)-i != 0 {
+		xx.memIdx += int8(copy(mem[xx.memIdx:len(mem):len(mem)], in[i:len(in):len(in)]))
+	}
+
+RET:
+	xx.v1, xx.v2, xx.v3, xx.v4 = v1, v2, v3, v4
+
+	return
+}
+
+// Sum64 returns the 64-bit digest of all data written so far. It only
+// reads the accumulated state, so Write may be called again afterwards.
+func (xx *XXHash64) Sum64() (h uint64) {
+	// With 32 or more bytes hashed, fold and merge the four lane
+	// accumulators; otherwise start from the seeded prime (the seed == 0
+	// case is split out, presumably as a micro-optimization).
+	if seed := xx.seed; xx.ln > 31 {
+		v1, v2, v3, v4 := xx.v1, xx.v2, xx.v3, xx.v4
+		h = rotl64_1(v1) + rotl64_7(v2) + rotl64_12(v3) + rotl64_18(v4)
+
+		h = mergeRound64(h, v1)
+		h = mergeRound64(h, v2)
+		h = mergeRound64(h, v3)
+		h = mergeRound64(h, v4)
+	} else if seed == 0 {
+		h = prime64x5
+	} else {
+		h = seed + prime64x5
+	}
+
+	h += uint64(xx.ln)
+
+	// Nothing buffered: finish immediately.
+	if xx.memIdx == 0 {
+		return mix64(h)
+	}
+
+	// Fold in the buffered tail: 8-byte words, then a 4-byte word, then
+	// single bytes, mirroring checksum64's tail handling.
+	var (
+		in       = xx.mem[:xx.memIdx:xx.memIdx]
+		wordsLen = len(in) >> 3
+		words    = ((*[maxInt32 / 8]uint64)(unsafe.Pointer(&in[0])))[:wordsLen:wordsLen]
+	)
+
+	for _, k := range words {
+		h ^= round64(0, k)
+		h = rotl64_27(h)*prime64x1 + prime64x4
+	}
+
+	if in = in[wordsLen<<3 : len(in) : len(in)]; len(in) > 3 {
+		words := (*[1]uint32)(unsafe.Pointer(&in[0]))
+
+		h ^= uint64(words[0]) * prime64x1
+		h = rotl64_23(h)*prime64x2 + prime64x3
+
+		in = in[4:len(in):len(in)]
+	}
+
+	for _, b := range in {
+		h ^= uint64(b) * prime64x5
+		h = rotl64_11(h) * prime64x1
+	}
+
+	return mix64(h)
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE
new file mode 100644
index 00000000..37ec93a1
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/v22/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE
new file mode 100644
index 00000000..23a0ada2
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
new file mode 100644
index 00000000..91584a16
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go
@@ -0,0 +1,240 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+ "encoding/hex"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/godbus/dbus/v5"
+)
+
+const (
+	// Character classes used by needsEscape/PathBusEscape.
+	alpha    = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+	num      = `0123456789`
+	alphanum = alpha + num
+	// signalBuffer sizes a signal channel — presumably the dispatch
+	// channel created elsewhere in this package; verify at use site.
+	signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped.
+// i is the byte's position within the string; b is the byte itself.
+func needsEscape(i int, b byte) bool {
+	// Escape everything that is not a-z-A-Z-0-9
+	// Also escape 0-9 if it's the first character
+	return strings.IndexByte(alphanum, b) == -1 ||
+		(i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters: each byte that
+// needsEscape reports is replaced by "_" followed by its two-digit hex value.
+func PathBusEscape(path string) string {
+	// Special case the empty string
+	if len(path) == 0 {
+		return "_"
+	}
+	n := []byte{}
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if needsEscape(i, c) {
+			e := fmt.Sprintf("_%x", c)
+			n = append(n, []byte(e)...)
+		} else {
+			n = append(n, c)
+		}
+	}
+	return string(n)
+}
+
+// pathBusUnescape is the inverse of PathBusEscape: "_XX" hex escapes are
+// decoded back to their original byte. A malformed escape (non-hex digits)
+// is silently dropped, and a trailing "_" without two following characters
+// is kept literally.
+func pathBusUnescape(path string) string {
+	if path == "_" {
+		return ""
+	}
+	n := []byte{}
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if c == '_' && i+2 < len(path) {
+			res, err := hex.DecodeString(path[i+1 : i+3])
+			if err == nil {
+				n = append(n, res...)
+			}
+			// Skip the two hex digits even if decoding failed.
+			i += 2
+		} else {
+			n = append(n, c)
+		}
+	}
+	return string(n)
+}
+
+// Conn is a connection to systemd's dbus endpoint. It holds two separate
+// bus connections: one for method calls and one dedicated to signal
+// delivery, plus the subscriber bookkeeping for job and property signals.
+type Conn struct {
+	// sysconn/sysobj are only used to call dbus methods
+	sysconn *dbus.Conn
+	sysobj  dbus.BusObject
+
+	// sigconn/sigobj are only used to receive dbus signals
+	sigconn *dbus.Conn
+	sigobj  dbus.BusObject
+
+	// jobListener maps a job's object path to the channel that should
+	// receive its completion result (see startJob/jobComplete).
+	jobListener struct {
+		jobs map[dbus.ObjectPath]chan<- string
+		sync.Mutex
+	}
+	// subStateSubscriber receives unit sub-state updates; ignore holds
+	// per-path timestamps used to suppress duplicates — verify semantics
+	// at the use sites.
+	subStateSubscriber struct {
+		updateCh chan<- *SubStateUpdate
+		errCh    chan<- error
+		sync.Mutex
+		ignore      map[dbus.ObjectPath]int64
+		cleanIgnore int64
+	}
+	// propertiesSubscriber receives unit property-change updates.
+	propertiesSubscriber struct {
+		updateCh chan<- *PropertiesUpdate
+		errCh    chan<- error
+		sync.Mutex
+	}
+}
+
+// New establishes a connection to any available bus and authenticates.
+// Callers should call Close() when done with the connection.
+// It first tries the system bus; if that fails and the process is running
+// as root, it falls back to systemd's private socket.
+func New() (*Conn, error) {
+	conn, err := NewSystemConnection()
+	if err != nil && os.Geteuid() == 0 {
+		return NewSystemdConnection()
+	}
+	return conn, err
+}
+
+// NewSystemConnection establishes a connection to the system bus and authenticates.
+// Callers should call Close() when done with the connection.
+func NewSystemConnection() (*Conn, error) {
+	return NewConnection(func() (*dbus.Conn, error) {
+		return dbusAuthHelloConnection(dbus.SystemBusPrivate)
+	})
+}
+
+// NewUserConnection establishes a connection to the session bus and
+// authenticates. This can be used to connect to systemd user instances.
+// Callers should call Close() when done with the connection.
+func NewUserConnection() (*Conn, error) {
+	return NewConnection(func() (*dbus.Conn, error) {
+		return dbusAuthHelloConnection(dbus.SessionBusPrivate)
+	})
+}
+
+// NewSystemdConnection establishes a private, direct connection to systemd
+// over /run/systemd/private. This can be used for communicating with
+// systemd without a dbus daemon. Callers should call Close() when done
+// with the connection.
+func NewSystemdConnection() (*Conn, error) {
+	return NewConnection(func() (*dbus.Conn, error) {
+		// We skip Hello when talking directly to systemd.
+		return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) {
+			return dbus.Dial("unix:path=/run/systemd/private")
+		})
+	})
+}
+
+// Close closes an established connection, shutting down both the
+// method-call and the signal bus connections.
+func (c *Conn) Close() {
+	c.sysconn.Close()
+	c.sigconn.Close()
+}
+
+// NewConnection establishes a connection to a bus using a caller-supplied function.
+// This allows connecting to remote buses through a user-supplied mechanism.
+// The supplied function may be called multiple times, and should return independent connections.
+// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded,
+// and any authentication should be handled by the function.
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
+	sysconn, err := dialBus()
+	if err != nil {
+		return nil, err
+	}
+
+	sigconn, err := dialBus()
+	if err != nil {
+		// Don't leak the first connection if the second dial fails.
+		sysconn.Close()
+		return nil, err
+	}
+
+	c := &Conn{
+		sysconn: sysconn,
+		sysobj:  systemdObject(sysconn),
+		sigconn: sigconn,
+		sigobj:  systemdObject(sigconn),
+	}
+
+	c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64)
+	c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
+
+	// Setup the listeners on jobs so that we can get completions.
+	// NOTE(review): the result of this AddMatch call is ignored; a failed
+	// match registration would silently drop job-completion signals.
+	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+		"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
+
+	c.dispatch()
+	return c, nil
+}
+
+// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
+// interface. The value is returned in its string representation, as defined at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html
+func (c *Conn) GetManagerProperty(prop string) (string, error) {
+	variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
+	if err != nil {
+		return "", err
+	}
+	// Variant.String() yields the GVariant text form, not the raw value.
+	return variant.String(), nil
+}
+
+// dbusAuthConnection dials a bus with createBus and authenticates it using
+// the EXTERNAL mechanism. The connection is closed (and nil returned) on
+// any authentication failure.
+func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
+	conn, err := createBus()
+	if err != nil {
+		return nil, err
+	}
+
+	// Only use EXTERNAL method, and hardcode the uid (not username)
+	// to avoid a username lookup (which requires a dynamically linked
+	// libc)
+	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+	err = conn.Auth(methods)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// dbusAuthHelloConnection authenticates like dbusAuthConnection and then
+// performs the org.freedesktop.DBus.Hello handshake required when talking
+// through a bus daemon (as opposed to systemd's private socket).
+func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) {
+	conn, err := dbusAuthConnection(createBus)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = conn.Hello(); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// systemdObject returns the bus object for systemd's manager,
+// org.freedesktop.systemd1 at /org/freedesktop/systemd1.
+func systemdObject(conn *dbus.Conn) dbus.BusObject {
+	return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
new file mode 100644
index 00000000..e38659d7
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go
@@ -0,0 +1,600 @@
+// Copyright 2015, 2018 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "path"
+ "strconv"
+
+ "github.com/godbus/dbus/v5"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+ var id uint32
+ var job dbus.ObjectPath
+ var unit string
+ var result string
+ dbus.Store(signal.Body, &id, &job, &unit, &result)
+ c.jobListener.Lock()
+ out, ok := c.jobListener.jobs[job]
+ if ok {
+ out <- result
+ delete(c.jobListener.jobs, job)
+ }
+ c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+ if ch != nil {
+ c.jobListener.Lock()
+ defer c.jobListener.Unlock()
+ }
+
+ var p dbus.ObjectPath
+ err := c.sysobj.Call(job, 0, args...).Store(&p)
+ if err != nil {
+ return 0, err
+ }
+
+ if ch != nil {
+ c.jobListener.jobs[p] = ch
+ }
+
+ // ignore error since 0 is fine if conversion fails
+ jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+ return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the units current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned. There
+// does exist the possibility for no error to be returned, but for the returned job
+// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// StopUnit is similar to StartUnit but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart
+// otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try"
+// flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit() may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+ return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`.
+func (c *Conn) SystemState() (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: "SystemState", Value: prop}, nil
+}
+
+// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) {
+ var err error
+ var props map[string]dbus.Variant
+
+ if !path.IsValid() {
+ return nil, fmt.Errorf("invalid unit name: %v", path)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(map[string]interface{}, len(props))
+ for k, v := range props {
+ out[k] = v.Value()
+ }
+
+ return out, nil
+}
+
+// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ path := unitPath(unit)
+ return c.getProperties(path, "org.freedesktop.systemd1.Unit")
+}
+
+// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties.
+func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) {
+ return c.getProperties(path, "org.freedesktop.systemd1.Unit")
+}
+
+// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) {
+ path := unitPath(unit)
+ return c.getProperties(path, "")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ path := unitPath(unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: propertyName, Value: prop}, nil
+}
+
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetServiceProperty returns property for given service name and property name
+func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
+ return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
+// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ path := unitPath(unit)
+ return c.getProperties(path, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+type UnitStatus struct {
+ Name string // The primary unit name as string
+ Description string // The human readable description string
+ LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
+ ActiveState string // The active state (i.e. whether the unit is currently started or not)
+ SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+ Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path
+ JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
+ JobType string // The job type as string
+ JobPath dbus.ObjectPath // The job object path
+}
+
+type storeFunc func(retvalues ...interface{}) error
+
+func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
+ result := make([][]interface{}, 0)
+ err := f(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]UnitStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ err = dbus.Store(resultInterface, statusInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+// Also note that a unit is only loaded if it is active and/or enabled.
+// Units that are both disabled and inactive will thus not be returned.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// ListUnitsFiltered returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// ListUnitsByPatterns returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// ListUnitsByNames returns an array with units. It takes a list of units'
+// names and returns a UnitStatus array. Compared to the ListUnitsByPatterns
+// method, this method returns statuses even for inactive or non-existing
+// units. The input array should contain exact unit names, not patterns.
+// Note: Requires systemd v230 or higher
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+ return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+ Path string
+ Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+ result := make([][]interface{}, 0)
+ err := f(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ files := make([]UnitFile, len(result))
+ fileInterface := make([]interface{}, len(files))
+ for i := range files {
+ fileInterface[i] = &files[i]
+ }
+
+ err = dbus.Store(resultInterface, fileInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return files, nil
+}
+
+// ListUnitFiles returns an array of all available units on disk.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+ return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles() links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]LinkUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install]) section. The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ var carries_install_info bool
+
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]EnableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return false, nil, err
+ }
+
+ return carries_install_info, changes, nil
+}
+
+type EnableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles() may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]DisableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// MaskUnitFiles masks one or more units in the system
+//
+// It takes three arguments:
+// * list of units to mask (either just file names or full
+// absolute paths if the unit files are residing outside
+// the usual unit search paths)
+// * runtime to specify whether the unit was enabled for runtime
+// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+// * force flag
+func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]MaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type MaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// UnmaskUnitFiles unmasks one or more units in the system
+//
+// It takes two arguments:
+// * list of unit files to mask (either just file names or full
+// absolute paths if the unit files are residing outside
+// the usual unit search paths)
+// * runtime to specify whether the unit was enabled for runtime
+// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]UnmaskUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
+func (c *Conn) Reload() error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+ return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
+
+// unitName returns the unescaped base element of the supplied escaped path
+func unitName(dpath dbus.ObjectPath) string {
+ return pathBusUnescape(path.Base(string(dpath)))
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
new file mode 100644
index 00000000..fb42b627
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go
@@ -0,0 +1,237 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "github.com/godbus/dbus/v5"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties with future release.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+ Name string
+ Value dbus.Variant
+}
+
+type PropertyCollection struct {
+ Name string
+ Properties []Property
+}
+
+type execStart struct {
+ Path string // the binary path to execute
+ Args []string // an array with all arguments to pass to the executed command, starting with argument 0
+ UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+ execStarts := []execStart{
+ {
+ Path: command[0],
+ Args: command,
+ UncleanIsFailure: uncleanIsFailure,
+ },
+ }
+
+ return Property{
+ Name: "ExecStart",
+ Value: dbus.MakeVariant(execStarts),
+ }
+}
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+ return Property{
+ Name: "RemainAfterExit",
+ Value: dbus.MakeVariant(b),
+ }
+}
+
+// PropType sets the Type service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
+func PropType(t string) Property {
+ return Property{
+ Name: "Type",
+ Value: dbus.MakeVariant(t),
+ }
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description=
+func PropDescription(desc string) Property {
+ return Property{
+ Name: "Description",
+ Value: dbus.MakeVariant(desc),
+ }
+}
+
+func propDependency(name string, units []string) Property {
+ return Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+ return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+ return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+ return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+ return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+ return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+ return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+ return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+ return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+ return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+ return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+ return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+ return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+ return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+ return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+ return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+ return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+ return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+ return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+ return propDependency("RequiresMountsFor", units)
+}
+
+// PropSlice sets the Slice unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+ return Property{
+ Name: "Slice",
+ Value: dbus.MakeVariant(slice),
+ }
+}
+
+// PropPids sets the PIDs field of scope units used in the initial construction
+// of the scope only and specifies the initial PIDs to add to the scope object.
+// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
+func PropPids(pids ...uint32) Property {
+ return Property{
+ Name: "PIDs",
+ Value: dbus.MakeVariant(pids),
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go
new file mode 100644
index 00000000..17c5d485
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go
@@ -0,0 +1,47 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+ data map[string]bool
+}
+
+func (s *set) Add(value string) {
+ s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+ delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+ _, exists = s.data[value]
+ return
+}
+
+func (s *set) Length() int {
+ return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+ for val := range s.data {
+ values = append(values, val)
+ }
+ return
+}
+
+func newSet() *set {
+ return &set{make(map[string]bool)}
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go
new file mode 100644
index 00000000..7e370fea
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go
@@ -0,0 +1,333 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "errors"
+ "log"
+ "time"
+
+ "github.com/godbus/dbus/v5"
+)
+
+const (
+ cleanIgnoreInterval = int64(10 * time.Second)
+ ignoreInterval = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+ c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+ return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+}
+
+// Unsubscribe this connection from systemd dbus events.
+func (c *Conn) Unsubscribe() error {
+ return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+}
+
+func (c *Conn) dispatch() {
+ ch := make(chan *dbus.Signal, signalBuffer)
+
+ c.sigconn.Signal(ch)
+
+ go func() {
+ for {
+ signal, ok := <-ch
+ if !ok {
+ return
+ }
+
+ if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
+ c.jobComplete(signal)
+ }
+
+ if c.subStateSubscriber.updateCh == nil &&
+ c.propertiesSubscriber.updateCh == nil {
+ continue
+ }
+
+ var unitPath dbus.ObjectPath
+ switch signal.Name {
+ case "org.freedesktop.systemd1.Manager.JobRemoved":
+ unitName := signal.Body[2].(string)
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+ case "org.freedesktop.systemd1.Manager.UnitNew":
+ unitPath = signal.Body[1].(dbus.ObjectPath)
+ case "org.freedesktop.DBus.Properties.PropertiesChanged":
+ if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+ unitPath = signal.Path
+
+ if len(signal.Body) >= 2 {
+ if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok {
+ c.sendPropertiesUpdate(unitPath, changed)
+ }
+ }
+ }
+ }
+
+ if unitPath == dbus.ObjectPath("") {
+ continue
+ }
+
+ c.sendSubStateUpdate(unitPath)
+ }
+ }()
+}
+
+// SubscribeUnits returns two unbuffered channels which will receive all changed units every
+// interval. Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+ return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+ old := make(map[string]*UnitStatus)
+ statusChan := make(chan map[string]*UnitStatus, buffer)
+ errChan := make(chan error, buffer)
+
+ go func() {
+ for {
+ timerChan := time.After(interval)
+
+ units, err := c.ListUnits()
+ if err == nil {
+ cur := make(map[string]*UnitStatus)
+ for i := range units {
+ if filterUnit != nil && filterUnit(units[i].Name) {
+ continue
+ }
+ cur[units[i].Name] = &units[i]
+ }
+
+ // add all new or changed units
+ changed := make(map[string]*UnitStatus)
+ for n, u := range cur {
+ if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+ changed[n] = u
+ }
+ delete(old, n)
+ }
+
+ // add all deleted units
+ for oldN := range old {
+ changed[oldN] = nil
+ }
+
+ old = cur
+
+ if len(changed) != 0 {
+ statusChan <- changed
+ }
+ } else {
+ errChan <- err
+ }
+
+ <-timerChan
+ }
+ }()
+
+ return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+ UnitName string
+ SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+ if c == nil {
+ msg := "nil receiver"
+ select {
+ case errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+
+ c.subStateSubscriber.Lock()
+ defer c.subStateSubscriber.Unlock()
+ c.subStateSubscriber.updateCh = updateCh
+ c.subStateSubscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) {
+ c.subStateSubscriber.Lock()
+ defer c.subStateSubscriber.Unlock()
+
+ if c.subStateSubscriber.updateCh == nil {
+ return
+ }
+
+ isIgnored := c.shouldIgnore(unitPath)
+ defer c.cleanIgnore()
+ if isIgnored {
+ return
+ }
+
+ info, err := c.GetUnitPathProperties(unitPath)
+ if err != nil {
+ select {
+ case c.subStateSubscriber.errCh <- err:
+ default:
+ log.Printf("full error channel while reporting: %s\n", err)
+ }
+ return
+ }
+ defer c.updateIgnore(unitPath, info)
+
+ name, ok := info["Id"].(string)
+ if !ok {
+ msg := "failed to cast info.Id"
+ select {
+ case c.subStateSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", err)
+ }
+ return
+ }
+ substate, ok := info["SubState"].(string)
+ if !ok {
+ msg := "failed to cast info.SubState"
+ select {
+ case c.subStateSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+
+ update := &SubStateUpdate{name, substate}
+ select {
+ case c.subStateSubscriber.updateCh <- update:
+ default:
+ msg := "update channel is full"
+ select {
+ case c.subStateSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+ t, ok := c.subStateSubscriber.ignore[path]
+ return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+ loadState, ok := info["LoadState"].(string)
+ if !ok {
+ return
+ }
+
+ // unit is unloaded - it will trigger bad systemd dbus behavior
+ if loadState == "not-found" {
+ c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+ }
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+ now := time.Now().UnixNano()
+ if c.subStateSubscriber.cleanIgnore < now {
+ c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval
+
+ for p, t := range c.subStateSubscriber.ignore {
+ if t < now {
+ delete(c.subStateSubscriber.ignore, p)
+ }
+ }
+ }
+}
+
+// PropertiesUpdate holds a map of a unit's changed properties
+type PropertiesUpdate struct {
+ UnitName string
+ Changed map[string]dbus.Variant
+}
+
+// SetPropertiesSubscriber writes to updateCh when any unit's properties
+// change. Every property change reported by systemd will be sent; that is, no
+// transitions will be "missed" (as they might be with SetSubStateSubscriber).
+// However, state changes will only be written to the channel with non-blocking
+// writes. If updateCh is full, it attempts to write an error to errCh; if
+// errCh is full, the error passes silently.
+func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) {
+ c.propertiesSubscriber.Lock()
+ defer c.propertiesSubscriber.Unlock()
+ c.propertiesSubscriber.updateCh = updateCh
+ c.propertiesSubscriber.errCh = errCh
+}
+
+// we don't need to worry about shouldIgnore() here because
+// sendPropertiesUpdate doesn't call GetProperties()
+func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) {
+ c.propertiesSubscriber.Lock()
+ defer c.propertiesSubscriber.Unlock()
+
+ if c.propertiesSubscriber.updateCh == nil {
+ return
+ }
+
+ update := &PropertiesUpdate{unitName(unitPath), changedProps}
+
+ select {
+ case c.propertiesSubscriber.updateCh <- update:
+ default:
+ msg := "update channel is full"
+ select {
+ case c.propertiesSubscriber.errCh <- errors.New(msg):
+ default:
+ log.Printf("full error channel while reporting: %s\n", msg)
+ }
+ return
+ }
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go
new file mode 100644
index 00000000..5b408d58
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+ "time"
+)
+
+// SubscriptionSet returns a subscription set which is like conn.Subscribe but
+// can filter to only return events for a set of units.
+type SubscriptionSet struct {
+ *set
+ conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+ return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+ // TODO: Make fully evented by using systemd 209 with properties changed values
+ return s.conn.SubscribeUnitsCustom(time.Second, 0,
+ mismatchUnitStatus,
+ func(unit string) bool { return s.filter(unit) },
+ )
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+ return &SubscriptionSet{newSet(), conn}
+}
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent. false is returned if the objects are equivalent.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+ return u1.Name != u2.Name ||
+ u1.Description != u2.Description ||
+ u1.LoadState != u2.LoadState ||
+ u1.ActiveState != u2.ActiveState ||
+ u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
new file mode 100644
index 00000000..3938f383
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml
@@ -0,0 +1,19 @@
+# Copyright (C) 2017 SUSE LLC. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+language: go
+go:
+ - 1.7.x
+ - 1.8.x
+ - tip
+
+os:
+ - linux
+ - osx
+
+script:
+ - go test -cover -v ./...
+
+notifications:
+ email: false
diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
new file mode 100644
index 00000000..bec842f2
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE
@@ -0,0 +1,28 @@
+Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+Copyright (C) 2017 SUSE LLC. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md
new file mode 100644
index 00000000..49b2baa9
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/README.md
@@ -0,0 +1,65 @@
+## `filepath-securejoin` ##
+
+[](https://travis-ci.org/cyphar/filepath-securejoin)
+
+An implementation of `SecureJoin`, a [candidate for inclusion in the Go
+standard library][go#20126]. The purpose of this function is to be a "secure"
+alternative to `filepath.Join`, and in particular it provides certain
+guarantees that are not provided by `filepath.Join`.
+
+This is the function prototype:
+
+```go
+func SecureJoin(root, unsafePath string) (string, error)
+```
+
+This library **guarantees** the following:
+
+* If no error is set, the resulting string **must** be a child path of
+  `root` and will not contain any symlink path components (they will all
+  be expanded).
+
+* When expanding symlinks, all symlink path components **must** be resolved
+ relative to the provided root. In particular, this can be considered a
+ userspace implementation of how `chroot(2)` operates on file paths. Note that
+ these symlinks will **not** be expanded lexically (`filepath.Clean` is not
+ called on the input before processing).
+
+* Non-existent path components are unaffected by `SecureJoin` (similar to
+ `filepath.EvalSymlinks`'s semantics).
+
+* The returned path will always be `filepath.Clean`ed and thus not contain any
+ `..` components.
+
+A (trivial) implementation of this function on GNU/Linux systems could be done
+with the following (note that this requires root privileges and is far more
+opaque than the implementation in this library, and also requires that
+`readlink` is inside the `root` path):
+
+```go
+package securejoin
+
+import (
+ "os/exec"
+ "path/filepath"
+)
+
+func SecureJoin(root, unsafePath string) (string, error) {
+ unsafePath = string(filepath.Separator) + unsafePath
+ cmd := exec.Command("chroot", root,
+ "readlink", "--canonicalize-missing", "--no-newline", unsafePath)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", err
+ }
+ expanded := string(output)
+ return filepath.Join(root, expanded), nil
+}
+```
+
+[go#20126]: https://github.com/golang/go/issues/20126
+
+### License ###
+
+The license of this project is the same as Go, which is a BSD 3-clause license
+available in the `LICENSE` file.
diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION
new file mode 100644
index 00000000..ee1372d3
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION
@@ -0,0 +1 @@
+0.2.2
diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go
new file mode 100644
index 00000000..c4ca3d71
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/join.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved.
+// Copyright (C) 2017 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package securejoin is an implementation of the hopefully-soon-to-be-included
+// SecureJoin helper that is meant to be part of the "path/filepath" package.
+// The purpose of this project is to provide a PoC implementation to make the
+// SecureJoin proposal (https://github.com/golang/go/issues/20126) more
+// tangible.
+package securejoin
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/pkg/errors"
+)
+
+// ErrSymlinkLoop is returned by SecureJoinVFS when too many symlinks have been
+// evaluated in attempting to securely join the two given paths.
+var ErrSymlinkLoop = errors.Wrap(syscall.ELOOP, "secure join")
+
+// IsNotExist tells you if err is an error that implies that either the path
+// accessed does not exist (or path components don't exist). This is
+// effectively a more broad version of os.IsNotExist.
+func IsNotExist(err error) bool {
+	// If it's a bona fide ENOENT just bail.
+ if os.IsNotExist(errors.Cause(err)) {
+ return true
+ }
+
+ // Check that it's not actually an ENOTDIR, which in some cases is a more
+ // convoluted case of ENOENT (usually involving weird paths).
+ var errno error
+ switch err := errors.Cause(err).(type) {
+ case *os.PathError:
+ errno = err.Err
+ case *os.LinkError:
+ errno = err.Err
+ case *os.SyscallError:
+ errno = err.Err
+ }
+ return errno == syscall.ENOTDIR || errno == syscall.ENOENT
+}
+
+// SecureJoinVFS joins the two given path components (similar to Join) except
+// that the returned path is guaranteed to be scoped inside the provided root
+// path (when evaluated). Any symbolic links in the path are evaluated with the
+// given root treated as the root of the filesystem, similar to a chroot. The
+// filesystem state is evaluated through the given VFS interface (if nil, the
+// standard os.* family of functions are used).
+//
+// Note that the guarantees provided by this function only apply if the path
+// components in the returned string are not modified (in other words are not
+// replaced with symlinks on the filesystem) after this function has returned.
+// Such a symlink race is necessarily out-of-scope of SecureJoin.
+func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
+ // Use the os.* VFS implementation if none was specified.
+ if vfs == nil {
+ vfs = osVFS{}
+ }
+
+ var path bytes.Buffer
+ n := 0
+ for unsafePath != "" {
+ if n > 255 {
+ return "", ErrSymlinkLoop
+ }
+
+ // Next path component, p.
+ i := strings.IndexRune(unsafePath, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, unsafePath = unsafePath, ""
+ } else {
+ p, unsafePath = unsafePath[:i], unsafePath[i+1:]
+ }
+
+ // Create a cleaned path, using the lexical semantics of /../a, to
+ // create a "scoped" path component which can safely be joined to fullP
+ // for evaluation. At this point, path.String() doesn't contain any
+ // symlink components.
+ cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p)
+ if cleanP == string(filepath.Separator) {
+ path.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ // Figure out whether the path is a symlink.
+ fi, err := vfs.Lstat(fullP)
+ if err != nil && !IsNotExist(err) {
+ return "", err
+ }
+ // Treat non-existent path components the same as non-symlinks (we
+ // can't do any better here).
+ if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 {
+ path.WriteString(p)
+ path.WriteRune(filepath.Separator)
+ continue
+ }
+
+ // Only increment when we actually dereference a link.
+ n++
+
+ // It's a symlink, expand it by prepending it to the yet-unparsed path.
+ dest, err := vfs.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ // Absolute symlinks reset any work we've already done.
+ if filepath.IsAbs(dest) {
+ path.Reset()
+ }
+ unsafePath = dest + string(filepath.Separator) + unsafePath
+ }
+
+ // We have to clean path.String() here because it may contain '..'
+ // components that are entirely lexical, but would be misleading otherwise.
+ // And finally do a final clean to ensure that root is also lexically
+ // clean.
+ fullP := filepath.Clean(string(filepath.Separator) + path.String())
+ return filepath.Clean(root + fullP), nil
+}
+
+// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library
+// of functions as the VFS. If in doubt, use this function over SecureJoinVFS.
+func SecureJoin(root, unsafePath string) (string, error) {
+ return SecureJoinVFS(root, unsafePath, nil)
+}
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vendor.conf b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
new file mode 100644
index 00000000..66bb574b
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/vendor.conf
@@ -0,0 +1 @@
+github.com/pkg/errors v0.8.0
diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
new file mode 100644
index 00000000..a82a5eae
--- /dev/null
+++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go
@@ -0,0 +1,41 @@
+// Copyright (C) 2017 SUSE LLC. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package securejoin
+
+import "os"
+
+// In future this should be moved into a separate package, because now there
+// are several projects (umoci and go-mtree) that are using this sort of
+// interface.
+
+// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is
+// equivalent to using the standard os.* family of functions. This is mainly
+// used for the purposes of mock testing, but also can be used to otherwise use
+// SecureJoin with a VFS-like system.
+type VFS interface {
+ // Lstat returns a FileInfo describing the named file. If the file is a
+ // symbolic link, the returned FileInfo describes the symbolic link. Lstat
+ // makes no attempt to follow the link. These semantics are identical to
+ // os.Lstat.
+ Lstat(name string) (os.FileInfo, error)
+
+ // Readlink returns the destination of the named symbolic link. These
+ // semantics are identical to os.Readlink.
+ Readlink(name string) (string, error)
+}
+
+// osVFS is the "nil" VFS, in that it just passes everything through to the os
+// module.
+type osVFS struct{}
+
+// Lstat returns a FileInfo describing the named file. If the file is a
+// symbolic link, the returned FileInfo describes the symbolic link. Lstat
+// makes no attempt to follow the link. These semantics are identical to
+// os.Lstat.
+func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
+
+// Readlink returns the destination of the named symbolic link. These
+// semantics are identical to os.Readlink.
+func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) }
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 00000000..e256a31e
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 00000000..0e9d6edc
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+ - 1.3
+ - 1.4
+script:
+ - go test
+ - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 00000000..7805d36d
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 00000000..0200f75b
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ age: 30
+ name: John
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 00000000..58600740
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ if v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ } else {
+ v = reflect.New(v.Type().Elem())
+ }
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 00000000..4fb4054a
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+ // incompatibilties happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+ // We have a JSON or Text Umarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore
new file mode 100644
index 00000000..b4ae623b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/.gitignore
@@ -0,0 +1,8 @@
+glob.iml
+.idea
+*.cpu
+*.mem
+*.test
+*.dot
+*.png
+*.svg
diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml
new file mode 100644
index 00000000..e8a27682
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/.travis.yml
@@ -0,0 +1,9 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.5.3
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE
new file mode 100644
index 00000000..9d4735ca
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh
new file mode 100644
index 00000000..804cf22e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/bench.sh
@@ -0,0 +1,26 @@
+#! /bin/bash
+
+bench() {
+ filename="/tmp/$1-$2.bench"
+ if test -e "${filename}";
+ then
+ echo "Already exists ${filename}"
+ else
+ backup=`git rev-parse --abbrev-ref HEAD`
+ git checkout $1
+ echo -n "Creating ${filename}... "
+ go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem
+ echo "OK"
+ git checkout ${backup}
+ sleep 5
+ fi
+}
+
+
+to=$1
+current=`git rev-parse --abbrev-ref HEAD`
+
+bench ${to} $2
+bench ${current} $2
+
+benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench"
diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go
new file mode 100644
index 00000000..02e7de80
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/compiler/compiler.go
@@ -0,0 +1,525 @@
+package compiler
+
+// TODO use constructor with all matchers, and to their structs private
+// TODO glue multiple Text nodes (like after QuoteMeta)
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/gobwas/glob/match"
+ "github.com/gobwas/glob/syntax/ast"
+ "github.com/gobwas/glob/util/runes"
+)
+
+func optimizeMatcher(matcher match.Matcher) match.Matcher {
+ switch m := matcher.(type) {
+
+ case match.Any:
+ if len(m.Separators) == 0 {
+ return match.NewSuper()
+ }
+
+ case match.AnyOf:
+ if len(m.Matchers) == 1 {
+ return m.Matchers[0]
+ }
+
+ return m
+
+ case match.List:
+ if m.Not == false && len(m.List) == 1 {
+ return match.NewText(string(m.List))
+ }
+
+ return m
+
+ case match.BTree:
+ m.Left = optimizeMatcher(m.Left)
+ m.Right = optimizeMatcher(m.Right)
+
+ r, ok := m.Value.(match.Text)
+ if !ok {
+ return m
+ }
+
+ var (
+ leftNil = m.Left == nil
+ rightNil = m.Right == nil
+ )
+ if leftNil && rightNil {
+ return match.NewText(r.Str)
+ }
+
+ _, leftSuper := m.Left.(match.Super)
+ lp, leftPrefix := m.Left.(match.Prefix)
+ la, leftAny := m.Left.(match.Any)
+
+ _, rightSuper := m.Right.(match.Super)
+ rs, rightSuffix := m.Right.(match.Suffix)
+ ra, rightAny := m.Right.(match.Any)
+
+ switch {
+ case leftSuper && rightSuper:
+ return match.NewContains(r.Str, false)
+
+ case leftSuper && rightNil:
+ return match.NewSuffix(r.Str)
+
+ case rightSuper && leftNil:
+ return match.NewPrefix(r.Str)
+
+ case leftNil && rightSuffix:
+ return match.NewPrefixSuffix(r.Str, rs.Suffix)
+
+ case rightNil && leftPrefix:
+ return match.NewPrefixSuffix(lp.Prefix, r.Str)
+
+ case rightNil && leftAny:
+ return match.NewSuffixAny(r.Str, la.Separators)
+
+ case leftNil && rightAny:
+ return match.NewPrefixAny(r.Str, ra.Separators)
+ }
+
+ return m
+ }
+
+ return matcher
+}
+
+func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
+ if len(matchers) == 0 {
+ return nil, fmt.Errorf("compile error: need at least one matcher")
+ }
+ if len(matchers) == 1 {
+ return matchers[0], nil
+ }
+ if m := glueMatchers(matchers); m != nil {
+ return m, nil
+ }
+
+ idx := -1
+ maxLen := -1
+ var val match.Matcher
+ for i, matcher := range matchers {
+ if l := matcher.Len(); l != -1 && l >= maxLen {
+ maxLen = l
+ idx = i
+ val = matcher
+ }
+ }
+
+ if val == nil { // not found matcher with static length
+ r, err := compileMatchers(matchers[1:])
+ if err != nil {
+ return nil, err
+ }
+ return match.NewBTree(matchers[0], nil, r), nil
+ }
+
+ left := matchers[:idx]
+ var right []match.Matcher
+ if len(matchers) > idx+1 {
+ right = matchers[idx+1:]
+ }
+
+ var l, r match.Matcher
+ var err error
+ if len(left) > 0 {
+ l, err = compileMatchers(left)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(right) > 0 {
+ r, err = compileMatchers(right)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return match.NewBTree(val, l, r), nil
+}
+
+func glueMatchers(matchers []match.Matcher) match.Matcher {
+ if m := glueMatchersAsEvery(matchers); m != nil {
+ return m
+ }
+ if m := glueMatchersAsRow(matchers); m != nil {
+ return m
+ }
+ return nil
+}
+
+func glueMatchersAsRow(matchers []match.Matcher) match.Matcher {
+ if len(matchers) <= 1 {
+ return nil
+ }
+
+ var (
+ c []match.Matcher
+ l int
+ )
+ for _, matcher := range matchers {
+ if ml := matcher.Len(); ml == -1 {
+ return nil
+ } else {
+ c = append(c, matcher)
+ l += ml
+ }
+ }
+ return match.NewRow(l, c...)
+}
+
+// glueMatchersAsEvery combines a run of wildcard-like matchers
+// (Super, Any, Single and negated List) that all share the same separator
+// set into a single simpler matcher: Super, Any, Min or an EveryOf
+// conjunction of Min/Max/Contains constraints. It returns nil when the
+// run mixes separator sets or contains any other matcher kind.
+func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher {
+	if len(matchers) <= 1 {
+		return nil
+	}
+
+	var (
+		hasAny    bool
+		hasSuper  bool
+		hasSingle bool
+		min       int
+		separator []rune
+	)
+
+	for i, matcher := range matchers {
+		var sep []rune
+
+		switch m := matcher.(type) {
+		case match.Super:
+			sep = []rune{}
+			hasSuper = true
+
+		case match.Any:
+			sep = m.Separators
+			hasAny = true
+
+		case match.Single:
+			sep = m.Separators
+			hasSingle = true
+			min++
+
+		case match.List:
+			// only a negated list behaves like a single-rune wildcard
+			if !m.Not {
+				return nil
+			}
+			sep = m.List
+			hasSingle = true
+			min++
+
+		default:
+			return nil
+		}
+
+		// initialize
+		if i == 0 {
+			separator = sep
+		}
+
+		if runes.Equal(sep, separator) {
+			continue
+		}
+
+		return nil
+	}
+
+	if hasSuper && !hasAny && !hasSingle {
+		return match.NewSuper()
+	}
+
+	if hasAny && !hasSuper && !hasSingle {
+		return match.NewAny(separator)
+	}
+
+	if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
+		return match.NewMin(min)
+	}
+
+	every := match.NewEveryOf()
+
+	if min > 0 {
+		every.Add(match.NewMin(min))
+
+		// without a star the run has an exact rune count
+		if !hasAny && !hasSuper {
+			every.Add(match.NewMax(min))
+		}
+	}
+
+	if len(separator) > 0 {
+		every.Add(match.NewContains(string(separator), true))
+	}
+
+	return every
+}
+
+// minimizeMatchers searches for the best glueable sub-run of matchers
+// (widest run, or longest fixed-length result), replaces it with the glued
+// matcher and recurses until no further reduction is possible.
+func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
+	var done match.Matcher
+	var left, right, count int
+
+	for l := 0; l < len(matchers); l++ {
+		for r := len(matchers); r > l; r-- {
+			if glued := glueMatchers(matchers[l:r]); glued != nil {
+				var swap bool
+
+				if done == nil {
+					swap = true
+				} else {
+					cl, gl := done.Len(), glued.Len()
+					swap = cl > -1 && gl > -1 && gl > cl
+					swap = swap || count < r-l
+				}
+
+				if swap {
+					done = glued
+					left = l
+					right = r
+					count = r - l
+				}
+			}
+		}
+	}
+
+	if done == nil {
+		return matchers
+	}
+
+	// splice the glued matcher in place of the run it replaces
+	next := append(append([]match.Matcher{}, matchers[:left]...), done)
+	if right < len(matchers) {
+		next = append(next, matchers[right:]...)
+	}
+
+	if len(next) == len(matchers) {
+		return next
+	}
+
+	return minimizeMatchers(next)
+}
+
+// minimizeTree tries to apply some heuristics to minimize number of nodes in given tree.
+// Currently only AnyOf nodes are handled; it returns nil when no
+// minimization applies.
+func minimizeTree(tree *ast.Node) *ast.Node {
+	switch tree.Kind {
+	case ast.KindAnyOf:
+		return minimizeTreeAnyOf(tree)
+	default:
+		return nil
+	}
+}
+
+// minimizeAnyOf tries to find common children of given node of AnyOf pattern
+// it searches for common children from left and from right
+// if any common children are found – then it returns new optimized ast tree
+// else it returns nil
+func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
+	// the factoring below is only valid when every alternative is a pattern
+	if !areOfSameKind(tree.Children, ast.KindPattern) {
+		return nil
+	}
+
+	commonLeft, commonRight := commonChildren(tree.Children)
+	commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
+	if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
+		return nil
+	}
+
+	var result []*ast.Node
+	if commonLeftCount > 0 {
+		result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
+	}
+
+	var anyOf []*ast.Node
+	for _, child := range tree.Children {
+		reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
+		var node *ast.Node
+		if len(reuse) == 0 {
+			// this pattern is completely reduced by commonLeft and commonRight patterns
+			// so it become nothing
+			node = ast.NewNode(ast.KindNothing, nil)
+		} else {
+			node = ast.NewNode(ast.KindPattern, nil, reuse...)
+		}
+		anyOf = appendIfUnique(anyOf, node)
+	}
+	switch {
+	case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
+		result = append(result, anyOf[0])
+	case len(anyOf) > 1:
+		result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
+	}
+
+	if commonRightCount > 0 {
+		result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
+	}
+
+	return ast.NewNode(ast.KindPattern, nil, result...)
+}
+
+// commonChildren returns the longest common prefix (commonLeft) and
+// suffix (commonRight) shared by the children of every given node.
+// The node with the fewest children bounds the search on both sides.
+func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) {
+	if len(nodes) <= 1 {
+		return
+	}
+
+	// find node that has least number of children
+	idx := leastChildren(nodes)
+	if idx == -1 {
+		return
+	}
+	tree := nodes[idx]
+	treeLength := len(tree.Children)
+
+	// allocate max able size for rightCommon slice
+	// to get ability insert elements in reverse order (from end to start)
+	// without sorting
+	commonRight = make([]*ast.Node, treeLength)
+	lastRight := treeLength // will use this to get results as commonRight[lastRight:]
+
+	var (
+		breakLeft   bool
+		breakRight  bool
+		commonTotal int
+	)
+	for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 {
+		treeLeft := tree.Children[i]
+		treeRight := tree.Children[j]
+
+		for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ {
+			// skip least children node
+			if k == idx {
+				continue
+			}
+
+			restLeft := nodes[k].Children[i]
+			restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength]
+
+			breakLeft = breakLeft || !treeLeft.Equal(restLeft)
+
+			// disable searching for right common parts, if left part is already overlapping
+			breakRight = breakRight || (!breakLeft && j <= i)
+			breakRight = breakRight || !treeRight.Equal(restRight)
+		}
+
+		if !breakLeft {
+			commonTotal++
+			commonLeft = append(commonLeft, treeLeft)
+		}
+		if !breakRight {
+			commonTotal++
+			lastRight = j
+			commonRight[j] = treeRight
+		}
+	}
+
+	commonRight = commonRight[lastRight:]
+
+	return
+}
+
+// appendIfUnique appends val to target unless an equal node (deep
+// comparison) is already present.
+func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node {
+	for _, n := range target {
+		if reflect.DeepEqual(n, val) {
+			return target
+		}
+	}
+	return append(target, val)
+}
+
+// areOfSameKind reports whether every node has the given kind.
+func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool {
+	for _, n := range nodes {
+		if n.Kind != kind {
+			return false
+		}
+	}
+	return true
+}
+
+// leastChildren returns the index of the node with the fewest children,
+// or -1 when nodes is empty.
+func leastChildren(nodes []*ast.Node) int {
+	min := -1
+	idx := -1
+	for i, n := range nodes {
+		if idx == -1 || (len(n.Children) < min) {
+			min = len(n.Children)
+			idx = i
+		}
+	}
+	return idx
+}
+
+// compileTreeChildren compiles every child of tree into a matcher
+// (each individually optimized) and returns them in order.
+func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) {
+	var matchers []match.Matcher
+	for _, desc := range tree.Children {
+		m, err := compile(desc, sep)
+		if err != nil {
+			return nil, err
+		}
+		matchers = append(matchers, optimizeMatcher(m))
+	}
+	return matchers, nil
+}
+
+// compile recursively translates an AST node into a Matcher, applying
+// tree-level (minimizeTree) and matcher-level (minimizeMatchers,
+// optimizeMatcher) optimizations along the way. sep is the set of
+// separator runes that bound `*` and `?`.
+func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) {
+	switch tree.Kind {
+	case ast.KindAnyOf:
+		// todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go)
+		if n := minimizeTree(tree); n != nil {
+			return compile(n, sep)
+		}
+		matchers, err := compileTreeChildren(tree, sep)
+		if err != nil {
+			return nil, err
+		}
+		return match.NewAnyOf(matchers...), nil
+
+	case ast.KindPattern:
+		if len(tree.Children) == 0 {
+			return match.NewNothing(), nil
+		}
+		matchers, err := compileTreeChildren(tree, sep)
+		if err != nil {
+			return nil, err
+		}
+		m, err = compileMatchers(minimizeMatchers(matchers))
+		if err != nil {
+			return nil, err
+		}
+
+	case ast.KindAny:
+		m = match.NewAny(sep)
+
+	case ast.KindSuper:
+		m = match.NewSuper()
+
+	case ast.KindSingle:
+		m = match.NewSingle(sep)
+
+	case ast.KindNothing:
+		m = match.NewNothing()
+
+	case ast.KindList:
+		l := tree.Value.(ast.List)
+		m = match.NewList([]rune(l.Chars), l.Not)
+
+	case ast.KindRange:
+		r := tree.Value.(ast.Range)
+		m = match.NewRange(r.Lo, r.Hi, r.Not)
+
+	case ast.KindText:
+		t := tree.Value.(ast.Text)
+		m = match.NewText(t.Text)
+
+	default:
+		return nil, fmt.Errorf("could not compile tree: unknown node type")
+	}
+
+	return optimizeMatcher(m), nil
+}
+
+// Compile is the package entry point: it turns a parsed glob AST into a
+// Matcher using the given separator runes.
+func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
+	m, err := compile(tree, sep)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go
new file mode 100644
index 00000000..2afde343
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/glob.go
@@ -0,0 +1,80 @@
+package glob
+
+import (
+ "github.com/gobwas/glob/compiler"
+ "github.com/gobwas/glob/syntax"
+)
+
+// Glob represents compiled glob pattern.
+type Glob interface {
+	// Match reports whether the given string matches the pattern.
+	Match(string) bool
+}
+
+// Compile creates Glob for given pattern and strings (if any present after pattern) as separators.
+// The pattern syntax is:
+//
+//    pattern:
+//        { term }
+//
+//    term:
+//        `*`         matches any sequence of non-separator characters
+//        `**`        matches any sequence of characters
+//        `?`         matches any single non-separator character
+//        `[` [ `!` ] { character-range } `]`
+//                    character class (must be non-empty)
+//        `{` pattern-list `}`
+//                    pattern alternatives
+//        c           matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
+//        `\` c       matches character c
+//
+//    character-range:
+//        c           matches character c (c != `\\`, `-`, `]`)
+//        `\` c       matches character c
+//        lo `-` hi   matches character c for lo <= c <= hi
+//
+//    pattern-list:
+//        pattern { `,` pattern }
+//                    comma-separated (without spaces) patterns
+//
+func Compile(pattern string, separators ...rune) (Glob, error) {
+	// parse the pattern text into an AST
+	ast, err := syntax.Parse(pattern)
+	if err != nil {
+		return nil, err
+	}
+
+	// lower the AST into an optimized matcher
+	matcher, err := compiler.Compile(ast, separators)
+	if err != nil {
+		return nil, err
+	}
+
+	return matcher, nil
+}
+
+// MustCompile is the same as Compile, except that if Compile returns error, this will panic.
+// It is intended for initializing package-level pattern variables.
+func MustCompile(pattern string, separators ...rune) Glob {
+	g, err := Compile(pattern, separators...)
+	if err != nil {
+		panic(err)
+	}
+
+	return g
+}
+
+// QuoteMeta returns a string that quotes all glob pattern meta characters
+// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
+func QuoteMeta(s string) string {
+	// worst case: every input byte needs an escape
+	b := make([]byte, 2*len(s))
+
+	// a byte loop is correct because all meta characters are ASCII
+	j := 0
+	for i := 0; i < len(s); i++ {
+		if syntax.Special(s[i]) {
+			b[j] = '\\'
+			j++
+		}
+		b[j] = s[i]
+		j++
+	}
+
+	return string(b[0:j])
+}
diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go
new file mode 100644
index 00000000..514a9a5c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/strings"
+)
+
+// Any matches any (possibly empty) sequence of characters that contains
+// none of the configured separator runes — the `*` wildcard.
+type Any struct {
+	Separators []rune
+}
+
+func NewAny(s []rune) Any {
+	return Any{s}
+}
+
+// Match reports whether s contains no separator rune.
+func (self Any) Match(s string) bool {
+	return strings.IndexAnyRunes(s, self.Separators) == -1
+}
+
+// Index returns 0 and every byte offset at which a match may end, i.e.
+// all prefixes of s up to (and including) the first separator position.
+func (self Any) Index(s string) (int, []int) {
+	found := strings.IndexAnyRunes(s, self.Separators)
+	switch found {
+	case -1:
+	case 0:
+		return 0, segments0
+	default:
+		s = s[:found]
+	}
+
+	segments := acquireSegments(len(s))
+	for i := range s {
+		segments = append(segments, i)
+	}
+	segments = append(segments, len(s))
+
+	return 0, segments
+}
+
+// Len reports a variable-length match.
+func (self Any) Len() int {
+	return lenNo
+}
+
+func (self Any) String() string {
+	return fmt.Sprintf("<any:![%s]>", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go
new file mode 100644
index 00000000..8e65356c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any_of.go
@@ -0,0 +1,82 @@
+package match
+
+import "fmt"
+
+// AnyOf matches when at least one of its child matchers matches
+// (pattern alternatives `{a,b}`).
+type AnyOf struct {
+	Matchers Matchers
+}
+
+func NewAnyOf(m ...Matcher) AnyOf {
+	return AnyOf{Matchers(m)}
+}
+
+func (self *AnyOf) Add(m Matcher) error {
+	self.Matchers = append(self.Matchers, m)
+	return nil
+}
+
+func (self AnyOf) Match(s string) bool {
+	for _, m := range self.Matchers {
+		if m.Match(s) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Index returns the leftmost child match position and the merged, sorted
+// union of the end segments of all children matching at that position.
+func (self AnyOf) Index(s string) (int, []int) {
+	index := -1
+
+	segments := acquireSegments(len(s))
+	for _, m := range self.Matchers {
+		idx, seg := m.Index(s)
+		if idx == -1 {
+			continue
+		}
+
+		if index == -1 || idx < index {
+			// new leftmost match: discard previously collected segments
+			index = idx
+			segments = append(segments[:0], seg...)
+			continue
+		}
+
+		if idx > index {
+			continue
+		}
+
+		// here idx == index
+		segments = appendMerge(segments, seg)
+	}
+
+	if index == -1 {
+		releaseSegments(segments)
+		return -1, nil
+	}
+
+	return index, segments
+}
+
+// Len returns the common fixed length of all children, or -1 when the
+// children disagree or any child is variable-length.
+func (self AnyOf) Len() (l int) {
+	l = -1
+	for _, m := range self.Matchers {
+		ml := m.Len()
+		switch {
+		case l == -1:
+			l = ml
+			continue
+
+		case ml == -1:
+			return -1
+
+		case l != ml:
+			return -1
+		}
+	}
+
+	return
+}
+
+func (self AnyOf) String() string {
+	return fmt.Sprintf("<any_of:[%s]>", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go
new file mode 100644
index 00000000..a8130e93
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/btree.go
@@ -0,0 +1,146 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+// BTree matches the concatenation Left+Value+Right, anchoring the search
+// on Value's Index and recursing into the left/right branches. Cached
+// rune lengths (-1 = variable) let Match prune impossible offsets.
+type BTree struct {
+	Value            Matcher
+	Left             Matcher
+	Right            Matcher
+	ValueLengthRunes int
+	LeftLengthRunes  int
+	RightLengthRunes int
+	LengthRunes      int
+}
+
+func NewBTree(Value, Left, Right Matcher) (tree BTree) {
+	tree.Value = Value
+	tree.Left = Left
+	tree.Right = Right
+
+	lenOk := true
+	if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
+		lenOk = false
+	}
+
+	if Left != nil {
+		if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
+			lenOk = false
+		}
+	}
+
+	if Right != nil {
+		if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
+			lenOk = false
+		}
+	}
+
+	// total is only known when all three parts have fixed lengths
+	if lenOk {
+		tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
+	} else {
+		tree.LengthRunes = -1
+	}
+
+	return tree
+}
+
+func (self BTree) Len() int {
+	return self.LengthRunes
+}
+
+// todo?
+func (self BTree) Index(s string) (int, []int) {
+	return -1, nil
+}
+
+func (self BTree) Match(s string) bool {
+	inputLen := len(s)
+
+	// self.Length, self.RLen and self.LLen are values meaning the length of runes for each part
+	// here we manipulating byte length for better optimizations
+	// but these checks still works, cause minLen of 1-rune string is 1 byte.
+	if self.LengthRunes != -1 && self.LengthRunes > inputLen {
+		return false
+	}
+
+	// try to cut unnecessary parts
+	// by knowledge of length of right and left part
+	var offset, limit int
+	if self.LeftLengthRunes >= 0 {
+		offset = self.LeftLengthRunes
+	}
+	if self.RightLengthRunes >= 0 {
+		limit = inputLen - self.RightLengthRunes
+	} else {
+		limit = inputLen
+	}
+
+	for offset < limit {
+		// search for matching part in substring
+		index, segments := self.Value.Index(s[offset:limit])
+		if index == -1 {
+			releaseSegments(segments)
+			return false
+		}
+
+		l := s[:offset+index]
+		var left bool
+		if self.Left != nil {
+			left = self.Left.Match(l)
+		} else {
+			left = l == ""
+		}
+
+		if left {
+			// try the longest Value match first
+			for i := len(segments) - 1; i >= 0; i-- {
+				length := segments[i]
+
+				var right bool
+				var r string
+				// if there is no string for the right branch
+				if inputLen <= offset+index+length {
+					r = ""
+				} else {
+					r = s[offset+index+length:]
+				}
+
+				if self.Right != nil {
+					right = self.Right.Match(r)
+				} else {
+					right = r == ""
+				}
+
+				if right {
+					releaseSegments(segments)
+					return true
+				}
+			}
+		}
+
+		// advance past the current anchor by one rune and retry
+		_, step := utf8.DecodeRuneInString(s[offset+index:])
+		offset += index + step
+
+		releaseSegments(segments)
+	}
+
+	return false
+}
+
+func (self BTree) String() string {
+	const n string = "<nil>"
+	var l, r string
+	if self.Left == nil {
+		l = n
+	} else {
+		l = self.Left.String()
+	}
+	if self.Right == nil {
+		r = n
+	} else {
+		r = self.Right.String()
+	}
+
+	return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r)
+}
diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go
new file mode 100644
index 00000000..0998e95b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/contains.go
@@ -0,0 +1,58 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Contains matches strings that contain (or, when Not is set, do not
+// contain) the Needle substring.
+type Contains struct {
+	Needle string
+	Not    bool
+}
+
+func NewContains(needle string, not bool) Contains {
+	return Contains{needle, not}
+}
+
+func (self Contains) Match(s string) bool {
+	return strings.Contains(s, self.Needle) != self.Not
+}
+
+// Index returns 0 and the possible end offsets of a match: every offset
+// from just past the needle to the end (positive form), or every offset
+// up to the first needle occurrence (negated form).
+func (self Contains) Index(s string) (int, []int) {
+	var offset int
+
+	idx := strings.Index(s, self.Needle)
+
+	if !self.Not {
+		if idx == -1 {
+			return -1, nil
+		}
+
+		offset = idx + len(self.Needle)
+		if len(s) <= offset {
+			return 0, []int{offset}
+		}
+		s = s[offset:]
+	} else if idx != -1 {
+		s = s[:idx]
+	}
+
+	segments := acquireSegments(len(s) + 1)
+	for i := range s {
+		segments = append(segments, offset+i)
+	}
+
+	return 0, append(segments, offset+len(s))
+}
+
+// Len reports a variable-length match.
+func (self Contains) Len() int {
+	return lenNo
+}
+
+func (self Contains) String() string {
+	var not string
+	if self.Not {
+		not = "!"
+	}
+	return fmt.Sprintf("<contains:[%s%s]>", not, self.Needle)
+}
diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go
new file mode 100644
index 00000000..7c968ee3
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/every_of.go
@@ -0,0 +1,99 @@
+package match
+
+import (
+ "fmt"
+)
+
+// EveryOf matches only when every one of its child matchers matches
+// (a conjunction of constraints, e.g. Min+Max+Contains).
+type EveryOf struct {
+	Matchers Matchers
+}
+
+func NewEveryOf(m ...Matcher) EveryOf {
+	return EveryOf{Matchers(m)}
+}
+
+func (self *EveryOf) Add(m Matcher) error {
+	self.Matchers = append(self.Matchers, m)
+	return nil
+}
+
+// NOTE(review): this tests l (the accumulator) rather than ml, so any
+// non-empty matcher set yields -1 on the first iteration; kept as-is to
+// preserve upstream behavior — confirm against gobwas/glob before changing.
+func (self EveryOf) Len() (l int) {
+	for _, m := range self.Matchers {
+		if ml := m.Len(); l > 0 {
+			l += ml
+		} else {
+			return -1
+		}
+	}
+
+	return
+}
+
+// Index intersects the children's match segments: only end offsets
+// accepted by every child survive.
+func (self EveryOf) Index(s string) (int, []int) {
+	var index int
+	var offset int
+
+	// make `in` with cap as len(s),
+	// cause it is the maximum size of output segments values
+	next := acquireSegments(len(s))
+	current := acquireSegments(len(s))
+
+	sub := s
+	for i, m := range self.Matchers {
+		idx, seg := m.Index(sub)
+		if idx == -1 {
+			releaseSegments(next)
+			releaseSegments(current)
+			return -1, nil
+		}
+
+		if i == 0 {
+			// we use copy here instead of `current = seg`
+			// cause seg is a slice from reusable buffer `in`
+			// and it could be overwritten in next iteration
+			current = append(current, seg...)
+		} else {
+			// clear the next
+			next = next[:0]
+
+			// shift earlier segments into the current sub-string's frame
+			delta := index - (idx + offset)
+			for _, ex := range current {
+				for _, n := range seg {
+					if ex+delta == n {
+						next = append(next, n)
+					}
+				}
+			}
+
+			if len(next) == 0 {
+				releaseSegments(next)
+				releaseSegments(current)
+				return -1, nil
+			}
+
+			current = append(current[:0], next...)
+		}
+
+		index = idx + offset
+		sub = s[index:]
+		offset += idx
+	}
+
+	releaseSegments(next)
+
+	return index, current
+}
+
+func (self EveryOf) Match(s string) bool {
+	for _, m := range self.Matchers {
+		if !m.Match(s) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (self EveryOf) String() string {
+	return fmt.Sprintf("<every_of:[%s]>", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go
new file mode 100644
index 00000000..7fd763ec
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/list.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+// List matches exactly one rune that is in (or, when Not is set, not in)
+// the List set — the `[abc]` / `[!abc]` character class.
+type List struct {
+	List []rune
+	Not  bool
+}
+
+func NewList(list []rune, not bool) List {
+	return List{list, not}
+}
+
+// Match reports whether s is exactly one rune and that rune satisfies
+// the (possibly negated) class.
+func (self List) Match(s string) bool {
+	r, w := utf8.DecodeRuneInString(s)
+	if len(s) > w {
+		return false
+	}
+
+	inList := runes.IndexRune(self.List, r) != -1
+	return inList == !self.Not
+}
+
+// Len reports a fixed one-rune match.
+func (self List) Len() int {
+	return lenOne
+}
+
+// Index returns the position of the first satisfying rune and a
+// single-element segment of that rune's byte width.
+func (self List) Index(s string) (int, []int) {
+	for i, r := range s {
+		if self.Not == (runes.IndexRune(self.List, r) == -1) {
+			return i, segmentsByRuneLength[utf8.RuneLen(r)]
+		}
+	}
+
+	return -1, nil
+}
+
+func (self List) String() string {
+	var not string
+	if self.Not {
+		not = "!"
+	}
+
+	return fmt.Sprintf("<list:%s[%s]>", not, string(self.List))
+}
diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go
new file mode 100644
index 00000000..f80e007f
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/match.go
@@ -0,0 +1,81 @@
+package match
+
+// todo common table of rune's length
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Canonical Len() results: fixed one rune, fixed zero runes, and
+// variable/unknown length respectively.
+const lenOne = 1
+const lenZero = 0
+const lenNo = -1
+
+// Matcher is the common interface of all compiled glob fragments.
+// Index returns the first match position in its argument plus the sorted
+// byte offsets at which that match may end; Len returns the fixed rune
+// length of any match, or -1 (lenNo) when variable.
+type Matcher interface {
+	Match(string) bool
+	Index(string) (int, []int)
+	Len() int
+	String() string
+}
+
+// Matchers is a printable list of Matcher values.
+type Matchers []Matcher
+
+func (m Matchers) String() string {
+	var s []string
+	for _, matcher := range m {
+		s = append(s, fmt.Sprint(matcher))
+	}
+
+	return fmt.Sprintf("%s", strings.Join(s, ","))
+}
+
+// appendMerge merges and sorts given already SORTED and UNIQUE segments.
+func appendMerge(target, sub []int) []int {
+	lt, ls := len(target), len(sub)
+	out := make([]int, 0, lt+ls)
+
+	for x, y := 0, 0; x < lt || y < ls; {
+		if x >= lt {
+			out = append(out, sub[y:]...)
+			break
+		}
+
+		if y >= ls {
+			out = append(out, target[x:]...)
+			break
+		}
+
+		xValue := target[x]
+		yValue := sub[y]
+
+		switch {
+
+		case xValue == yValue:
+			out = append(out, xValue)
+			x++
+			y++
+
+		case xValue < yValue:
+			out = append(out, xValue)
+			x++
+
+		case yValue < xValue:
+			out = append(out, yValue)
+			y++
+
+		}
+	}
+
+	// reuse target's backing array for the merged result
+	target = append(target[:0], out...)
+
+	return target
+}
+
+// reverseSegments reverses input in place.
+func reverseSegments(input []int) {
+	l := len(input)
+	m := l / 2
+
+	for i := 0; i < m; i++ {
+		input[i], input[l-i-1] = input[l-i-1], input[i]
+	}
+}
diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go
new file mode 100644
index 00000000..d72f69ef
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/max.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+// Max matches strings of at most Limit runes.
+type Max struct {
+	Limit int
+}
+
+func NewMax(l int) Max {
+	return Max{l}
+}
+
+// Match counts runes lazily so over-long strings bail out early.
+func (self Max) Match(s string) bool {
+	var l int
+	for range s {
+		l += 1
+		if l > self.Limit {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Index returns 0 and the byte offsets of every acceptable match end:
+// the empty prefix plus each rune boundary up to Limit runes.
+func (self Max) Index(s string) (int, []int) {
+	segments := acquireSegments(self.Limit + 1)
+	segments = append(segments, 0)
+	var count int
+	for i, r := range s {
+		count++
+		if count > self.Limit {
+			break
+		}
+		segments = append(segments, i+utf8.RuneLen(r))
+	}
+
+	return 0, segments
+}
+
+// Len reports a variable-length match.
+func (self Max) Len() int {
+	return lenNo
+}
+
+func (self Max) String() string {
+	return fmt.Sprintf("<max:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go
new file mode 100644
index 00000000..db57ac8e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/min.go
@@ -0,0 +1,57 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+// Min matches strings of at least Limit runes.
+type Min struct {
+	Limit int
+}
+
+func NewMin(l int) Min {
+	return Min{l}
+}
+
+// Match counts runes lazily and succeeds as soon as Limit is reached.
+func (self Min) Match(s string) bool {
+	var l int
+	for range s {
+		l += 1
+		if l >= self.Limit {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Index returns 0 and every byte offset at which at least Limit runes
+// have been consumed, or -1 when s is too short.
+func (self Min) Index(s string) (int, []int) {
+	var count int
+
+	c := len(s) - self.Limit + 1
+	if c <= 0 {
+		return -1, nil
+	}
+
+	segments := acquireSegments(c)
+	for i, r := range s {
+		count++
+		if count >= self.Limit {
+			segments = append(segments, i+utf8.RuneLen(r))
+		}
+	}
+
+	if len(segments) == 0 {
+		return -1, nil
+	}
+
+	return 0, segments
+}
+
+// Len reports a variable-length match.
+func (self Min) Len() int {
+	return lenNo
+}
+
+func (self Min) String() string {
+	return fmt.Sprintf("<min:%d>", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go
new file mode 100644
index 00000000..0d4ecd36
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/nothing.go
@@ -0,0 +1,27 @@
+package match
+
+import (
+ "fmt"
+)
+
+// Nothing matches only the empty string (a fully reduced pattern).
+type Nothing struct{}
+
+func NewNothing() Nothing {
+	return Nothing{}
+}
+
+func (self Nothing) Match(s string) bool {
+	return len(s) == 0
+}
+
+// Index matches the empty string at position 0.
+func (self Nothing) Index(s string) (int, []int) {
+	return 0, segments0
+}
+
+// Len reports a fixed zero-rune match.
+func (self Nothing) Len() int {
+	return lenZero
+}
+
+func (self Nothing) String() string {
+	return fmt.Sprintf("<nothing>")
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go
new file mode 100644
index 00000000..a7347250
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix.go
@@ -0,0 +1,50 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// Prefix matches strings that start with Prefix, followed by anything.
+type Prefix struct {
+	Prefix string
+}
+
+func NewPrefix(p string) Prefix {
+	return Prefix{p}
+}
+
+// Index returns the first occurrence of Prefix and every rune boundary
+// after it as a possible match end.
+func (self Prefix) Index(s string) (int, []int) {
+	idx := strings.Index(s, self.Prefix)
+	if idx == -1 {
+		return -1, nil
+	}
+
+	length := len(self.Prefix)
+	var sub string
+	if len(s) > idx+length {
+		sub = s[idx+length:]
+	} else {
+		sub = ""
+	}
+
+	segments := acquireSegments(len(sub) + 1)
+	segments = append(segments, length)
+	for i, r := range sub {
+		segments = append(segments, length+i+utf8.RuneLen(r))
+	}
+
+	return idx, segments
+}
+
+// Len reports a variable-length match.
+func (self Prefix) Len() int {
+	return lenNo
+}
+
+func (self Prefix) Match(s string) bool {
+	return strings.HasPrefix(s, self.Prefix)
+}
+
+func (self Prefix) String() string {
+	return fmt.Sprintf("<prefix:%s>", self.Prefix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go
new file mode 100644
index 00000000..8ee58fe1
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix_any.go
@@ -0,0 +1,55 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ sutil "github.com/gobwas/glob/util/strings"
+)
+
+// PrefixAny matches Prefix followed by any run of non-separator
+// characters (the `text*` pattern shape).
+type PrefixAny struct {
+	Prefix     string
+	Separators []rune
+}
+
+func NewPrefixAny(s string, sep []rune) PrefixAny {
+	return PrefixAny{s, sep}
+}
+
+// Index locates Prefix and enumerates every rune boundary up to the
+// first separator as a possible match end.
+func (self PrefixAny) Index(s string) (int, []int) {
+	idx := strings.Index(s, self.Prefix)
+	if idx == -1 {
+		return -1, nil
+	}
+
+	n := len(self.Prefix)
+	sub := s[idx+n:]
+	i := sutil.IndexAnyRunes(sub, self.Separators)
+	if i > -1 {
+		sub = sub[:i]
+	}
+
+	seg := acquireSegments(len(sub) + 1)
+	seg = append(seg, n)
+	for i, r := range sub {
+		seg = append(seg, n+i+utf8.RuneLen(r))
+	}
+
+	return idx, seg
+}
+
+// Len reports a variable-length match.
+func (self PrefixAny) Len() int {
+	return lenNo
+}
+
+func (self PrefixAny) Match(s string) bool {
+	if !strings.HasPrefix(s, self.Prefix) {
+		return false
+	}
+	return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
+}
+
+func (self PrefixAny) String() string {
+	return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
new file mode 100644
index 00000000..8208085a
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
@@ -0,0 +1,62 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+// PrefixSuffix matches strings that start with Prefix and end with
+// Suffix (the `text*text` pattern shape).
+type PrefixSuffix struct {
+	Prefix, Suffix string
+}
+
+func NewPrefixSuffix(p, s string) PrefixSuffix {
+	return PrefixSuffix{p, s}
+}
+
+// Index locates Prefix and returns, in ascending order, the end offset
+// of every Suffix occurrence after it.
+func (self PrefixSuffix) Index(s string) (int, []int) {
+	prefixIdx := strings.Index(s, self.Prefix)
+	if prefixIdx == -1 {
+		return -1, nil
+	}
+
+	suffixLen := len(self.Suffix)
+	if suffixLen <= 0 {
+		return prefixIdx, []int{len(s) - prefixIdx}
+	}
+
+	if (len(s) - prefixIdx) <= 0 {
+		return -1, nil
+	}
+
+	segments := acquireSegments(len(s) - prefixIdx)
+	// scan suffix occurrences right-to-left, then reverse for sorted output
+	for sub := s[prefixIdx:]; ; {
+		suffixIdx := strings.LastIndex(sub, self.Suffix)
+		if suffixIdx == -1 {
+			break
+		}
+
+		segments = append(segments, suffixIdx+suffixLen)
+		sub = sub[:suffixIdx]
+	}
+
+	if len(segments) == 0 {
+		releaseSegments(segments)
+		return -1, nil
+	}
+
+	reverseSegments(segments)
+
+	return prefixIdx, segments
+}
+
+// Len reports a variable-length match.
+func (self PrefixSuffix) Len() int {
+	return lenNo
+}
+
+func (self PrefixSuffix) Match(s string) bool {
+	return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
+}
+
+func (self PrefixSuffix) String() string {
+	return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go
new file mode 100644
index 00000000..ce30245a
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/range.go
@@ -0,0 +1,48 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+// Range matches exactly one rune inside (or, when Not is set, outside)
+// the inclusive range [Lo, Hi] — the `[a-z]` / `[!a-z]` class.
+type Range struct {
+	Lo, Hi rune
+	Not    bool
+}
+
+func NewRange(lo, hi rune, not bool) Range {
+	return Range{lo, hi, not}
+}
+
+// Len reports a fixed one-rune match.
+func (self Range) Len() int {
+	return lenOne
+}
+
+// Match reports whether s is exactly one rune satisfying the
+// (possibly negated) range.
+func (self Range) Match(s string) bool {
+	r, w := utf8.DecodeRuneInString(s)
+	if len(s) > w {
+		return false
+	}
+
+	inRange := r >= self.Lo && r <= self.Hi
+
+	return inRange == !self.Not
+}
+
+// Index returns the position of the first satisfying rune and a
+// single-element segment of that rune's byte width.
+func (self Range) Index(s string) (int, []int) {
+	for i, r := range s {
+		if self.Not != (r >= self.Lo && r <= self.Hi) {
+			return i, segmentsByRuneLength[utf8.RuneLen(r)]
+		}
+	}
+
+	return -1, nil
+}
+
+func (self Range) String() string {
+	var not string
+	if self.Not {
+		not = "!"
+	}
+	return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi))
+}
diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go
new file mode 100644
index 00000000..4379042e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/row.go
@@ -0,0 +1,77 @@
+package match
+
+import (
+ "fmt"
+)
+
+// Row matches a fixed-length sequence of fixed-length matchers applied
+// back to back; RunesLength is the precomputed total rune count.
+type Row struct {
+	Matchers    Matchers
+	RunesLength int
+	Segments    []int
+}
+
+func NewRow(len int, m ...Matcher) Row {
+	return Row{
+		Matchers:    Matchers(m),
+		RunesLength: len,
+		Segments:    []int{len},
+	}
+}
+
+// matchAll feeds each child matcher exactly its Len() runes from s,
+// walking rune boundaries manually to avoid extra allocations.
+func (self Row) matchAll(s string) bool {
+	var idx int
+	for _, m := range self.Matchers {
+		length := m.Len()
+
+		var next, i int
+		for next = range s[idx:] {
+			i++
+			if i == length {
+				break
+			}
+		}
+
+		if i < length || !m.Match(s[idx:idx+next+1]) {
+			return false
+		}
+
+		idx += next + 1
+	}
+
+	return true
+}
+
+// lenOk reports whether s has exactly RunesLength runes, bailing out
+// early on over-long input.
+func (self Row) lenOk(s string) bool {
+	var i int
+	for range s {
+		i++
+		if i > self.RunesLength {
+			return false
+		}
+	}
+	return self.RunesLength == i
+}
+
+func (self Row) Match(s string) bool {
+	return self.lenOk(s) && self.matchAll(s)
+}
+
+func (self Row) Len() (l int) {
+	return self.RunesLength
+}
+
+// Index slides the row across s and returns the first start position
+// where all children match.
+func (self Row) Index(s string) (int, []int) {
+	for i := range s {
+		if len(s[i:]) < self.RunesLength {
+			break
+		}
+		if self.matchAll(s[i:]) {
+			return i, self.Segments
+		}
+	}
+	return -1, nil
+}
+
+func (self Row) String() string {
+	return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go
new file mode 100644
index 00000000..9ea6f309
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/segments.go
@@ -0,0 +1,91 @@
+package match
+
+import (
+ "sync"
+)
+
+// SomePool abstracts a reusable []int buffer pool.
+type SomePool interface {
+	Get() []int
+	Put([]int)
+}
+
+// one pool per power-of-two capacity; only slots between cacheFrom and
+// cacheToAndHigher are initialized (see init below)
+var segmentsPools [1024]sync.Pool
+
+// toPowerOfTwo rounds v up to the next power of two (classic bit smear).
+func toPowerOfTwo(v int) int {
+	v--
+	v |= v >> 1
+	v |= v >> 2
+	v |= v >> 4
+	v |= v >> 8
+	v |= v >> 16
+	v++
+
+	return v
+}
+
+const (
+	cacheFrom             = 16
+	cacheToAndHigher      = 1024
+	cacheFromIndex        = 15
+	cacheToAndHigherIndex = 1023
+)
+
+// shared single-element segment slices for the common rune widths;
+// callers must treat these as read-only
+var (
+	segments0 = []int{0}
+	segments1 = []int{1}
+	segments2 = []int{2}
+	segments3 = []int{3}
+	segments4 = []int{4}
+)
+
+// segmentsByRuneLength maps a rune's byte width (1-4) to its shared segment.
+var segmentsByRuneLength [5][]int = [5][]int{
+	0: segments0,
+	1: segments1,
+	2: segments2,
+	3: segments3,
+	4: segments4,
+}
+
+func init() {
+	// create a pool for each power-of-two capacity in [cacheFrom, cacheToAndHigher]
+	for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
+		func(i int) {
+			segmentsPools[i-1] = sync.Pool{New: func() interface{} {
+				return make([]int, 0, i)
+			}}
+		}(i)
+	}
+}
+
+// getTableIndex maps a requested capacity to its pool slot, clamping to
+// the smallest and largest cached sizes.
+func getTableIndex(c int) int {
+	p := toPowerOfTwo(c)
+	switch {
+	case p >= cacheToAndHigher:
+		return cacheToAndHigherIndex
+	case p <= cacheFrom:
+		return cacheFromIndex
+	default:
+		return p - 1
+	}
+}
+
+// acquireSegments returns an empty []int with capacity at least c,
+// reusing a pooled buffer for larger sizes.
+func acquireSegments(c int) []int {
+	// make []int with less capacity than cacheFrom
+	// is faster than acquiring it from pool
+	if c < cacheFrom {
+		return make([]int, 0, c)
+	}
+
+	return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
+}
+
+// releaseSegments returns s to its pool; small buffers are simply dropped.
+func releaseSegments(s []int) {
+	c := cap(s)
+
+	// make []int with less capacity than cacheFrom
+	// is faster than acquiring it from pool
+	if c < cacheFrom {
+		return
+	}
+
+	segmentsPools[getTableIndex(c)].Put(s)
+}
diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go
new file mode 100644
index 00000000..ee6e3954
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/single.go
@@ -0,0 +1,43 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+// Single represents `?`: exactly one rune that is not a separator.
+type Single struct {
+	Separators []rune
+}
+
+func NewSingle(s []rune) Single {
+	return Single{s}
+}
+
+// Match reports whether s is exactly one rune and not a separator.
+func (self Single) Match(s string) bool {
+	r, w := utf8.DecodeRuneInString(s)
+	if len(s) > w {
+		return false
+	}
+
+	return runes.IndexRune(self.Separators, r) == -1
+}
+
+// Len reports a fixed one-rune match.
+func (self Single) Len() int {
+	return lenOne
+}
+
+// Index returns the position of the first non-separator rune and a
+// single-element segment of that rune's byte width.
+func (self Single) Index(s string) (int, []int) {
+	for i, r := range s {
+		if runes.IndexRune(self.Separators, r) == -1 {
+			return i, segmentsByRuneLength[utf8.RuneLen(r)]
+		}
+	}
+
+	return -1, nil
+}
+
+func (self Single) String() string {
+	return fmt.Sprintf("<single:![%s]>", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go
new file mode 100644
index 00000000..85bea8c6
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/suffix.go
@@ -0,0 +1,35 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Suffix struct {
+ Suffix string
+}
+
+func NewSuffix(s string) Suffix {
+ return Suffix{s}
+}
+
+func (self Suffix) Len() int {
+ return lenNo
+}
+
+func (self Suffix) Match(s string) bool {
+ return strings.HasSuffix(s, self.Suffix)
+}
+
+func (self Suffix) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Suffix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ return 0, []int{idx + len(self.Suffix)}
+}
+
+func (self Suffix) String() string {
+ return fmt.Sprintf("<suffix:%s>", self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go
new file mode 100644
index 00000000..c5106f81
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/suffix_any.go
@@ -0,0 +1,43 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+
+ sutil "github.com/gobwas/glob/util/strings"
+)
+
+type SuffixAny struct {
+ Suffix string
+ Separators []rune
+}
+
+func NewSuffixAny(s string, sep []rune) SuffixAny {
+ return SuffixAny{s, sep}
+}
+
+func (self SuffixAny) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Suffix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1
+
+ return i, []int{idx + len(self.Suffix) - i}
+}
+
+func (self SuffixAny) Len() int {
+ return lenNo
+}
+
+func (self SuffixAny) Match(s string) bool {
+ if !strings.HasSuffix(s, self.Suffix) {
+ return false
+ }
+ return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
+}
+
+func (self SuffixAny) String() string {
+ return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go
new file mode 100644
index 00000000..3875950b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/super.go
@@ -0,0 +1,33 @@
+package match
+
+import (
+ "fmt"
+)
+
+type Super struct{}
+
+func NewSuper() Super {
+ return Super{}
+}
+
+func (self Super) Match(s string) bool {
+ return true
+}
+
+func (self Super) Len() int {
+ return lenNo
+}
+
+func (self Super) Index(s string) (int, []int) {
+ segments := acquireSegments(len(s) + 1)
+ for i := range s {
+ segments = append(segments, i)
+ }
+ segments = append(segments, len(s))
+
+ return 0, segments
+}
+
+func (self Super) String() string {
+ return fmt.Sprintf("<super>")
+}
diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go
new file mode 100644
index 00000000..0a17616d
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/text.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// raw represents raw string to match
+type Text struct {
+ Str string
+ RunesLength int
+ BytesLength int
+ Segments []int
+}
+
+func NewText(s string) Text {
+ return Text{
+ Str: s,
+ RunesLength: utf8.RuneCountInString(s),
+ BytesLength: len(s),
+ Segments: []int{len(s)},
+ }
+}
+
+func (self Text) Match(s string) bool {
+ return self.Str == s
+}
+
+func (self Text) Len() int {
+ return self.RunesLength
+}
+
+func (self Text) Index(s string) (int, []int) {
+ index := strings.Index(s, self.Str)
+ if index == -1 {
+ return -1, nil
+ }
+
+ return index, self.Segments
+}
+
+func (self Text) String() string {
+ return fmt.Sprintf("<text:`%v`>", self.Str)
+}
diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md
new file mode 100644
index 00000000..f58144e7
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/readme.md
@@ -0,0 +1,148 @@
+# glob.[go](https://golang.org)
+
+[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
+
+> Go Globbing Library.
+
+## Install
+
+```shell
+ go get github.com/gobwas/glob
+```
+
+## Example
+
+```go
+
+package main
+
+import "github.com/gobwas/glob"
+
+func main() {
+ var g glob.Glob
+
+ // create simple glob
+ g = glob.MustCompile("*.github.com")
+ g.Match("api.github.com") // true
+
+ // quote meta characters and then create simple glob
+ g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
+ g.Match("*.github.com") // true
+
+ // create new glob with set of delimiters as ["."]
+ g = glob.MustCompile("api.*.com", '.')
+ g.Match("api.github.com") // true
+ g.Match("api.gi.hub.com") // false
+
+ // create new glob with set of delimiters as ["."]
+ // but now with super wildcard
+ g = glob.MustCompile("api.**.com", '.')
+ g.Match("api.github.com") // true
+ g.Match("api.gi.hub.com") // true
+
+ // create glob with single symbol wildcard
+ g = glob.MustCompile("?at")
+ g.Match("cat") // true
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with single symbol wildcard and delimiters ['f']
+ g = glob.MustCompile("?at", 'f')
+ g.Match("cat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-list matchers
+ g = glob.MustCompile("[abc]at")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-list matchers
+ g = glob.MustCompile("[!abc]at")
+ g.Match("cat") // false
+ g.Match("bat") // false
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with character-range matchers
+ g = glob.MustCompile("[a-c]at")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-range matchers
+ g = glob.MustCompile("[!a-c]at")
+ g.Match("cat") // false
+ g.Match("bat") // false
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with pattern-alternatives list
+ g = glob.MustCompile("{cat,bat,[fr]at}")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // true
+ g.Match("rat") // true
+ g.Match("at") // false
+ g.Match("zat") // false
+}
+
+```
+
+## Performance
+
+This library is created for compile-once patterns. This means, that compilation could take time, but
+strings matching is done faster, than in case when always parsing template.
+
+If you will not use compiled `glob.Glob` object, and do `g := glob.MustCompile(pattern); g.Match(...)` every time, then your code will be much more slower.
+
+Run `go test -bench=.` from source root to see the benchmarks:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
+`https://*.google.*` | `https://account.google.com` | `true` | 96
+`https://*.google.*` | `https://google.com` | `false` | 66
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
+`abc*` | `abcdef` | `true` | 8.15
+`abc*` | `af` | `false` | 5.68
+`*def` | `abcdef` | `true` | 8.84
+`*def` | `af` | `false` | 5.74
+`ab*ef` | `abcdef` | `true` | 15.2
+`ab*ef` | `af` | `false` | 10.4
+
+The same things with `regexp` package:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
+`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
+`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
+`^abc.*$` | `abcdef` | `true` | 237
+`^abc.*$` | `af` | `false` | 100
+`^.*def$` | `abcdef` | `true` | 464
+`^.*def$` | `af` | `false` | 265
+`^ab.*ef$` | `abcdef` | `true` | 375
+`^ab.*ef$` | `af` | `false` | 145
+
+[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/glob
+[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/glob
+
+## Syntax
+
+Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
+except that `**` is aka super-asterisk, that do not sensitive for separators.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go
new file mode 100644
index 00000000..3220a694
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go
@@ -0,0 +1,122 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type Node struct {
+ Parent *Node
+ Children []*Node
+ Value interface{}
+ Kind Kind
+}
+
+func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
+ n := &Node{
+ Kind: k,
+ Value: v,
+ }
+ for _, c := range ch {
+ Insert(n, c)
+ }
+ return n
+}
+
+func (a *Node) Equal(b *Node) bool {
+ if a.Kind != b.Kind {
+ return false
+ }
+ if a.Value != b.Value {
+ return false
+ }
+ if len(a.Children) != len(b.Children) {
+ return false
+ }
+ for i, c := range a.Children {
+ if !c.Equal(b.Children[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Node) String() string {
+ var buf bytes.Buffer
+ buf.WriteString(a.Kind.String())
+ if a.Value != nil {
+ buf.WriteString(" =")
+ buf.WriteString(fmt.Sprintf("%v", a.Value))
+ }
+ if len(a.Children) > 0 {
+ buf.WriteString(" [")
+ for i, c := range a.Children {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(c.String())
+ }
+ buf.WriteString("]")
+ }
+ return buf.String()
+}
+
+func Insert(parent *Node, children ...*Node) {
+ parent.Children = append(parent.Children, children...)
+ for _, ch := range children {
+ ch.Parent = parent
+ }
+}
+
+type List struct {
+ Not bool
+ Chars string
+}
+
+type Range struct {
+ Not bool
+ Lo, Hi rune
+}
+
+type Text struct {
+ Text string
+}
+
+type Kind int
+
+const (
+ KindNothing Kind = iota
+ KindPattern
+ KindList
+ KindRange
+ KindText
+ KindAny
+ KindSuper
+ KindSingle
+ KindAnyOf
+)
+
+func (k Kind) String() string {
+ switch k {
+ case KindNothing:
+ return "Nothing"
+ case KindPattern:
+ return "Pattern"
+ case KindList:
+ return "List"
+ case KindRange:
+ return "Range"
+ case KindText:
+ return "Text"
+ case KindAny:
+ return "Any"
+ case KindSuper:
+ return "Super"
+ case KindSingle:
+ return "Single"
+ case KindAnyOf:
+ return "AnyOf"
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go
new file mode 100644
index 00000000..429b4094
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go
@@ -0,0 +1,157 @@
+package ast
+
+import (
+ "errors"
+ "fmt"
+ "github.com/gobwas/glob/syntax/lexer"
+ "unicode/utf8"
+)
+
+type Lexer interface {
+ Next() lexer.Token
+}
+
+type parseFn func(*Node, Lexer) (parseFn, *Node, error)
+
+func Parse(lexer Lexer) (*Node, error) {
+ var parser parseFn
+
+ root := NewNode(KindPattern, nil)
+
+ var (
+ tree *Node
+ err error
+ )
+ for parser, tree = parserMain, root; parser != nil; {
+ parser, tree, err = parser(tree, lexer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return root, nil
+}
+
+func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
+ for {
+ token := lex.Next()
+ switch token.Type {
+ case lexer.EOF:
+ return nil, tree, nil
+
+ case lexer.Error:
+ return nil, tree, errors.New(token.Raw)
+
+ case lexer.Text:
+ Insert(tree, NewNode(KindText, Text{token.Raw}))
+ return parserMain, tree, nil
+
+ case lexer.Any:
+ Insert(tree, NewNode(KindAny, nil))
+ return parserMain, tree, nil
+
+ case lexer.Super:
+ Insert(tree, NewNode(KindSuper, nil))
+ return parserMain, tree, nil
+
+ case lexer.Single:
+ Insert(tree, NewNode(KindSingle, nil))
+ return parserMain, tree, nil
+
+ case lexer.RangeOpen:
+ return parserRange, tree, nil
+
+ case lexer.TermsOpen:
+ a := NewNode(KindAnyOf, nil)
+ Insert(tree, a)
+
+ p := NewNode(KindPattern, nil)
+ Insert(a, p)
+
+ return parserMain, p, nil
+
+ case lexer.Separator:
+ p := NewNode(KindPattern, nil)
+ Insert(tree.Parent, p)
+
+ return parserMain, p, nil
+
+ case lexer.TermsClose:
+ return parserMain, tree.Parent.Parent, nil
+
+ default:
+ return nil, tree, fmt.Errorf("unexpected token: %s", token)
+ }
+ }
+ return nil, tree, fmt.Errorf("unknown error")
+}
+
+func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
+ var (
+ not bool
+ lo rune
+ hi rune
+ chars string
+ )
+ for {
+ token := lex.Next()
+ switch token.Type {
+ case lexer.EOF:
+ return nil, tree, errors.New("unexpected end")
+
+ case lexer.Error:
+ return nil, tree, errors.New(token.Raw)
+
+ case lexer.Not:
+ not = true
+
+ case lexer.RangeLo:
+ r, w := utf8.DecodeRuneInString(token.Raw)
+ if len(token.Raw) > w {
+ return nil, tree, fmt.Errorf("unexpected length of lo character")
+ }
+ lo = r
+
+ case lexer.RangeBetween:
+ //
+
+ case lexer.RangeHi:
+ r, w := utf8.DecodeRuneInString(token.Raw)
+ if len(token.Raw) > w {
+ return nil, tree, fmt.Errorf("unexpected length of lo character")
+ }
+
+ hi = r
+
+ if hi < lo {
+ return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
+ }
+
+ case lexer.Text:
+ chars = token.Raw
+
+ case lexer.RangeClose:
+ isRange := lo != 0 && hi != 0
+ isChars := chars != ""
+
+ if isChars == isRange {
+ return nil, tree, fmt.Errorf("could not parse range")
+ }
+
+ if isRange {
+ Insert(tree, NewNode(KindRange, Range{
+ Lo: lo,
+ Hi: hi,
+ Not: not,
+ }))
+ } else {
+ Insert(tree, NewNode(KindList, List{
+ Chars: chars,
+ Not: not,
+ }))
+ }
+
+ return parserMain, tree, nil
+ }
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
new file mode 100644
index 00000000..a1c8d196
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
@@ -0,0 +1,273 @@
+package lexer
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+const (
+ char_any = '*'
+ char_comma = ','
+ char_single = '?'
+ char_escape = '\\'
+ char_range_open = '['
+ char_range_close = ']'
+ char_terms_open = '{'
+ char_terms_close = '}'
+ char_range_not = '!'
+ char_range_between = '-'
+)
+
+var specials = []byte{
+ char_any,
+ char_single,
+ char_escape,
+ char_range_open,
+ char_range_close,
+ char_terms_open,
+ char_terms_close,
+}
+
+func Special(c byte) bool {
+ return bytes.IndexByte(specials, c) != -1
+}
+
+type tokens []Token
+
+func (i *tokens) shift() (ret Token) {
+ ret = (*i)[0]
+ copy(*i, (*i)[1:])
+ *i = (*i)[:len(*i)-1]
+ return
+}
+
+func (i *tokens) push(v Token) {
+ *i = append(*i, v)
+}
+
+func (i *tokens) empty() bool {
+ return len(*i) == 0
+}
+
+var eof rune = 0
+
+type lexer struct {
+ data string
+ pos int
+ err error
+
+ tokens tokens
+ termsLevel int
+
+ lastRune rune
+ lastRuneSize int
+ hasRune bool
+}
+
+func NewLexer(source string) *lexer {
+ l := &lexer{
+ data: source,
+ tokens: tokens(make([]Token, 0, 4)),
+ }
+ return l
+}
+
+func (l *lexer) Next() Token {
+ if l.err != nil {
+ return Token{Error, l.err.Error()}
+ }
+ if !l.tokens.empty() {
+ return l.tokens.shift()
+ }
+
+ l.fetchItem()
+ return l.Next()
+}
+
+func (l *lexer) peek() (r rune, w int) {
+ if l.pos == len(l.data) {
+ return eof, 0
+ }
+
+ r, w = utf8.DecodeRuneInString(l.data[l.pos:])
+ if r == utf8.RuneError {
+ l.errorf("could not read rune")
+ r = eof
+ w = 0
+ }
+
+ return
+}
+
+func (l *lexer) read() rune {
+ if l.hasRune {
+ l.hasRune = false
+ l.seek(l.lastRuneSize)
+ return l.lastRune
+ }
+
+ r, s := l.peek()
+ l.seek(s)
+
+ l.lastRune = r
+ l.lastRuneSize = s
+
+ return r
+}
+
+func (l *lexer) seek(w int) {
+ l.pos += w
+}
+
+func (l *lexer) unread() {
+ if l.hasRune {
+ l.errorf("could not unread rune")
+ return
+ }
+ l.seek(-l.lastRuneSize)
+ l.hasRune = true
+}
+
+func (l *lexer) errorf(f string, v ...interface{}) {
+ l.err = fmt.Errorf(f, v...)
+}
+
+func (l *lexer) inTerms() bool {
+ return l.termsLevel > 0
+}
+
+func (l *lexer) termsEnter() {
+ l.termsLevel++
+}
+
+func (l *lexer) termsLeave() {
+ l.termsLevel--
+}
+
+var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
+var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)
+
+func (l *lexer) fetchItem() {
+ r := l.read()
+ switch {
+ case r == eof:
+ l.tokens.push(Token{EOF, ""})
+
+ case r == char_terms_open:
+ l.termsEnter()
+ l.tokens.push(Token{TermsOpen, string(r)})
+
+ case r == char_comma && l.inTerms():
+ l.tokens.push(Token{Separator, string(r)})
+
+ case r == char_terms_close && l.inTerms():
+ l.tokens.push(Token{TermsClose, string(r)})
+ l.termsLeave()
+
+ case r == char_range_open:
+ l.tokens.push(Token{RangeOpen, string(r)})
+ l.fetchRange()
+
+ case r == char_single:
+ l.tokens.push(Token{Single, string(r)})
+
+ case r == char_any:
+ if l.read() == char_any {
+ l.tokens.push(Token{Super, string(r) + string(r)})
+ } else {
+ l.unread()
+ l.tokens.push(Token{Any, string(r)})
+ }
+
+ default:
+ l.unread()
+
+ var breakers []rune
+ if l.inTerms() {
+ breakers = inTermsBreakers
+ } else {
+ breakers = inTextBreakers
+ }
+ l.fetchText(breakers)
+ }
+}
+
+func (l *lexer) fetchRange() {
+ var wantHi bool
+ var wantClose bool
+ var seenNot bool
+ for {
+ r := l.read()
+ if r == eof {
+ l.errorf("unexpected end of input")
+ return
+ }
+
+ if wantClose {
+ if r != char_range_close {
+ l.errorf("expected close range character")
+ } else {
+ l.tokens.push(Token{RangeClose, string(r)})
+ }
+ return
+ }
+
+ if wantHi {
+ l.tokens.push(Token{RangeHi, string(r)})
+ wantClose = true
+ continue
+ }
+
+ if !seenNot && r == char_range_not {
+ l.tokens.push(Token{Not, string(r)})
+ seenNot = true
+ continue
+ }
+
+ if n, w := l.peek(); n == char_range_between {
+ l.seek(w)
+ l.tokens.push(Token{RangeLo, string(r)})
+ l.tokens.push(Token{RangeBetween, string(n)})
+ wantHi = true
+ continue
+ }
+
+ l.unread() // unread first peek and fetch as text
+ l.fetchText([]rune{char_range_close})
+ wantClose = true
+ }
+}
+
+func (l *lexer) fetchText(breakers []rune) {
+ var data []rune
+ var escaped bool
+
+reading:
+ for {
+ r := l.read()
+ if r == eof {
+ break
+ }
+
+ if !escaped {
+ if r == char_escape {
+ escaped = true
+ continue
+ }
+
+ if runes.IndexRune(breakers, r) != -1 {
+ l.unread()
+ break reading
+ }
+ }
+
+ escaped = false
+ data = append(data, r)
+ }
+
+ if len(data) > 0 {
+ l.tokens.push(Token{Text, string(data)})
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go
new file mode 100644
index 00000000..2797c4e8
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go
@@ -0,0 +1,88 @@
+package lexer
+
+import "fmt"
+
+type TokenType int
+
+const (
+ EOF TokenType = iota
+ Error
+ Text
+ Char
+ Any
+ Super
+ Single
+ Not
+ Separator
+ RangeOpen
+ RangeClose
+ RangeLo
+ RangeHi
+ RangeBetween
+ TermsOpen
+ TermsClose
+)
+
+func (tt TokenType) String() string {
+ switch tt {
+ case EOF:
+ return "eof"
+
+ case Error:
+ return "error"
+
+ case Text:
+ return "text"
+
+ case Char:
+ return "char"
+
+ case Any:
+ return "any"
+
+ case Super:
+ return "super"
+
+ case Single:
+ return "single"
+
+ case Not:
+ return "not"
+
+ case Separator:
+ return "separator"
+
+ case RangeOpen:
+ return "range_open"
+
+ case RangeClose:
+ return "range_close"
+
+ case RangeLo:
+ return "range_lo"
+
+ case RangeHi:
+ return "range_hi"
+
+ case RangeBetween:
+ return "range_between"
+
+ case TermsOpen:
+ return "terms_open"
+
+ case TermsClose:
+ return "terms_close"
+
+ default:
+ return "undef"
+ }
+}
+
+type Token struct {
+ Type TokenType
+ Raw string
+}
+
+func (t Token) String() string {
+ return fmt.Sprintf("%v<%q>", t.Type, t.Raw)
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go
new file mode 100644
index 00000000..1d168b14
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/syntax.go
@@ -0,0 +1,14 @@
+package syntax
+
+import (
+ "github.com/gobwas/glob/syntax/ast"
+ "github.com/gobwas/glob/syntax/lexer"
+)
+
+func Parse(s string) (*ast.Node, error) {
+ return ast.Parse(lexer.NewLexer(s))
+}
+
+func Special(b byte) bool {
+ return lexer.Special(b)
+}
diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go
new file mode 100644
index 00000000..a7235564
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/util/runes/runes.go
@@ -0,0 +1,154 @@
+package runes
+
+func Index(s, needle []rune) int {
+ ls, ln := len(s), len(needle)
+
+ switch {
+ case ln == 0:
+ return 0
+ case ln == 1:
+ return IndexRune(s, needle[0])
+ case ln == ls:
+ if Equal(s, needle) {
+ return 0
+ }
+ return -1
+ case ln > ls:
+ return -1
+ }
+
+head:
+ for i := 0; i < ls && ls-i >= ln; i++ {
+ for y := 0; y < ln; y++ {
+ if s[i+y] != needle[y] {
+ continue head
+ }
+ }
+
+ return i
+ }
+
+ return -1
+}
+
+func LastIndex(s, needle []rune) int {
+ ls, ln := len(s), len(needle)
+
+ switch {
+ case ln == 0:
+ if ls == 0 {
+ return 0
+ }
+ return ls
+ case ln == 1:
+ return IndexLastRune(s, needle[0])
+ case ln == ls:
+ if Equal(s, needle) {
+ return 0
+ }
+ return -1
+ case ln > ls:
+ return -1
+ }
+
+head:
+ for i := ls - 1; i >= 0 && i >= ln; i-- {
+ for y := ln - 1; y >= 0; y-- {
+ if s[i-(ln-y-1)] != needle[y] {
+ continue head
+ }
+ }
+
+ return i - ln + 1
+ }
+
+ return -1
+}
+
+// IndexAny returns the index of the first instance of any Unicode code point
+// from chars in s, or -1 if no Unicode code point from chars is present in s.
+func IndexAny(s, chars []rune) int {
+ if len(chars) > 0 {
+ for i, c := range s {
+ for _, m := range chars {
+ if c == m {
+ return i
+ }
+ }
+ }
+ }
+ return -1
+}
+
+func Contains(s, needle []rune) bool {
+ return Index(s, needle) >= 0
+}
+
+func Max(s []rune) (max rune) {
+ for _, r := range s {
+ if r > max {
+ max = r
+ }
+ }
+
+ return
+}
+
+func Min(s []rune) rune {
+ min := rune(-1)
+ for _, r := range s {
+ if min == -1 {
+ min = r
+ continue
+ }
+
+ if r < min {
+ min = r
+ }
+ }
+
+ return min
+}
+
+func IndexRune(s []rune, r rune) int {
+ for i, c := range s {
+ if c == r {
+ return i
+ }
+ }
+ return -1
+}
+
+func IndexLastRune(s []rune, r rune) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == r {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func Equal(a, b []rune) bool {
+ if len(a) == len(b) {
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ return false
+}
+
+// HasPrefix tests whether the string s begins with prefix.
+func HasPrefix(s, prefix []rune) bool {
+ return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
+}
+
+// HasSuffix tests whether the string s ends with suffix.
+func HasSuffix(s, suffix []rune) bool {
+ return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go
new file mode 100644
index 00000000..e8ee1920
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/util/strings/strings.go
@@ -0,0 +1,39 @@
+package strings
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+func IndexAnyRunes(s string, rs []rune) int {
+ for _, r := range rs {
+ if i := strings.IndexRune(s, r); i != -1 {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func LastIndexAnyRunes(s string, rs []rune) int {
+ for _, r := range rs {
+ i := -1
+ if 0 <= r && r < utf8.RuneSelf {
+ i = strings.LastIndexByte(s, byte(r))
+ } else {
+ sub := s
+ for len(sub) > 0 {
+ j := strings.IndexRune(s, r)
+ if j == -1 {
+ break
+ }
+ i = j
+ sub = sub[i+1:]
+ }
+ }
+ if i != -1 {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/godbus/dbus/v5/.travis.yml b/vendor/github.com/godbus/dbus/v5/.travis.yml
new file mode 100644
index 00000000..dd676720
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/.travis.yml
@@ -0,0 +1,50 @@
+dist: bionic
+language: go
+go_import_path: github.com/godbus/dbus
+
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+addons:
+ apt:
+ packages:
+ - dbus
+ - dbus-x11
+
+before_install:
+ - export GO111MODULE=on
+
+script:
+ - go test -v -race -mod=readonly ./... # Run all the tests with the race detector enabled
+ - go vet ./... # go vet is the official Go static analyzer
+
+jobs:
+ include:
+ # The build matrix doesn't cover build stages, so manually expand
+ # the jobs with anchors
+ - &multiarch
+ stage: "Multiarch Test"
+ go: 1.11.x
+ env: TARGETS="386 arm arm64 ppc64le"
+ before_install:
+ - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+ script:
+ - |
+ set -e
+ for target in $TARGETS; do
+ printf "\e[1mRunning test suite under ${target}.\e[0m\n"
+ GOARCH="$target" go test -v ./...
+ printf "\n\n"
+ done
+ - <<: *multiarch
+ go: 1.12.x
+ - <<: *multiarch
+ go: 1.13.x
diff --git a/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md
new file mode 100644
index 00000000..c88f9b2b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# How to Contribute
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.markdown) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the
+second line is always blank, and other lines should be wrapped at 80 characters.
+This allows the message to be easier to read on GitHub as well as in various
+git tools.
diff --git a/vendor/github.com/godbus/dbus/v5/LICENSE b/vendor/github.com/godbus/dbus/v5/LICENSE
new file mode 100644
index 00000000..670d88fc
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>), Google
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/godbus/dbus/v5/MAINTAINERS b/vendor/github.com/godbus/dbus/v5/MAINTAINERS
new file mode 100644
index 00000000..27618c9c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/MAINTAINERS
@@ -0,0 +1,3 @@
+Brandon Philips (@philips)
+Brian Waldon (@bcwaldon)
+John Southworth (@jsouthworth)
diff --git a/vendor/github.com/godbus/dbus/v5/README.markdown b/vendor/github.com/godbus/dbus/v5/README.markdown
new file mode 100644
index 00000000..fd296487
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/README.markdown
@@ -0,0 +1,44 @@
+[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus)
+
+dbus
+----
+
+dbus is a simple library that implements native Go client bindings for the
+D-Bus message bus system.
+
+### Features
+
+* Complete native implementation of the D-Bus message protocol
+* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
+* Subpackages that help with the introspection / property interfaces
+
+### Installation
+
+This packages requires Go 1.7. If you installed it and set up your GOPATH, just run:
+
+```
+go get github.com/godbus/dbus
+```
+
+If you want to use the subpackages, you can install them the same way.
+
+### Usage
+
+The complete package documentation and some simple examples are available at
+[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
+[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
+gives a short overview over the basic usage.
+
+#### Projects using godbus
+- [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library.
+- [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over bluez dbus API.
+
+Please note that the API is considered unstable for now and may change without
+further notice.
+
+### License
+
+go.dbus is available under the Simplified BSD License; see LICENSE for the full
+text.
+
+Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.
diff --git a/vendor/github.com/godbus/dbus/v5/auth.go b/vendor/github.com/godbus/dbus/v5/auth.go
new file mode 100644
index 00000000..31abac62
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/auth.go
@@ -0,0 +1,252 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+)
+
+// AuthStatus represents the Status of an authentication mechanism.
+type AuthStatus byte
+
+const (
+ // AuthOk signals that authentication is finished; the next command
+ // from the server should be an OK.
+ AuthOk AuthStatus = iota
+
+ // AuthContinue signals that additional data is needed; the next command
+ // from the server should be a DATA.
+ AuthContinue
+
+ // AuthError signals an error; the server sent invalid data or some
+ // other unexpected thing happened and the current authentication
+ // process should be aborted.
+ AuthError
+)
+
+type authState byte
+
+const (
+ waitingForData authState = iota
+ waitingForOk
+ waitingForReject
+)
+
+// Auth defines the behaviour of an authentication mechanism.
+type Auth interface {
+	// Return the name of the mechanism, the argument to the first AUTH command
+ // and the next status.
+ FirstData() (name, resp []byte, status AuthStatus)
+
+ // Process the given DATA command, and return the argument to the DATA
+ // command and the next status. If len(resp) == 0, no DATA command is sent.
+ HandleData(data []byte) (resp []byte, status AuthStatus)
+}
+
+// Auth authenticates the connection, trying the given list of authentication
+// mechanisms (in that order). If nil is passed, the EXTERNAL and
+// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
+// connections, this method must be called before sending any messages to the
+// bus. Auth must not be called on shared connections.
+func (conn *Conn) Auth(methods []Auth) error {
+ if methods == nil {
+ uid := strconv.Itoa(os.Getuid())
+ methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
+ }
+ in := bufio.NewReader(conn.transport)
+ err := conn.transport.SendNullByte()
+ if err != nil {
+ return err
+ }
+ err = authWriteLine(conn.transport, []byte("AUTH"))
+ if err != nil {
+ return err
+ }
+ s, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
+ return errors.New("dbus: authentication protocol error")
+ }
+ s = s[1:]
+ for _, v := range s {
+ for _, m := range methods {
+ if name, data, status := m.FirstData(); bytes.Equal(v, name) {
+ var ok bool
+ err = authWriteLine(conn.transport, []byte("AUTH"), v, data)
+ if err != nil {
+ return err
+ }
+ switch status {
+ case AuthOk:
+ err, ok = conn.tryAuth(m, waitingForOk, in)
+ case AuthContinue:
+ err, ok = conn.tryAuth(m, waitingForData, in)
+ default:
+ panic("dbus: invalid authentication status")
+ }
+ if err != nil {
+ return err
+ }
+ if ok {
+ if conn.transport.SupportsUnixFDs() {
+ err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
+ if err != nil {
+ return err
+ }
+ line, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
+ conn.EnableUnixFDs()
+ conn.unixFD = true
+ case bytes.Equal(line[0], []byte("ERROR")):
+ default:
+ return errors.New("dbus: authentication protocol error")
+ }
+ }
+ err = authWriteLine(conn.transport, []byte("BEGIN"))
+ if err != nil {
+ return err
+ }
+ go conn.inWorker()
+ return nil
+ }
+ }
+ }
+ }
+ return errors.New("dbus: authentication failed")
+}
+
+// tryAuth tries to authenticate with m as the mechanism, using state as the
+// initial authState and in for reading input. It returns (nil, true) on
+// success, (nil, false) on a REJECTED and (someErr, false) if some other
+// error occurred.
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
+ for {
+ s, err := authReadLine(in)
+ if err != nil {
+ return err, false
+ }
+ switch {
+ case state == waitingForData && string(s[0]) == "DATA":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ continue
+ }
+ data, status := m.HandleData(s[1])
+ switch status {
+ case AuthOk, AuthContinue:
+ if len(data) != 0 {
+ err = authWriteLine(conn.transport, []byte("DATA"), data)
+ if err != nil {
+ return err, false
+ }
+ }
+ if status == AuthOk {
+ state = waitingForOk
+ }
+ case AuthError:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ }
+ case state == waitingForData && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForData && string(s[0]) == "ERROR":
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForData && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForData:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForOk && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForOk && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForOk && (string(s[0]) == "DATA" ||
+ string(s[0]) == "ERROR"):
+
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForOk:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForReject && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForReject:
+ return errors.New("dbus: authentication protocol error"), false
+ default:
+ panic("dbus: invalid auth state")
+ }
+ }
+}
+
+// authReadLine reads a line and separates it into its fields.
+func authReadLine(in *bufio.Reader) ([][]byte, error) {
+ data, err := in.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+ data = bytes.TrimSuffix(data, []byte("\r\n"))
+ return bytes.Split(data, []byte{' '}), nil
+}
+
+// authWriteLine writes the given line in the authentication protocol format
+// (elements of data separated by a " " and terminated by "\r\n").
+func authWriteLine(out io.Writer, data ...[]byte) error {
+ buf := make([]byte, 0)
+ for i, v := range data {
+ buf = append(buf, v...)
+ if i != len(data)-1 {
+ buf = append(buf, ' ')
+ }
+ }
+ buf = append(buf, '\r')
+ buf = append(buf, '\n')
+ n, err := out.Write(buf)
+ if err != nil {
+ return err
+ }
+ if n != len(buf) {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/v5/auth_anonymous.go b/vendor/github.com/godbus/dbus/v5/auth_anonymous.go
new file mode 100644
index 00000000..75f3ad34
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/auth_anonymous.go
@@ -0,0 +1,16 @@
+package dbus
+
+// AuthAnonymous returns an Auth that uses the ANONYMOUS mechanism.
+func AuthAnonymous() Auth {
+ return &authAnonymous{}
+}
+
+type authAnonymous struct{}
+
+func (a *authAnonymous) FirstData() (name, resp []byte, status AuthStatus) {
+ return []byte("ANONYMOUS"), nil, AuthOk
+}
+
+func (a *authAnonymous) HandleData(data []byte) (resp []byte, status AuthStatus) {
+ return nil, AuthError
+}
diff --git a/vendor/github.com/godbus/dbus/v5/auth_external.go b/vendor/github.com/godbus/dbus/v5/auth_external.go
new file mode 100644
index 00000000..7e376d3e
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/auth_external.go
@@ -0,0 +1,26 @@
+package dbus
+
+import (
+ "encoding/hex"
+)
+
+// AuthExternal returns an Auth that authenticates as the given user with the
+// EXTERNAL mechanism.
+func AuthExternal(user string) Auth {
+ return authExternal{user}
+}
+
+// AuthExternal implements the EXTERNAL authentication mechanism.
+type authExternal struct {
+ user string
+}
+
+func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("EXTERNAL"), b, AuthOk
+}
+
+func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
+ return nil, AuthError
+}
diff --git a/vendor/github.com/godbus/dbus/v5/auth_sha1.go b/vendor/github.com/godbus/dbus/v5/auth_sha1.go
new file mode 100644
index 00000000..80286700
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/auth_sha1.go
@@ -0,0 +1,102 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/hex"
+ "os"
+)
+
+// AuthCookieSha1 returns an Auth that authenticates as the given user with the
+// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
+// directory of the user.
+func AuthCookieSha1(user, home string) Auth {
+ return authCookieSha1{user, home}
+}
+
+type authCookieSha1 struct {
+ user, home string
+}
+
+func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
+}
+
+func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
+ challenge := make([]byte, len(data)/2)
+ _, err := hex.Decode(challenge, data)
+ if err != nil {
+ return nil, AuthError
+ }
+ b := bytes.Split(challenge, []byte{' '})
+ if len(b) != 3 {
+ return nil, AuthError
+ }
+ context := b[0]
+ id := b[1]
+ svchallenge := b[2]
+ cookie := a.getCookie(context, id)
+ if cookie == nil {
+ return nil, AuthError
+ }
+ clchallenge := a.generateChallenge()
+ if clchallenge == nil {
+ return nil, AuthError
+ }
+ hash := sha1.New()
+ hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
+ hexhash := make([]byte, 2*hash.Size())
+ hex.Encode(hexhash, hash.Sum(nil))
+ data = append(clchallenge, ' ')
+ data = append(data, hexhash...)
+ resp := make([]byte, 2*len(data))
+ hex.Encode(resp, data)
+ return resp, AuthOk
+}
+
+// getCookie searches for the cookie identified by id in context and returns
+// the cookie content or nil. (Since HandleData can't return a specific error,
+// but only whether an error occurred, this function also doesn't bother to
+// return an error.)
+func (a authCookieSha1) getCookie(context, id []byte) []byte {
+ file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
+ if err != nil {
+ return nil
+ }
+ defer file.Close()
+ rd := bufio.NewReader(file)
+ for {
+ line, err := rd.ReadBytes('\n')
+ if err != nil {
+ return nil
+ }
+ line = line[:len(line)-1]
+ b := bytes.Split(line, []byte{' '})
+ if len(b) != 3 {
+ return nil
+ }
+ if bytes.Equal(b[0], id) {
+ return b[2]
+ }
+ }
+}
+
+// generateChallenge returns a random, hex-encoded challenge, or nil on error
+// (see above).
+func (a authCookieSha1) generateChallenge() []byte {
+ b := make([]byte, 16)
+ n, err := rand.Read(b)
+ if err != nil {
+ return nil
+ }
+ if n != 16 {
+ return nil
+ }
+ enc := make([]byte, 32)
+ hex.Encode(enc, b)
+ return enc
+}
diff --git a/vendor/github.com/godbus/dbus/v5/call.go b/vendor/github.com/godbus/dbus/v5/call.go
new file mode 100644
index 00000000..2cb18901
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/call.go
@@ -0,0 +1,60 @@
+package dbus
+
+import (
+ "context"
+ "errors"
+)
+
+var errSignature = errors.New("dbus: mismatched signature")
+
+// Call represents a pending or completed method call.
+type Call struct {
+ Destination string
+ Path ObjectPath
+ Method string
+ Args []interface{}
+
+ // Strobes when the call is complete.
+ Done chan *Call
+
+ // After completion, the error status. If this is non-nil, it may be an
+ // error message from the peer (with Error as its type) or some other error.
+ Err error
+
+ // Holds the response once the call is done.
+ Body []interface{}
+
+ // tracks context and canceler
+ ctx context.Context
+ ctxCanceler context.CancelFunc
+}
+
+func (c *Call) Context() context.Context {
+ if c.ctx == nil {
+ return context.Background()
+ }
+
+ return c.ctx
+}
+
+func (c *Call) ContextCancel() {
+ if c.ctxCanceler != nil {
+ c.ctxCanceler()
+ }
+}
+
+// Store stores the body of the reply into the provided pointers. It returns
+// an error if the signatures of the body and retvalues don't match, or if
+// the error status is not nil.
+func (c *Call) Store(retvalues ...interface{}) error {
+ if c.Err != nil {
+ return c.Err
+ }
+
+ return Store(c.Body, retvalues...)
+}
+
+func (c *Call) done() {
+ c.Done <- c
+ c.ContextCancel()
+}
diff --git a/vendor/github.com/godbus/dbus/v5/conn.go b/vendor/github.com/godbus/dbus/v5/conn.go
new file mode 100644
index 00000000..b55bc99c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/conn.go
@@ -0,0 +1,912 @@
+package dbus
+
+import (
+ "context"
+ "errors"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ systemBus *Conn
+ systemBusLck sync.Mutex
+ sessionBus *Conn
+ sessionBusLck sync.Mutex
+)
+
+// ErrClosed is the error returned by calls on a closed connection.
+var ErrClosed = errors.New("dbus: connection closed by user")
+
+// Conn represents a connection to a message bus (usually, the system or
+// session bus).
+//
+// Connections are either shared or private. Shared connections
+// are shared between calls to the functions that return them. As a result,
+// the methods Close, Auth and Hello must not be called on them.
+//
+// Multiple goroutines may invoke methods on a connection simultaneously.
+type Conn struct {
+ transport
+
+ ctx context.Context
+ cancelCtx context.CancelFunc
+
+ closeOnce sync.Once
+ closeErr error
+
+ busObj BusObject
+ unixFD bool
+ uuid string
+
+ handler Handler
+ signalHandler SignalHandler
+ serialGen SerialGenerator
+ inInt Interceptor
+ outInt Interceptor
+
+ names *nameTracker
+ calls *callTracker
+ outHandler *outputHandler
+
+ eavesdropped chan<- *Message
+ eavesdroppedLck sync.Mutex
+}
+
+// SessionBus returns a shared connection to the session bus, connecting to it
+// if not already done.
+func SessionBus() (conn *Conn, err error) {
+ sessionBusLck.Lock()
+ defer sessionBusLck.Unlock()
+ if sessionBus != nil {
+ return sessionBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ sessionBus = conn
+ }
+ }()
+ conn, err = SessionBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+func getSessionBusAddress() (string, error) {
+ if address := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); address != "" && address != "autolaunch:" {
+ return address, nil
+
+ } else if address := tryDiscoverDbusSessionBusAddress(); address != "" {
+ os.Setenv("DBUS_SESSION_BUS_ADDRESS", address)
+ return address, nil
+ }
+ return getSessionBusPlatformAddress()
+}
+
+// SessionBusPrivate returns a new private connection to the session bus.
+func SessionBusPrivate(opts ...ConnOption) (*Conn, error) {
+ address, err := getSessionBusAddress()
+ if err != nil {
+ return nil, err
+ }
+
+ return Dial(address, opts...)
+}
+
+// SessionBusPrivateHandler returns a new private connection to the session bus, using the provided handlers.
+//
+// Deprecated: use SessionBusPrivate with options instead.
+func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ return SessionBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler))
+}
+
+// SystemBus returns a shared connection to the system bus, connecting to it if
+// not already done.
+func SystemBus() (conn *Conn, err error) {
+ systemBusLck.Lock()
+ defer systemBusLck.Unlock()
+ if systemBus != nil {
+ return systemBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ systemBus = conn
+ }
+ }()
+ conn, err = SystemBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SystemBusPrivate returns a new private connection to the system bus.
+// Note: this connection is not ready to use. One must perform Auth and Hello
+// on the connection before it is useable.
+func SystemBusPrivate(opts ...ConnOption) (*Conn, error) {
+ return Dial(getSystemBusPlatformAddress(), opts...)
+}
+
+// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers.
+//
+// Deprecated: use SystemBusPrivate with options instead.
+func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ return SystemBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler))
+}
+
+// Dial establishes a new private connection to the message bus specified by address.
+func Dial(address string, opts ...ConnOption) (*Conn, error) {
+ tr, err := getTransport(address)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(tr, opts...)
+}
+
+// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers.
+//
+// Deprecated: use Dial with options instead.
+func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ return Dial(address, WithSignalHandler(signalHandler))
+}
+
+// ConnOption is a connection option.
+type ConnOption func(conn *Conn) error
+
+// WithHandler overrides the default handler.
+func WithHandler(handler Handler) ConnOption {
+ return func(conn *Conn) error {
+ conn.handler = handler
+ return nil
+ }
+}
+
+// WithSignalHandler overrides the default signal handler.
+func WithSignalHandler(handler SignalHandler) ConnOption {
+ return func(conn *Conn) error {
+ conn.signalHandler = handler
+ return nil
+ }
+}
+
+// WithSerialGenerator overrides the default signals generator.
+func WithSerialGenerator(gen SerialGenerator) ConnOption {
+ return func(conn *Conn) error {
+ conn.serialGen = gen
+ return nil
+ }
+}
+
+// Interceptor intercepts incoming and outgoing messages.
+type Interceptor func(msg *Message)
+
+// WithIncomingInterceptor sets the given interceptor for incoming messages.
+func WithIncomingInterceptor(interceptor Interceptor) ConnOption {
+ return func(conn *Conn) error {
+ conn.inInt = interceptor
+ return nil
+ }
+}
+
+// WithOutgoingInterceptor sets the given interceptor for outgoing messages.
+func WithOutgoingInterceptor(interceptor Interceptor) ConnOption {
+ return func(conn *Conn) error {
+ conn.outInt = interceptor
+ return nil
+ }
+}
+
+// WithContext overrides the default context for the connection.
+func WithContext(ctx context.Context) ConnOption {
+ return func(conn *Conn) error {
+ conn.ctx = ctx
+ return nil
+ }
+}
+
+// NewConn creates a new private *Conn from an already established connection.
+func NewConn(conn io.ReadWriteCloser, opts ...ConnOption) (*Conn, error) {
+ return newConn(genericTransport{conn}, opts...)
+}
+
+// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers.
+//
+// Deprecated: use NewConn with options instead.
+func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) {
+ return NewConn(genericTransport{conn}, WithHandler(handler), WithSignalHandler(signalHandler))
+}
+
+// newConn creates a new *Conn from a transport.
+func newConn(tr transport, opts ...ConnOption) (*Conn, error) {
+ conn := new(Conn)
+ conn.transport = tr
+ for _, opt := range opts {
+ if err := opt(conn); err != nil {
+ return nil, err
+ }
+ }
+ if conn.ctx == nil {
+ conn.ctx = context.Background()
+ }
+ conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx)
+ go func() {
+ <-conn.ctx.Done()
+ conn.Close()
+ }()
+
+ conn.calls = newCallTracker()
+ if conn.handler == nil {
+ conn.handler = NewDefaultHandler()
+ }
+ if conn.signalHandler == nil {
+ conn.signalHandler = NewDefaultSignalHandler()
+ }
+ if conn.serialGen == nil {
+ conn.serialGen = newSerialGenerator()
+ }
+ conn.outHandler = &outputHandler{conn: conn}
+ conn.names = newNameTracker()
+ conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+ return conn, nil
+}
+
+// BusObject returns the object owned by the bus daemon which handles
+// administrative requests.
+func (conn *Conn) BusObject() BusObject {
+ return conn.busObj
+}
+
+// Close closes the connection. Any blocked operations will return with errors
+// and the channels passed to Eavesdrop and Signal are closed. This method must
+// not be called on shared connections.
+func (conn *Conn) Close() error {
+ conn.closeOnce.Do(func() {
+ conn.outHandler.close()
+ if term, ok := conn.signalHandler.(Terminator); ok {
+ term.Terminate()
+ }
+
+ if term, ok := conn.handler.(Terminator); ok {
+ term.Terminate()
+ }
+
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ close(conn.eavesdropped)
+ }
+ conn.eavesdroppedLck.Unlock()
+
+ conn.cancelCtx()
+
+ conn.closeErr = conn.transport.Close()
+ })
+ return conn.closeErr
+}
+
+// Context returns the context associated with the connection. The
+// context will be cancelled when the connection is closed.
+func (conn *Conn) Context() context.Context {
+ return conn.ctx
+}
+
+// Eavesdrop causes conn to send all incoming messages to the given channel
+// without further processing. Method replies, errors and signals will not be
+// sent to the appropriate channels and method calls will not be handled. If nil
+// is passed, the normal behaviour is restored.
+//
+// The caller has to make sure that ch is sufficiently buffered;
+// if a message arrives when a write to ch is not possible, the message is
+// discarded.
+func (conn *Conn) Eavesdrop(ch chan<- *Message) {
+ conn.eavesdroppedLck.Lock()
+ conn.eavesdropped = ch
+ conn.eavesdroppedLck.Unlock()
+}
+
+// getSerial returns an unused serial.
+func (conn *Conn) getSerial() uint32 {
+ return conn.serialGen.GetSerial()
+}
+
+// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
+// called after authentication, but before sending any other messages to the
+// bus. Hello must not be called for shared connections.
+func (conn *Conn) Hello() error {
+ var s string
+ err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
+ if err != nil {
+ return err
+ }
+ conn.names.acquireUniqueConnectionName(s)
+ return nil
+}
+
+// inWorker runs in an own goroutine, reading incoming messages from the
+// transport and dispatching them appropriately.
+func (conn *Conn) inWorker() {
+ for {
+ msg, err := conn.ReadMessage()
+ if err != nil {
+ if _, ok := err.(InvalidMessageError); !ok {
+ // Some read error occurred (usually EOF); we can't really do
+ // anything but to shut down all stuff and returns errors to all
+ // pending replies.
+ conn.Close()
+ conn.calls.finalizeAllWithError(err)
+ return
+ }
+ // invalid messages are ignored
+ continue
+ }
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ select {
+ case conn.eavesdropped <- msg:
+ default:
+ }
+ conn.eavesdroppedLck.Unlock()
+ continue
+ }
+ conn.eavesdroppedLck.Unlock()
+ dest, _ := msg.Headers[FieldDestination].value.(string)
+ found := dest == "" ||
+ !conn.names.uniqueNameIsKnown() ||
+ conn.names.isKnownName(dest)
+ if !found {
+ // Eavesdropped a message, but no channel for it is registered.
+ // Ignore it.
+ continue
+ }
+
+ if conn.inInt != nil {
+ conn.inInt(msg)
+ }
+ switch msg.Type {
+ case TypeError:
+ conn.serialGen.RetireSerial(conn.calls.handleDBusError(msg))
+ case TypeMethodReply:
+ conn.serialGen.RetireSerial(conn.calls.handleReply(msg))
+ case TypeSignal:
+ conn.handleSignal(msg)
+ case TypeMethodCall:
+ go conn.handleCall(msg)
+ }
+
+ }
+}
+
+func (conn *Conn) handleSignal(msg *Message) {
+ iface := msg.Headers[FieldInterface].value.(string)
+ member := msg.Headers[FieldMember].value.(string)
+ // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+ // sender is optional for signals.
+ sender, _ := msg.Headers[FieldSender].value.(string)
+ if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
+ if member == "NameLost" {
+ // If we lost the name on the bus, remove it from our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the lost name")
+ }
+ conn.names.loseName(name)
+ } else if member == "NameAcquired" {
+ // If we acquired the name on the bus, add it to our
+ // tracking list.
+ name, ok := msg.Body[0].(string)
+ if !ok {
+ panic("Unable to read the acquired name")
+ }
+ conn.names.acquireName(name)
+ }
+ }
+ signal := &Signal{
+ Sender: sender,
+ Path: msg.Headers[FieldPath].value.(ObjectPath),
+ Name: iface + "." + member,
+ Body: msg.Body,
+ }
+ conn.signalHandler.DeliverSignal(iface, member, signal)
+}
+
+// Names returns the list of all names that are currently owned by this
+// connection. The slice is always at least one element long, the first element
+// being the unique name of the connection.
+func (conn *Conn) Names() []string {
+ return conn.names.listKnownNames()
+}
+
+// Object returns the object identified by the given destination name and path.
+func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
+ return &Object{conn, dest, path}
+}
+
+func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) {
+ if conn.outInt != nil {
+ conn.outInt(msg)
+ }
+ err := conn.outHandler.sendAndIfClosed(msg, ifClosed)
+ conn.calls.handleSendError(msg, err)
+ if err != nil {
+ conn.serialGen.RetireSerial(msg.serial)
+ } else if msg.Type != TypeMethodCall {
+ conn.serialGen.RetireSerial(msg.serial)
+ }
+}
+
+// Send sends the given message to the message bus. You usually don't need to
+// use this; use the higher-level equivalents (Call / Go, Emit and Export)
+// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
+// call is returned and the same value is sent to ch (which must be buffered)
+// once the call is complete. Otherwise, ch is ignored and a Call structure is
+// returned of which only the Err member is valid.
+func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
+ return conn.send(context.Background(), msg, ch)
+}
+
+// SendWithContext acts like Send but takes a context
+func (conn *Conn) SendWithContext(ctx context.Context, msg *Message, ch chan *Call) *Call {
+ return conn.send(ctx, msg, ch)
+}
+
+func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call {
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ var call *Call
+ ctx, canceler := context.WithCancel(ctx)
+ msg.serial = conn.getSerial()
+ if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 5)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Conn).Send")
+ }
+ call = new(Call)
+ call.Destination, _ = msg.Headers[FieldDestination].value.(string)
+ call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
+ iface, _ := msg.Headers[FieldInterface].value.(string)
+ member, _ := msg.Headers[FieldMember].value.(string)
+ call.Method = iface + "." + member
+ call.Args = msg.Body
+ call.Done = ch
+ call.ctx = ctx
+ call.ctxCanceler = canceler
+ conn.calls.track(msg.serial, call)
+ go func() {
+ <-ctx.Done()
+ conn.calls.handleSendError(msg, ctx.Err())
+ }()
+ conn.sendMessageAndIfClosed(msg, func() {
+ conn.calls.handleSendError(msg, ErrClosed)
+ canceler()
+ })
+ } else {
+ canceler()
+ call = &Call{Err: nil}
+ conn.sendMessageAndIfClosed(msg, func() {
+ call = &Call{Err: ErrClosed}
+ })
+ }
+ return call
+}
+
+// sendError creates an error message corresponding to the parameters and sends
+// it to conn.out.
+func (conn *Conn) sendError(err error, dest string, serial uint32) {
+ var e *Error
+ switch em := err.(type) {
+ case Error:
+ e = &em
+ case *Error:
+ e = em
+ case DBusError:
+ name, body := em.DBusError()
+ e = NewError(name, body)
+ default:
+ e = MakeFailedError(err)
+ }
+ msg := new(Message)
+ msg.Type = TypeError
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldErrorName] = MakeVariant(e.Name)
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = e.Body
+ if len(e.Body) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
+ }
+ conn.sendMessageAndIfClosed(msg, nil)
+}
+
+// sendReply creates a method reply message corresponding to the parameters and
+// sends it to conn.out.
+func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
+ msg := new(Message)
+ msg.Type = TypeMethodReply
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.sendMessageAndIfClosed(msg, nil)
+}
+
+// AddMatchSignal registers the given match rule to receive broadcast
+// signals based on their contents.
+func (conn *Conn) AddMatchSignal(options ...MatchOption) error {
+ options = append([]MatchOption{withMatchType("signal")}, options...)
+ return conn.busObj.Call(
+ "org.freedesktop.DBus.AddMatch", 0,
+ formatMatchOptions(options),
+ ).Store()
+}
+
+// RemoveMatchSignal removes the first rule that matches previously registered with AddMatchSignal.
+func (conn *Conn) RemoveMatchSignal(options ...MatchOption) error {
+ options = append([]MatchOption{withMatchType("signal")}, options...)
+ return conn.busObj.Call(
+ "org.freedesktop.DBus.RemoveMatch", 0,
+ formatMatchOptions(options),
+ ).Store()
+}
+
+// Signal registers the given channel to be passed all received signal messages.
+//
+// Multiple of these channels can be registered at the same time.
+//
+// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
+// channel for eavesdropped messages, this channel receives all signals, and
+// none of the channels passed to Signal will receive any signals.
+//
+// Panics if the signal handler is not a `SignalRegistrar`.
+func (conn *Conn) Signal(ch chan<- *Signal) {
+ handler, ok := conn.signalHandler.(SignalRegistrar)
+ if !ok {
+ panic("cannot use this method with a non SignalRegistrar handler")
+ }
+ handler.AddSignal(ch)
+}
+
+// RemoveSignal removes the given channel from the list of the registered channels.
+//
+// Panics if the signal handler is not a `SignalRegistrar`.
+func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
+ handler, ok := conn.signalHandler.(SignalRegistrar)
+ if !ok {
+ panic("cannot use this method with a non SignalRegistrar handler")
+ }
+ handler.RemoveSignal(ch)
+}
+
+// SupportsUnixFDs returns whether the underlying transport supports passing of
+// unix file descriptors. If this is false, method calls containing unix file
+// descriptors will return an error and emitted signals containing them will
+// not be sent.
+func (conn *Conn) SupportsUnixFDs() bool {
+ return conn.unixFD
+}
+
+// Error represents a D-Bus message of type Error.
+type Error struct {
+ Name string
+ Body []interface{}
+}
+
+func NewError(name string, body []interface{}) *Error {
+ return &Error{name, body}
+}
+
+func (e Error) Error() string {
+ if len(e.Body) >= 1 {
+ s, ok := e.Body[0].(string)
+ if ok {
+ return s
+ }
+ }
+ return e.Name
+}
+
+// Signal represents a D-Bus message of type Signal. The name member is given in
+// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
+type Signal struct {
+ Sender string
+ Path ObjectPath
+ Name string
+ Body []interface{}
+}
+
+// transport is a D-Bus transport.
+type transport interface {
+ // Read and Write raw data (for example, for the authentication protocol).
+ io.ReadWriteCloser
+
+ // Send the initial null byte used for the EXTERNAL mechanism.
+ SendNullByte() error
+
+ // Returns whether this transport supports passing Unix FDs.
+ SupportsUnixFDs() bool
+
+ // Signal the transport that Unix FD passing is enabled for this connection.
+ EnableUnixFDs()
+
+ // Read / send a message, handling things like Unix FDs.
+ ReadMessage() (*Message, error)
+ SendMessage(*Message) error
+}
+
+var (
+ transports = make(map[string]func(string) (transport, error))
+)
+
+func getTransport(address string) (transport, error) {
+ var err error
+ var t transport
+
+ addresses := strings.Split(address, ";")
+ for _, v := range addresses {
+ i := strings.IndexRune(v, ':')
+ if i == -1 {
+ err = errors.New("dbus: invalid bus address (no transport)")
+ continue
+ }
+ f := transports[v[:i]]
+ if f == nil {
+ err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
+ continue
+ }
+ t, err = f(v[i+1:])
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, err
+}
+
+// getKey gets a key from the list of keys. Returns "" on error / not found.
+func getKey(s, key string) string {
+ for _, keyEqualsValue := range strings.Split(s, ",") {
+ keyValue := strings.SplitN(keyEqualsValue, "=", 2)
+ if len(keyValue) == 2 && keyValue[0] == key {
+ return keyValue[1]
+ }
+ }
+ return ""
+}
+
+type outputHandler struct {
+ conn *Conn
+ sendLck sync.Mutex
+ closed struct {
+ isClosed bool
+ lck sync.RWMutex
+ }
+}
+
+func (h *outputHandler) sendAndIfClosed(msg *Message, ifClosed func()) error {
+ h.closed.lck.RLock()
+ defer h.closed.lck.RUnlock()
+ if h.closed.isClosed {
+ if ifClosed != nil {
+ ifClosed()
+ }
+ return nil
+ }
+ h.sendLck.Lock()
+ defer h.sendLck.Unlock()
+ return h.conn.SendMessage(msg)
+}
+
+func (h *outputHandler) close() {
+ h.closed.lck.Lock()
+ defer h.closed.lck.Unlock()
+ h.closed.isClosed = true
+}
+
+type serialGenerator struct {
+ lck sync.Mutex
+ nextSerial uint32
+ serialUsed map[uint32]bool
+}
+
+func newSerialGenerator() *serialGenerator {
+ return &serialGenerator{
+ serialUsed: map[uint32]bool{0: true},
+ nextSerial: 1,
+ }
+}
+
+func (gen *serialGenerator) GetSerial() uint32 {
+ gen.lck.Lock()
+ defer gen.lck.Unlock()
+ n := gen.nextSerial
+ for gen.serialUsed[n] {
+ n++
+ }
+ gen.serialUsed[n] = true
+ gen.nextSerial = n + 1
+ return n
+}
+
+func (gen *serialGenerator) RetireSerial(serial uint32) {
+ gen.lck.Lock()
+ defer gen.lck.Unlock()
+ delete(gen.serialUsed, serial)
+}
+
+type nameTracker struct {
+ lck sync.RWMutex
+ unique string
+ names map[string]struct{}
+}
+
+func newNameTracker() *nameTracker {
+ return &nameTracker{names: map[string]struct{}{}}
+}
+func (tracker *nameTracker) acquireUniqueConnectionName(name string) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ tracker.unique = name
+}
+func (tracker *nameTracker) acquireName(name string) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ tracker.names[name] = struct{}{}
+}
+func (tracker *nameTracker) loseName(name string) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ delete(tracker.names, name)
+}
+
+func (tracker *nameTracker) uniqueNameIsKnown() bool {
+ tracker.lck.RLock()
+ defer tracker.lck.RUnlock()
+ return tracker.unique != ""
+}
+func (tracker *nameTracker) isKnownName(name string) bool {
+ tracker.lck.RLock()
+ defer tracker.lck.RUnlock()
+ _, ok := tracker.names[name]
+ return ok || name == tracker.unique
+}
+func (tracker *nameTracker) listKnownNames() []string {
+ tracker.lck.RLock()
+ defer tracker.lck.RUnlock()
+ out := make([]string, 0, len(tracker.names)+1)
+ out = append(out, tracker.unique)
+ for k := range tracker.names {
+ out = append(out, k)
+ }
+ return out
+}
+
+type callTracker struct {
+ calls map[uint32]*Call
+ lck sync.RWMutex
+}
+
+func newCallTracker() *callTracker {
+ return &callTracker{calls: map[uint32]*Call{}}
+}
+
+func (tracker *callTracker) track(sn uint32, call *Call) {
+ tracker.lck.Lock()
+ tracker.calls[sn] = call
+ tracker.lck.Unlock()
+}
+
+func (tracker *callTracker) handleReply(msg *Message) uint32 {
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ tracker.lck.RLock()
+ _, ok := tracker.calls[serial]
+ tracker.lck.RUnlock()
+ if ok {
+ tracker.finalizeWithBody(serial, msg.Body)
+ }
+ return serial
+}
+
+func (tracker *callTracker) handleDBusError(msg *Message) uint32 {
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ tracker.lck.RLock()
+ _, ok := tracker.calls[serial]
+ tracker.lck.RUnlock()
+ if ok {
+ name, _ := msg.Headers[FieldErrorName].value.(string)
+ tracker.finalizeWithError(serial, Error{name, msg.Body})
+ }
+ return serial
+}
+
+func (tracker *callTracker) handleSendError(msg *Message, err error) {
+ if err == nil {
+ return
+ }
+ tracker.lck.RLock()
+ _, ok := tracker.calls[msg.serial]
+ tracker.lck.RUnlock()
+ if ok {
+ tracker.finalizeWithError(msg.serial, err)
+ }
+}
+
+// finalize was the only func that did not strobe Done
+func (tracker *callTracker) finalize(sn uint32) {
+ tracker.lck.Lock()
+ defer tracker.lck.Unlock()
+ c, ok := tracker.calls[sn]
+ if ok {
+ delete(tracker.calls, sn)
+ c.ContextCancel()
+ }
+}
+
+func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) {
+ tracker.lck.Lock()
+ c, ok := tracker.calls[sn]
+ if ok {
+ delete(tracker.calls, sn)
+ }
+ tracker.lck.Unlock()
+ if ok {
+ c.Body = body
+ c.done()
+ }
+}
+
+func (tracker *callTracker) finalizeWithError(sn uint32, err error) {
+ tracker.lck.Lock()
+ c, ok := tracker.calls[sn]
+ if ok {
+ delete(tracker.calls, sn)
+ }
+ tracker.lck.Unlock()
+ if ok {
+ c.Err = err
+ c.done()
+ }
+}
+
+func (tracker *callTracker) finalizeAllWithError(err error) {
+ tracker.lck.Lock()
+ closedCalls := make([]*Call, 0, len(tracker.calls))
+ for sn := range tracker.calls {
+ closedCalls = append(closedCalls, tracker.calls[sn])
+ }
+ tracker.calls = map[uint32]*Call{}
+ tracker.lck.Unlock()
+ for _, call := range closedCalls {
+ call.Err = err
+ call.done()
+ }
+}
diff --git a/vendor/github.com/godbus/dbus/v5/conn_darwin.go b/vendor/github.com/godbus/dbus/v5/conn_darwin.go
new file mode 100644
index 00000000..6e2e4020
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/conn_darwin.go
@@ -0,0 +1,37 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+)
+
+const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket"
+
+func getSessionBusPlatformAddress() (string, error) {
+ cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return "", err
+ }
+
+ if len(b) == 0 {
+ return "", errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return "unix:path=" + string(b[:len(b)-1]), nil
+}
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ if address != "" {
+ return fmt.Sprintf("unix:path=%s", address)
+ }
+ return defaultSystemBusAddress
+}
+
+func tryDiscoverDbusSessionBusAddress() string {
+ return ""
+}
diff --git a/vendor/github.com/godbus/dbus/v5/conn_other.go b/vendor/github.com/godbus/dbus/v5/conn_other.go
new file mode 100644
index 00000000..616dcf66
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/conn_other.go
@@ -0,0 +1,93 @@
+// +build !darwin
+
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "os/user"
+ "path"
+ "strings"
+)
+
+var execCommand = exec.Command
+
+func getSessionBusPlatformAddress() (string, error) {
+ cmd := execCommand("dbus-launch")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return "", err
+ }
+
+ i := bytes.IndexByte(b, '=')
+ j := bytes.IndexByte(b, '\n')
+
+ if i == -1 || j == -1 || i > j {
+ return "", errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ env, addr := string(b[0:i]), string(b[i+1:j])
+ os.Setenv(env, addr)
+
+ return addr, nil
+}
+
+// tryDiscoverDbusSessionBusAddress tries to discover an existing dbus session
+// and return the value of its DBUS_SESSION_BUS_ADDRESS.
+// It tries different techniques employed by different operating systems,
+// returning the first valid address it finds, or an empty string.
+//
+// * /run/user/<uid>/bus if this exists, it *is* the bus socket. present on
+// Ubuntu 18.04
+// * /run/user/<uid>/dbus-session: if this exists, it can be parsed for the bus
+// address. present on Ubuntu 16.04
+//
+// See https://dbus.freedesktop.org/doc/dbus-launch.1.html
+func tryDiscoverDbusSessionBusAddress() string {
+ if runtimeDirectory, err := getRuntimeDirectory(); err == nil {
+
+ if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) {
+ // if /run/user/<uid>/bus exists, that file itself
+ // *is* the unix socket, so return its path
+ return fmt.Sprintf("unix:path=%s", runUserBusFile)
+ }
+ if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) {
+ // if /run/user/<uid>/dbus-session exists, it's a
+ // text file containing the address of the socket, e.g.:
+ // DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-E1c73yNqrG
+
+ if f, err := ioutil.ReadFile(runUserSessionDbusFile); err == nil {
+ fileContent := string(f)
+
+ prefix := "DBUS_SESSION_BUS_ADDRESS="
+
+ if strings.HasPrefix(fileContent, prefix) {
+ address := strings.TrimRight(strings.TrimPrefix(fileContent, prefix), "\n\r")
+ return address
+ }
+ }
+ }
+ }
+ return ""
+}
+
+func getRuntimeDirectory() (string, error) {
+ if currentUser, err := user.Current(); err != nil {
+ return "", err
+ } else {
+ return fmt.Sprintf("/run/user/%s", currentUser.Uid), nil
+ }
+}
+
+func fileExists(filename string) bool {
+ if _, err := os.Stat(filename); !os.IsNotExist(err) {
+ return true
+ } else {
+ return false
+ }
+}
diff --git a/vendor/github.com/godbus/dbus/v5/conn_unix.go b/vendor/github.com/godbus/dbus/v5/conn_unix.go
new file mode 100644
index 00000000..58aee7d2
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/conn_unix.go
@@ -0,0 +1,17 @@
+//+build !windows,!solaris,!darwin
+
+package dbus
+
+import (
+ "os"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return address
+ }
+ return defaultSystemBusAddress
+}
diff --git a/vendor/github.com/godbus/dbus/v5/conn_windows.go b/vendor/github.com/godbus/dbus/v5/conn_windows.go
new file mode 100644
index 00000000..4291e451
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/conn_windows.go
@@ -0,0 +1,15 @@
+//+build windows
+
+package dbus
+
+import "os"
+
+const defaultSystemBusAddress = "tcp:host=127.0.0.1,port=12434"
+
+func getSystemBusPlatformAddress() string {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return address
+ }
+ return defaultSystemBusAddress
+}
diff --git a/vendor/github.com/godbus/dbus/v5/dbus.go b/vendor/github.com/godbus/dbus/v5/dbus.go
new file mode 100644
index 00000000..428923d2
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/dbus.go
@@ -0,0 +1,428 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ byteType = reflect.TypeOf(byte(0))
+ boolType = reflect.TypeOf(false)
+ uint8Type = reflect.TypeOf(uint8(0))
+ int16Type = reflect.TypeOf(int16(0))
+ uint16Type = reflect.TypeOf(uint16(0))
+ intType = reflect.TypeOf(int(0))
+ uintType = reflect.TypeOf(uint(0))
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf("")
+ signatureType = reflect.TypeOf(Signature{""})
+ objectPathType = reflect.TypeOf(ObjectPath(""))
+ variantType = reflect.TypeOf(Variant{Signature{""}, nil})
+ interfacesType = reflect.TypeOf([]interface{}{})
+ interfaceType = reflect.TypeOf((*interface{})(nil)).Elem()
+ unixFDType = reflect.TypeOf(UnixFD(0))
+ unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
+)
+
+// An InvalidTypeError signals that a value which cannot be represented in the
+// D-Bus wire format was passed to a function.
+type InvalidTypeError struct {
+ Type reflect.Type
+}
+
+func (e InvalidTypeError) Error() string {
+ return "dbus: invalid type " + e.Type.String()
+}
+
+// Store copies the values contained in src to dest, which must be a slice of
+// pointers. It converts slices of interfaces from src to corresponding structs
+// in dest. An error is returned if the lengths of src and dest or the types of
+// their elements don't match.
+func Store(src []interface{}, dest ...interface{}) error {
+ if len(src) != len(dest) {
+ return errors.New("dbus.Store: length mismatch")
+ }
+
+ for i := range src {
+ if err := storeInterfaces(src[i], dest[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func storeInterfaces(src, dest interface{}) error {
+ return store(reflect.ValueOf(dest), reflect.ValueOf(src))
+}
+
+func store(dest, src reflect.Value) error {
+ if dest.Kind() == reflect.Ptr {
+ return store(dest.Elem(), src)
+ }
+ switch src.Kind() {
+ case reflect.Slice:
+ return storeSlice(dest, src)
+ case reflect.Map:
+ return storeMap(dest, src)
+ default:
+ return storeBase(dest, src)
+ }
+}
+
+func storeBase(dest, src reflect.Value) error {
+ return setDest(dest, src)
+}
+
+func setDest(dest, src reflect.Value) error {
+ if !isVariant(src.Type()) && isVariant(dest.Type()) {
+ //special conversion for dbus.Variant
+ dest.Set(reflect.ValueOf(MakeVariant(src.Interface())))
+ return nil
+ }
+ if isVariant(src.Type()) && !isVariant(dest.Type()) {
+ src = getVariantValue(src)
+ return store(dest, src)
+ }
+ if !src.Type().ConvertibleTo(dest.Type()) {
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: cannot convert %s to %s",
+ src.Type(), dest.Type())
+ }
+ dest.Set(src.Convert(dest.Type()))
+ return nil
+}
+
+func kindsAreCompatible(dest, src reflect.Type) bool {
+ switch {
+ case isVariant(dest):
+ return true
+ case dest.Kind() == reflect.Interface:
+ return true
+ default:
+ return dest.Kind() == src.Kind()
+ }
+}
+
+func isConvertibleTo(dest, src reflect.Type) bool {
+ switch {
+ case isVariant(dest):
+ return true
+ case dest.Kind() == reflect.Interface:
+ return true
+ case dest.Kind() == reflect.Slice:
+ return src.Kind() == reflect.Slice &&
+ isConvertibleTo(dest.Elem(), src.Elem())
+ case dest.Kind() == reflect.Struct:
+ return src == interfacesType
+ default:
+ return src.ConvertibleTo(dest)
+ }
+}
+
+func storeMap(dest, src reflect.Value) error {
+ switch {
+ case !kindsAreCompatible(dest.Type(), src.Type()):
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "map: cannot store a value of %s into %s",
+ src.Type(), dest.Type())
+ case isVariant(dest.Type()):
+ return storeMapIntoVariant(dest, src)
+ case dest.Kind() == reflect.Interface:
+ return storeMapIntoInterface(dest, src)
+ case isConvertibleTo(dest.Type().Key(), src.Type().Key()) &&
+ isConvertibleTo(dest.Type().Elem(), src.Type().Elem()):
+ return storeMapIntoMap(dest, src)
+ default:
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "map: cannot convert a value of %s into %s",
+ src.Type(), dest.Type())
+ }
+}
+
+func storeMapIntoVariant(dest, src reflect.Value) error {
+ dv := reflect.MakeMap(src.Type())
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeMapIntoInterface(dest, src reflect.Value) error {
+ var dv reflect.Value
+ if isVariant(src.Type().Elem()) {
+ //Convert variants to interface{} recursively when converting
+ //to interface{}
+ dv = reflect.MakeMap(
+ reflect.MapOf(src.Type().Key(), interfaceType))
+ } else {
+ dv = reflect.MakeMap(src.Type())
+ }
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeMapIntoMap(dest, src reflect.Value) error {
+ if dest.IsNil() {
+ dest.Set(reflect.MakeMap(dest.Type()))
+ }
+ keys := src.MapKeys()
+ for _, key := range keys {
+ dkey := key.Convert(dest.Type().Key())
+ dval := reflect.New(dest.Type().Elem()).Elem()
+ err := store(dval, getVariantValue(src.MapIndex(key)))
+ if err != nil {
+ return err
+ }
+ dest.SetMapIndex(dkey, dval)
+ }
+ return nil
+}
+
+func storeSlice(dest, src reflect.Value) error {
+ switch {
+ case src.Type() == interfacesType && dest.Kind() == reflect.Struct:
+ //The decoder always decodes structs as slices of interface{}
+ return storeStruct(dest, src)
+ case !kindsAreCompatible(dest.Type(), src.Type()):
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "slice: cannot store a value of %s into %s",
+ src.Type(), dest.Type())
+ case isVariant(dest.Type()):
+ return storeSliceIntoVariant(dest, src)
+ case dest.Kind() == reflect.Interface:
+ return storeSliceIntoInterface(dest, src)
+ case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()):
+ return storeSliceIntoSlice(dest, src)
+ default:
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "slice: cannot convert a value of %s into %s",
+ src.Type(), dest.Type())
+ }
+}
+
+func storeStruct(dest, src reflect.Value) error {
+ if isVariant(dest.Type()) {
+ return storeBase(dest, src)
+ }
+ dval := make([]interface{}, 0, dest.NumField())
+ dtype := dest.Type()
+ for i := 0; i < dest.NumField(); i++ {
+ field := dest.Field(i)
+ ftype := dtype.Field(i)
+ if ftype.PkgPath != "" {
+ continue
+ }
+ if ftype.Tag.Get("dbus") == "-" {
+ continue
+ }
+ dval = append(dval, field.Addr().Interface())
+ }
+ if src.Len() != len(dval) {
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "destination struct does not have "+
+ "enough fields need: %d have: %d",
+ src.Len(), len(dval))
+ }
+ return Store(src.Interface().([]interface{}), dval...)
+}
+
+func storeSliceIntoVariant(dest, src reflect.Value) error {
+ dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeSliceIntoInterface(dest, src reflect.Value) error {
+ var dv reflect.Value
+ if isVariant(src.Type().Elem()) {
+ //Convert variants to interface{} recursively when converting
+ //to interface{}
+ dv = reflect.MakeSlice(reflect.SliceOf(interfaceType),
+ src.Len(), src.Cap())
+ } else {
+ dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ }
+ err := store(dv, src)
+ if err != nil {
+ return err
+ }
+ return storeBase(dest, dv)
+}
+
+func storeSliceIntoSlice(dest, src reflect.Value) error {
+ if dest.IsNil() || dest.Len() < src.Len() {
+ dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap()))
+ }
+ if dest.Len() != src.Len() {
+ return fmt.Errorf(
+ "dbus.Store: type mismatch: "+
+ "slices are different lengths "+
+ "need: %d have: %d",
+ src.Len(), dest.Len())
+ }
+ for i := 0; i < src.Len(); i++ {
+ err := store(dest.Index(i), getVariantValue(src.Index(i)))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func getVariantValue(in reflect.Value) reflect.Value {
+ if isVariant(in.Type()) {
+ return reflect.ValueOf(in.Interface().(Variant).Value())
+ }
+ return in
+}
+
+func isVariant(t reflect.Type) bool {
+ return t == variantType
+}
+
+// An ObjectPath is an object path as defined by the D-Bus spec.
+type ObjectPath string
+
+// IsValid returns whether the object path is valid.
+func (o ObjectPath) IsValid() bool {
+ s := string(o)
+ if len(s) == 0 {
+ return false
+ }
+ if s[0] != '/' {
+ return false
+ }
+ if s[len(s)-1] == '/' && len(s) != 1 {
+ return false
+ }
+ // probably not used, but technically possible
+ if s == "/" {
+ return true
+ }
+ split := strings.Split(s[1:], "/")
+ for _, v := range split {
+ if len(v) == 0 {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
+// documentation for more information about Unix file descriptor passing.
+type UnixFD int32
+
+// A UnixFDIndex is the representation of a Unix file descriptor in a message.
+type UnixFDIndex uint32
+
+// alignment returns the alignment of values of type t.
+func alignment(t reflect.Type) int {
+ switch t {
+ case variantType:
+ return 1
+ case objectPathType:
+ return 4
+ case signatureType:
+ return 1
+ case interfacesType:
+ return 4
+ }
+ switch t.Kind() {
+ case reflect.Uint8:
+ return 1
+ case reflect.Uint16, reflect.Int16:
+ return 2
+ case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
+ return 4
+ case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
+ return 8
+ case reflect.Ptr:
+ return alignment(t.Elem())
+ }
+ return 1
+}
+
+// isKeyType returns whether t is a valid type for a D-Bus dict.
+func isKeyType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
+ reflect.String, reflect.Uint, reflect.Int:
+
+ return true
+ }
+ return false
+}
+
+// isValidInterface returns whether s is a valid name for an interface.
+func isValidInterface(s string) bool {
+ if len(s) == 0 || len(s) > 255 || s[0] == '.' {
+ return false
+ }
+ elem := strings.Split(s, ".")
+ if len(elem) < 2 {
+ return false
+ }
+ for _, v := range elem {
+ if len(v) == 0 {
+ return false
+ }
+ if v[0] >= '0' && v[0] <= '9' {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// isValidMember returns whether s is a valid name for a member.
+func isValidMember(s string) bool {
+ if len(s) == 0 || len(s) > 255 {
+ return false
+ }
+ i := strings.Index(s, ".")
+ if i != -1 {
+ return false
+ }
+ if s[0] >= '0' && s[0] <= '9' {
+ return false
+ }
+ for _, c := range s {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isMemberChar(c rune) bool {
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') || c == '_'
+}
diff --git a/vendor/github.com/godbus/dbus/v5/decoder.go b/vendor/github.com/godbus/dbus/v5/decoder.go
new file mode 100644
index 00000000..ede91575
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/decoder.go
@@ -0,0 +1,286 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+type decoder struct {
+ in io.Reader
+ order binary.ByteOrder
+ pos int
+}
+
+// newDecoder returns a new decoder that reads values from in. The input is
+// expected to be in the given byte order.
+func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
+ dec := new(decoder)
+ dec.in = in
+ dec.order = order
+ return dec
+}
+
+// align aligns the input to the given boundary and panics on error.
+func (dec *decoder) align(n int) {
+ if dec.pos%n != 0 {
+ newpos := (dec.pos + n - 1) & ^(n - 1)
+ empty := make([]byte, newpos-dec.pos)
+ if _, err := io.ReadFull(dec.in, empty); err != nil {
+ panic(err)
+ }
+ dec.pos = newpos
+ }
+}
+
+// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
+func (dec *decoder) binread(v interface{}) {
+ if err := binary.Read(dec.in, dec.order, v); err != nil {
+ panic(err)
+ }
+}
+
+func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
+ defer func() {
+ var ok bool
+ v := recover()
+ if err, ok = v.(error); ok {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ err = FormatError("unexpected EOF")
+ }
+ }
+ }()
+ vs = make([]interface{}, 0)
+ s := sig.str
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ return nil, err
+ }
+ v := dec.decode(s[:len(s)-len(rem)], 0)
+ vs = append(vs, v)
+ s = rem
+ }
+ return vs, nil
+}
+
+func (dec *decoder) decode(s string, depth int) interface{} {
+ dec.align(alignment(typeFor(s)))
+ switch s[0] {
+ case 'y':
+ var b [1]byte
+ if _, err := dec.in.Read(b[:]); err != nil {
+ panic(err)
+ }
+ dec.pos++
+ return b[0]
+ case 'b':
+ i := dec.decode("u", depth).(uint32)
+ switch {
+ case i == 0:
+ return false
+ case i == 1:
+ return true
+ default:
+ panic(FormatError("invalid value for boolean"))
+ }
+ case 'n':
+ var i int16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'i':
+ var i int32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 'x':
+ var i int64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'q':
+ var i uint16
+ dec.binread(&i)
+ dec.pos += 2
+ return i
+ case 'u':
+ var i uint32
+ dec.binread(&i)
+ dec.pos += 4
+ return i
+ case 't':
+ var i uint64
+ dec.binread(&i)
+ dec.pos += 8
+ return i
+ case 'd':
+ var f float64
+ dec.binread(&f)
+ dec.pos += 8
+ return f
+ case 's':
+ length := dec.decode("u", depth).(uint32)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ return string(b[:len(b)-1])
+ case 'o':
+ return ObjectPath(dec.decode("s", depth).(string))
+ case 'g':
+ length := dec.decode("y", depth).(byte)
+ b := make([]byte, int(length)+1)
+ if _, err := io.ReadFull(dec.in, b); err != nil {
+ panic(err)
+ }
+ dec.pos += int(length) + 1
+ sig, err := ParseSignature(string(b[:len(b)-1]))
+ if err != nil {
+ panic(err)
+ }
+ return sig
+ case 'v':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ var variant Variant
+ sig := dec.decode("g", depth).(Signature)
+ if len(sig.str) == 0 {
+ panic(FormatError("variant signature is empty"))
+ }
+ err, rem := validSingle(sig.str, 0)
+ if err != nil {
+ panic(err)
+ }
+ if rem != "" {
+ panic(FormatError("variant signature has multiple types"))
+ }
+ variant.sig = sig
+ variant.value = dec.decode(sig.str, depth+1)
+ return variant
+ case 'h':
+ return UnixFDIndex(dec.decode("u", depth).(uint32))
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ ksig := s[2:3]
+ vsig := s[3 : len(s)-1]
+ v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ length := dec.decode("u", depth).(uint32)
+ // Even for empty maps, the correct padding must be included
+ dec.align(8)
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ dec.align(8)
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ kv := dec.decode(ksig, depth+2)
+ vv := dec.decode(vsig, depth+2)
+ v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return v.Interface()
+ }
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ sig := s[1:]
+ length := dec.decode("u", depth).(uint32)
+ // capacity can be determined only for fixed-size element types
+ var capacity int
+ if s := sigByteSize(sig); s != 0 {
+ capacity = int(length) / s
+ }
+ v := reflect.MakeSlice(reflect.SliceOf(typeFor(sig)), 0, capacity)
+ // Even for empty arrays, the correct padding must be included
+ align := alignment(typeFor(s[1:]))
+ if len(s) > 1 && s[1] == '(' {
+ //Special case for arrays of structs
+ //structs decode as a slice of interface{} values
+ //but the dbus alignment does not match this
+ align = 8
+ }
+ dec.align(align)
+ spos := dec.pos
+ for dec.pos < spos+int(length) {
+ ev := dec.decode(s[1:], depth+1)
+ v = reflect.Append(v, reflect.ValueOf(ev))
+ }
+ return v.Interface()
+ case '(':
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ dec.align(8)
+ v := make([]interface{}, 0)
+ s = s[1 : len(s)-1]
+ for s != "" {
+ err, rem := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+ ev := dec.decode(s[:len(s)-len(rem)], depth+1)
+ v = append(v, ev)
+ s = rem
+ }
+ return v
+ default:
+ panic(SignatureError{Sig: s})
+ }
+}
+
+// sigByteSize tries to calculate the size of the given signature in bytes.
+//
+// It returns zero when it can't, for example when it contains non-fixed size
+// types such as strings, maps and arrays that require reading of the transmitted
+// data, for that we would need to implement the unread method for Decoder first.
+func sigByteSize(sig string) int {
+ var total int
+ for offset := 0; offset < len(sig); {
+ switch sig[offset] {
+ case 'y':
+ total += 1
+ offset += 1
+ case 'n', 'q':
+ total += 2
+ offset += 1
+ case 'b', 'i', 'u', 'h':
+ total += 4
+ offset += 1
+ case 'x', 't', 'd':
+ total += 8
+ offset += 1
+ case '(':
+ i := 1
+ depth := 1
+ for i < len(sig[offset:]) && depth != 0 {
+ if sig[offset+i] == '(' {
+ depth++
+ } else if sig[offset+i] == ')' {
+ depth--
+ }
+ i++
+ }
+ s := sigByteSize(sig[offset+1 : offset+i-1])
+ if s == 0 {
+ return 0
+ }
+ total += s
+ offset += i
+ default:
+ return 0
+ }
+ }
+ return total
+}
+
+// A FormatError is an error in the wire format.
+type FormatError string
+
+func (e FormatError) Error() string {
+ return "dbus: wire format error: " + string(e)
+}
diff --git a/vendor/github.com/godbus/dbus/v5/default_handler.go b/vendor/github.com/godbus/dbus/v5/default_handler.go
new file mode 100644
index 00000000..6d8bf32f
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/default_handler.go
@@ -0,0 +1,328 @@
+package dbus
+
+import (
+ "bytes"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+func newIntrospectIntf(h *defaultHandler) *exportedIntf {
+ methods := make(map[string]Method)
+ methods["Introspect"] = exportedMethod{
+ reflect.ValueOf(func(msg Message) (string, *Error) {
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ return h.introspectPath(path), nil
+ }),
+ }
+ return newExportedIntf(methods, true)
+}
+
+//NewDefaultHandler returns an instance of the default
+//call handler. This is useful if you want to implement only
+//one of the two handlers but not both.
+//
+// Deprecated: this is the default value, don't use it, it will be unexported.
+func NewDefaultHandler() *defaultHandler {
+ h := &defaultHandler{
+ objects: make(map[ObjectPath]*exportedObj),
+ defaultIntf: make(map[string]*exportedIntf),
+ }
+ h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h)
+ return h
+}
+
+type defaultHandler struct {
+ sync.RWMutex
+ objects map[ObjectPath]*exportedObj
+ defaultIntf map[string]*exportedIntf
+}
+
+func (h *defaultHandler) PathExists(path ObjectPath) bool {
+ _, ok := h.objects[path]
+ return ok
+}
+
+func (h *defaultHandler) introspectPath(path ObjectPath) string {
+ subpath := make(map[string]struct{})
+ var xml bytes.Buffer
+ xml.WriteString("")
+ for obj := range h.objects {
+ p := string(path)
+ if p != "/" {
+ p += "/"
+ }
+ if strings.HasPrefix(string(obj), p) {
+ node_name := strings.Split(string(obj[len(p):]), "/")[0]
+ subpath[node_name] = struct{}{}
+ }
+ }
+ for s := range subpath {
+ xml.WriteString("\n\t ")
+ }
+ xml.WriteString("\n ")
+ return xml.String()
+}
+
+func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) {
+ h.RLock()
+ defer h.RUnlock()
+ object, ok := h.objects[path]
+ if ok {
+ return object, ok
+ }
+
+ // If an object wasn't found for this exact path,
+ // look for a matching subtree registration
+ subtreeObject := newExportedObject()
+ path = path[:strings.LastIndex(string(path), "/")]
+ for len(path) > 0 {
+ object, ok = h.objects[path]
+ if ok {
+ for name, iface := range object.interfaces {
+ // Only include this handler if it registered for the subtree
+ if iface.isFallbackInterface() {
+ subtreeObject.interfaces[name] = iface
+ }
+ }
+ break
+ }
+
+ path = path[:strings.LastIndex(string(path), "/")]
+ }
+
+ for name, intf := range h.defaultIntf {
+ if _, exists := subtreeObject.interfaces[name]; exists {
+ continue
+ }
+ subtreeObject.interfaces[name] = intf
+ }
+
+ return subtreeObject, true
+}
+
+func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) {
+ h.Lock()
+ h.objects[path] = object
+ h.Unlock()
+}
+
+func (h *defaultHandler) DeleteObject(path ObjectPath) {
+ h.Lock()
+ delete(h.objects, path)
+ h.Unlock()
+}
+
+type exportedMethod struct {
+ reflect.Value
+}
+
+func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) {
+ t := m.Type()
+
+ params := make([]reflect.Value, len(args))
+ for i := 0; i < len(args); i++ {
+ params[i] = reflect.ValueOf(args[i]).Elem()
+ }
+
+ ret := m.Value.Call(params)
+
+ err := ret[t.NumOut()-1].Interface().(*Error)
+ ret = ret[:t.NumOut()-1]
+ out := make([]interface{}, len(ret))
+ for i, val := range ret {
+ out[i] = val.Interface()
+ }
+ if err == nil {
+ //concrete type to interface nil is a special case
+ return out, nil
+ }
+ return out, err
+}
+
+func (m exportedMethod) NumArguments() int {
+ return m.Value.Type().NumIn()
+}
+
+func (m exportedMethod) ArgumentValue(i int) interface{} {
+ return reflect.Zero(m.Type().In(i)).Interface()
+}
+
+func (m exportedMethod) NumReturns() int {
+ return m.Value.Type().NumOut()
+}
+
+func (m exportedMethod) ReturnValue(i int) interface{} {
+ return reflect.Zero(m.Type().Out(i)).Interface()
+}
+
+func newExportedObject() *exportedObj {
+ return &exportedObj{
+ interfaces: make(map[string]*exportedIntf),
+ }
+}
+
+type exportedObj struct {
+ mu sync.RWMutex
+ interfaces map[string]*exportedIntf
+}
+
+func (obj *exportedObj) LookupInterface(name string) (Interface, bool) {
+ if name == "" {
+ return obj, true
+ }
+ obj.mu.RLock()
+ defer obj.mu.RUnlock()
+ intf, exists := obj.interfaces[name]
+ return intf, exists
+}
+
+func (obj *exportedObj) AddInterface(name string, iface *exportedIntf) {
+ obj.mu.Lock()
+ defer obj.mu.Unlock()
+ obj.interfaces[name] = iface
+}
+
+func (obj *exportedObj) DeleteInterface(name string) {
+ obj.mu.Lock()
+ defer obj.mu.Unlock()
+ delete(obj.interfaces, name)
+}
+
+func (obj *exportedObj) LookupMethod(name string) (Method, bool) {
+ obj.mu.RLock()
+ defer obj.mu.RUnlock()
+ for _, intf := range obj.interfaces {
+ method, exists := intf.LookupMethod(name)
+ if exists {
+ return method, exists
+ }
+ }
+ return nil, false
+}
+
+func (obj *exportedObj) isFallbackInterface() bool {
+ return false
+}
+
+func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf {
+ return &exportedIntf{
+ methods: methods,
+ includeSubtree: includeSubtree,
+ }
+}
+
+type exportedIntf struct {
+ methods map[string]Method
+
+ // Whether or not this export is for the entire subtree
+ includeSubtree bool
+}
+
+func (obj *exportedIntf) LookupMethod(name string) (Method, bool) {
+ out, exists := obj.methods[name]
+ return out, exists
+}
+
+func (obj *exportedIntf) isFallbackInterface() bool {
+ return obj.includeSubtree
+}
+
+//NewDefaultSignalHandler returns an instance of the default
+//signal handler. This is useful if you want to implement only
+//one of the two handlers but not both.
+//
+// Deprecated: this is the default value, don't use it, it will be unexported.
+func NewDefaultSignalHandler() *defaultSignalHandler {
+ return &defaultSignalHandler{}
+}
+
+type defaultSignalHandler struct {
+ mu sync.RWMutex
+ closed bool
+ signals []*signalChannelData
+}
+
+func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) {
+ sh.mu.RLock()
+ defer sh.mu.RUnlock()
+ if sh.closed {
+ return
+ }
+ for _, scd := range sh.signals {
+ scd.deliver(signal)
+ }
+}
+
+func (sh *defaultSignalHandler) Terminate() {
+ sh.mu.Lock()
+ defer sh.mu.Unlock()
+ if sh.closed {
+ return
+ }
+
+ for _, scd := range sh.signals {
+ scd.close()
+ close(scd.ch)
+ }
+ sh.closed = true
+ sh.signals = nil
+}
+
+func (sh *defaultSignalHandler) AddSignal(ch chan<- *Signal) {
+ sh.mu.Lock()
+ defer sh.mu.Unlock()
+ if sh.closed {
+ return
+ }
+ sh.signals = append(sh.signals, &signalChannelData{
+ ch: ch,
+ done: make(chan struct{}),
+ })
+}
+
+func (sh *defaultSignalHandler) RemoveSignal(ch chan<- *Signal) {
+ sh.mu.Lock()
+ defer sh.mu.Unlock()
+ if sh.closed {
+ return
+ }
+ for i := len(sh.signals) - 1; i >= 0; i-- {
+ if ch == sh.signals[i].ch {
+ sh.signals[i].close()
+ copy(sh.signals[i:], sh.signals[i+1:])
+ sh.signals[len(sh.signals)-1] = nil
+ sh.signals = sh.signals[:len(sh.signals)-1]
+ }
+ }
+}
+
+type signalChannelData struct {
+ wg sync.WaitGroup
+ ch chan<- *Signal
+ done chan struct{}
+}
+
+func (scd *signalChannelData) deliver(signal *Signal) {
+ select {
+ case scd.ch <- signal:
+ case <-scd.done:
+ return
+ default:
+ scd.wg.Add(1)
+ go scd.deferredDeliver(signal)
+ }
+}
+
+func (scd *signalChannelData) deferredDeliver(signal *Signal) {
+ select {
+ case scd.ch <- signal:
+ case <-scd.done:
+ }
+ scd.wg.Done()
+}
+
+func (scd *signalChannelData) close() {
+ close(scd.done)
+ scd.wg.Wait() // wait until all spawned goroutines return
+}
diff --git a/vendor/github.com/godbus/dbus/v5/doc.go b/vendor/github.com/godbus/dbus/v5/doc.go
new file mode 100644
index 00000000..ade1df95
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/doc.go
@@ -0,0 +1,69 @@
+/*
+Package dbus implements bindings to the D-Bus message bus system.
+
+To use the message bus API, you first need to connect to a bus (usually the
+session or system bus). The acquired connection then can be used to call methods
+on remote objects and emit or receive signals. Using the Export method, you can
+arrange D-Bus methods calls to be directly translated to method calls on a Go
+value.
+
+Conversion Rules
+
+For outgoing messages, Go types are automatically converted to the
+corresponding D-Bus types. The following types are directly encoded as their
+respective D-Bus equivalents:
+
+ Go type | D-Bus type
+ ------------+-----------
+ byte | BYTE
+ bool | BOOLEAN
+ int16 | INT16
+ uint16 | UINT16
+ int | INT32
+ uint | UINT32
+ int32 | INT32
+ uint32 | UINT32
+ int64 | INT64
+ uint64 | UINT64
+ float64 | DOUBLE
+ string | STRING
+ ObjectPath | OBJECT_PATH
+ Signature | SIGNATURE
+ Variant | VARIANT
+ interface{} | VARIANT
+ UnixFDIndex | UNIX_FD
+
+Slices and arrays encode as ARRAYs of their element type.
+
+Maps encode as DICTs, provided that their key type can be used as a key for
+a DICT.
+
+Structs other than Variant and Signature encode as a STRUCT containing their
+exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
+be skipped.
+
+Pointers encode as the value they're pointed to.
+
+Types convertible to one of the base types above will be mapped as the
+base type.
+
+Trying to encode any other type or a slice, map or struct containing an
+unsupported type will result in an InvalidTypeError.
+
+For incoming messages, the inverse of these rules are used, with the exception
+of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces
+containing the struct fields in the correct order. The Store function can be
+used to convert such values to Go structs.
+
+Unix FD passing
+
+Handling Unix file descriptors deserves special mention. To use them, you should
+first check that they are supported on a connection by calling SupportsUnixFDs.
+If it returns true, all methods of Connection will translate messages containing
+UnixFD's to messages that are accompanied by the given file descriptors with the
+UnixFD values being substituted by the correct indices. Similarly, the indices
+of incoming messages are automatically resolved. It shouldn't be necessary to use
+UnixFDIndex.
+
+*/
+package dbus
diff --git a/vendor/github.com/godbus/dbus/v5/encoder.go b/vendor/github.com/godbus/dbus/v5/encoder.go
new file mode 100644
index 00000000..adfbb75c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/encoder.go
@@ -0,0 +1,210 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+// An encoder encodes values to the D-Bus wire format.
+type encoder struct {
+ out io.Writer
+ order binary.ByteOrder
+ pos int
+}
+
+// NewEncoder returns a new encoder that writes to out in the given byte order.
+func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
+ return newEncoderAtOffset(out, 0, order)
+}
+
+// newEncoderAtOffset returns a new encoder that writes to out in the given
+// byte order. Specify the offset to initialize pos for proper alignment
+// computation.
+func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
+ enc := new(encoder)
+ enc.out = out
+ enc.order = order
+ enc.pos = offset
+ return enc
+}
+
+// Aligns the next output to be on a multiple of n. Panics on write errors.
+func (enc *encoder) align(n int) {
+ pad := enc.padding(0, n)
+ if pad > 0 {
+ empty := make([]byte, pad)
+ if _, err := enc.out.Write(empty); err != nil {
+ panic(err)
+ }
+ enc.pos += pad
+ }
+}
+
+// padding returns the number of bytes of padding, based on the current
+// position, an additional offset, and the requested alignment.
+func (enc *encoder) padding(offset, algn int) int {
+ abs := enc.pos + offset
+ if abs%algn != 0 {
+ newabs := (abs + algn - 1) & ^(algn - 1)
+ return newabs - abs
+ }
+ return 0
+}
+
+// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
+func (enc *encoder) binwrite(v interface{}) {
+ if err := binary.Write(enc.out, enc.order, v); err != nil {
+ panic(err)
+ }
+}
+
+// Encode encodes the given values to the underlying reader. All written values
+// are aligned properly as required by the D-Bus spec.
+func (enc *encoder) Encode(vs ...interface{}) (err error) {
+ defer func() {
+ err, _ = recover().(error)
+ }()
+ for _, v := range vs {
+ enc.encode(reflect.ValueOf(v), 0)
+ }
+ return nil
+}
+
+// encode encodes the given value to the writer and panics on error. depth holds
+// the depth of the container nesting.
+func (enc *encoder) encode(v reflect.Value, depth int) {
+ enc.align(alignment(v.Type()))
+ switch v.Kind() {
+ case reflect.Uint8:
+ var b [1]byte
+ b[0] = byte(v.Uint())
+ if _, err := enc.out.Write(b[:]); err != nil {
+ panic(err)
+ }
+ enc.pos++
+ case reflect.Bool:
+ if v.Bool() {
+ enc.encode(reflect.ValueOf(uint32(1)), depth)
+ } else {
+ enc.encode(reflect.ValueOf(uint32(0)), depth)
+ }
+ case reflect.Int16:
+ enc.binwrite(int16(v.Int()))
+ enc.pos += 2
+ case reflect.Uint16:
+ enc.binwrite(uint16(v.Uint()))
+ enc.pos += 2
+ case reflect.Int, reflect.Int32:
+ enc.binwrite(int32(v.Int()))
+ enc.pos += 4
+ case reflect.Uint, reflect.Uint32:
+ enc.binwrite(uint32(v.Uint()))
+ enc.pos += 4
+ case reflect.Int64:
+ enc.binwrite(v.Int())
+ enc.pos += 8
+ case reflect.Uint64:
+ enc.binwrite(v.Uint())
+ enc.pos += 8
+ case reflect.Float64:
+ enc.binwrite(v.Float())
+ enc.pos += 8
+ case reflect.String:
+ enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
+ b := make([]byte, v.Len()+1)
+ copy(b, v.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case reflect.Ptr:
+ enc.encode(v.Elem(), depth)
+ case reflect.Slice, reflect.Array:
+ if depth >= 64 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus alignment for elements.
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem()))
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+
+ for i := 0; i < v.Len(); i++ {
+ bufenc.encode(v.Index(i), depth+1)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(alignment(v.Type().Elem()))
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ case reflect.Struct:
+ if depth >= 64 && v.Type() != signatureType {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ switch t := v.Type(); t {
+ case signatureType:
+ str := v.Field(0)
+ enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
+ b := make([]byte, str.Len()+1)
+ copy(b, str.String())
+ b[len(b)-1] = 0
+ n, err := enc.out.Write(b)
+ if err != nil {
+ panic(err)
+ }
+ enc.pos += n
+ case variantType:
+ variant := v.Interface().(Variant)
+ enc.encode(reflect.ValueOf(variant.sig), depth+1)
+ enc.encode(reflect.ValueOf(variant.value), depth+1)
+ default:
+ for i := 0; i < v.Type().NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ enc.encode(v.Field(i), depth+1)
+ }
+ }
+ }
+ case reflect.Map:
+ // Maps are arrays of structures, so they actually increase the depth by
+ // 2.
+ if depth >= 63 {
+ panic(FormatError("input exceeds container depth limit"))
+ }
+ if !isKeyType(v.Type().Key()) {
+ panic(InvalidTypeError{v.Type()})
+ }
+ keys := v.MapKeys()
+ // Lookahead offset: 4 bytes for uint32 length (with alignment),
+ // plus 8-byte alignment
+ n := enc.padding(0, 4) + 4
+ offset := enc.pos + n + enc.padding(n, 8)
+
+ var buf bytes.Buffer
+ bufenc := newEncoderAtOffset(&buf, offset, enc.order)
+ for _, k := range keys {
+ bufenc.align(8)
+ bufenc.encode(k, depth+2)
+ bufenc.encode(v.MapIndex(k), depth+2)
+ }
+ enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
+ length := buf.Len()
+ enc.align(8)
+ if _, err := buf.WriteTo(enc.out); err != nil {
+ panic(err)
+ }
+ enc.pos += length
+ case reflect.Interface:
+ enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth)
+ default:
+ panic(InvalidTypeError{v.Type()})
+ }
+}
diff --git a/vendor/github.com/godbus/dbus/v5/export.go b/vendor/github.com/godbus/dbus/v5/export.go
new file mode 100644
index 00000000..c277ab14
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/export.go
@@ -0,0 +1,412 @@
+package dbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ ErrMsgInvalidArg = Error{
+ "org.freedesktop.DBus.Error.InvalidArgs",
+ []interface{}{"Invalid type / number of args"},
+ }
+ ErrMsgNoObject = Error{
+ "org.freedesktop.DBus.Error.NoSuchObject",
+ []interface{}{"No such object"},
+ }
+ ErrMsgUnknownMethod = Error{
+ "org.freedesktop.DBus.Error.UnknownMethod",
+ []interface{}{"Unknown / invalid method"},
+ }
+ ErrMsgUnknownInterface = Error{
+ "org.freedesktop.DBus.Error.UnknownInterface",
+ []interface{}{"Object does not implement the interface"},
+ }
+)
+
+func MakeFailedError(err error) *Error {
+ return &Error{
+ "org.freedesktop.DBus.Error.Failed",
+ []interface{}{err.Error()},
+ }
+}
+
+// Sender is a type which can be used in exported methods to receive the message
+// sender.
+type Sender string
+
+func computeMethodName(name string, mapping map[string]string) string {
+ newname, ok := mapping[name]
+ if ok {
+ name = newname
+ }
+ return name
+}
+
+func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value {
+ if in == nil {
+ return nil
+ }
+ methods := make(map[string]reflect.Value)
+ val := reflect.ValueOf(in)
+ typ := val.Type()
+ for i := 0; i < typ.NumMethod(); i++ {
+ methtype := typ.Method(i)
+ method := val.Method(i)
+ t := method.Type()
+ // only track valid methods must return *Error as last arg
+ // and must be exported
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) ||
+ methtype.PkgPath != "" {
+ continue
+ }
+ // map names while building table
+ methods[computeMethodName(methtype.Name, mapping)] = method
+ }
+ return methods
+}
+
+func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) {
+ pointers := make([]interface{}, m.NumArguments())
+ decode := make([]interface{}, 0, len(body))
+
+ for i := 0; i < m.NumArguments(); i++ {
+ tp := reflect.TypeOf(m.ArgumentValue(i))
+ val := reflect.New(tp)
+ pointers[i] = val.Interface()
+ if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
+ val.Elem().SetString(sender)
+ } else if tp == reflect.TypeOf((*Message)(nil)).Elem() {
+ val.Elem().Set(reflect.ValueOf(*msg))
+ } else {
+ decode = append(decode, pointers[i])
+ }
+ }
+
+ if len(decode) != len(body) {
+ return nil, ErrMsgInvalidArg
+ }
+
+ if err := Store(body, decode...); err != nil {
+ return nil, ErrMsgInvalidArg
+ }
+
+ return pointers, nil
+}
+
+func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) {
+ if decoder, ok := m.(ArgumentDecoder); ok {
+ return decoder.DecodeArguments(conn, sender, msg, msg.Body)
+ }
+ return standardMethodArgumentDecode(m, sender, msg, msg.Body)
+}
+
+// handleCall handles the given method call (i.e. looks if it's one of the
+// pre-implemented ones and searches for a corresponding handler if not).
+func (conn *Conn) handleCall(msg *Message) {
+ name := msg.Headers[FieldMember].value.(string)
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ ifaceName, _ := msg.Headers[FieldInterface].value.(string)
+ sender, hasSender := msg.Headers[FieldSender].value.(string)
+ serial := msg.serial
+ if ifaceName == "org.freedesktop.DBus.Peer" {
+ switch name {
+ case "Ping":
+ conn.sendReply(sender, serial)
+ case "GetMachineId":
+ conn.sendReply(sender, serial, conn.uuid)
+ default:
+ conn.sendError(ErrMsgUnknownMethod, sender, serial)
+ }
+ return
+ }
+ if len(name) == 0 {
+ conn.sendError(ErrMsgUnknownMethod, sender, serial)
+ }
+
+ object, ok := conn.handler.LookupObject(path)
+ if !ok {
+ conn.sendError(ErrMsgNoObject, sender, serial)
+ return
+ }
+
+ iface, exists := object.LookupInterface(ifaceName)
+ if !exists {
+ conn.sendError(ErrMsgUnknownInterface, sender, serial)
+ return
+ }
+
+ m, exists := iface.LookupMethod(name)
+ if !exists {
+ conn.sendError(ErrMsgUnknownMethod, sender, serial)
+ return
+ }
+ args, err := conn.decodeArguments(m, sender, msg)
+ if err != nil {
+ conn.sendError(err, sender, serial)
+ return
+ }
+
+ ret, err := m.Call(args...)
+ if err != nil {
+ conn.sendError(err, sender, serial)
+ return
+ }
+
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ reply := new(Message)
+ reply.Type = TypeMethodReply
+ reply.serial = conn.getSerial()
+ reply.Headers = make(map[HeaderField]Variant)
+ if hasSender {
+ reply.Headers[FieldDestination] = msg.Headers[FieldSender]
+ }
+ reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
+ reply.Body = make([]interface{}, len(ret))
+ for i := 0; i < len(ret); i++ {
+ reply.Body[i] = ret[i]
+ }
+ reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
+
+ conn.sendMessageAndIfClosed(reply, nil)
+ }
+}
+
+// Emit emits the given signal on the message bus. The name parameter must be
+// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
+ if !path.IsValid() {
+ return errors.New("dbus: invalid object path")
+ }
+ i := strings.LastIndex(name, ".")
+ if i == -1 {
+ return errors.New("dbus: invalid method name")
+ }
+ iface := name[:i]
+ member := name[i+1:]
+ if !isValidMember(member) {
+ return errors.New("dbus: invalid method name")
+ }
+ if !isValidInterface(iface) {
+ return errors.New("dbus: invalid interface name")
+ }
+ msg := new(Message)
+ msg.Type = TypeSignal
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ msg.Headers[FieldMember] = MakeVariant(member)
+ msg.Headers[FieldPath] = MakeVariant(path)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+
+ var closed bool
+ conn.sendMessageAndIfClosed(msg, func() {
+ closed = true
+ })
+ if closed {
+ return ErrClosed
+ }
+ return nil
+}
+
+// Export registers the given value to be exported as an object on the
+// message bus.
+//
+// If a method call on the given path and interface is received, an exported
+// method with the same name is called with v as the receiver if the
+// parameters match and the last return value is of type *Error. If this
+// *Error is not nil, it is sent back to the caller as an error.
+// Otherwise, a method reply is sent with the other return values as its body.
+//
+// Any parameters with the special type Sender are set to the sender of the
+// dbus message when the method is called. Parameters of this type do not
+// contribute to the dbus signature of the method (i.e. the method is exposed
+// as if the parameters of type Sender were not there).
+//
+// Similarly, any parameters with the type Message are set to the raw message
+// received on the bus. Again, parameters of this type do not contribute to the
+// dbus signature of the method.
+//
+// Every method call is executed in a new goroutine, so the method may be called
+// in multiple goroutines at once.
+//
+// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
+// handled for every object.
+//
+// Passing nil as the first parameter will cause conn to cease handling calls on
+// the given combination of path and interface.
+//
+// Export returns an error if path is not a valid path name.
+func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportWithMap(v, nil, path, iface)
+}
+
+// ExportWithMap works exactly like Export but provides the ability to remap
+// method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.export(getMethods(v, mapping), path, iface, false)
+}
+
+// ExportSubtree works exactly like Export but registers the given value for
+// an entire subtree rather than just the root path provided.
+//
+// In order to make this useful, one parameter in each of the value's exported
+// methods should be a Message, in which case it will contain the raw message
+// (allowing one to get access to the path that caused the method to be called).
+//
+// Note that more specific export paths take precedence over less specific. For
+// example, a method call using the ObjectPath /foo/bar/baz will call a method
+// exported on /foo/bar before a method exported on /foo.
+func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
+ return conn.ExportSubtreeWithMap(v, nil, path, iface)
+}
+
+// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
+// ability to remap method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+ return conn.export(getMethods(v, mapping), path, iface, true)
+}
+
+// ExportMethodTable, like Export, registers the given methods as an object
+// on the message bus. Unlike Export, it uses a method table to define
+// the object instead of a native Go object.
+//
+// The method table is a map from method name to function closure
+// representing the method. This allows an object exported on the bus to not
+// necessarily be a native go object. It can be useful for generating exposed
+// methods on the fly.
+//
+// Any non-function objects in the method table are ignored.
+func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
+ return conn.exportMethodTable(methods, path, iface, false)
+}
+
+// Like ExportSubtree, but with the same caveats as ExportMethodTable.
+func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
+ return conn.exportMethodTable(methods, path, iface, true)
+}
+
+func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error {
+ out := make(map[string]reflect.Value)
+ for name, method := range methods {
+ rval := reflect.ValueOf(method)
+ if rval.Kind() != reflect.Func {
+ continue
+ }
+ t := rval.Type()
+ // only track valid methods must return *Error as last arg
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) {
+ continue
+ }
+ out[name] = rval
+ }
+ return conn.export(out, path, iface, includeSubtree)
+}
+
+func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error {
+ if h.PathExists(path) {
+ obj := h.objects[path]
+ obj.DeleteInterface(iface)
+ if len(obj.interfaces) == 0 {
+ h.DeleteObject(path)
+ }
+ }
+ return nil
+}
+
+// export is the worker function for all exports/registrations.
+func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error {
+ h, ok := conn.handler.(*defaultHandler)
+ if !ok {
+ return fmt.Errorf(
+ `dbus: export only allowed on the default hander handler have %T"`,
+ conn.handler)
+ }
+
+ if !path.IsValid() {
+ return fmt.Errorf(`dbus: Invalid path name: "%s"`, path)
+ }
+
+ // Remove a previous export if the interface is nil
+ if methods == nil {
+ return conn.unexport(h, path, iface)
+ }
+
+ // If this is the first handler for this path, make a new map to hold all
+ // handlers for this path.
+ if !h.PathExists(path) {
+ h.AddObject(path, newExportedObject())
+ }
+
+ exportedMethods := make(map[string]Method)
+ for name, method := range methods {
+ exportedMethods[name] = exportedMethod{method}
+ }
+
+ // Finally, save this handler
+ obj := h.objects[path]
+ obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree))
+
+ return nil
+}
+
+// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response.
+func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return ReleaseNameReply(r), nil
+}
+
+// RequestName calls org.freedesktop.DBus.RequestName and awaits a response.
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
+ var r uint32
+ err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
+ if err != nil {
+ return 0, err
+ }
+ return RequestNameReply(r), nil
+}
+
+// ReleaseNameReply is the reply to a ReleaseName call.
+type ReleaseNameReply uint32
+
+const (
+ ReleaseNameReplyReleased ReleaseNameReply = 1 + iota
+ ReleaseNameReplyNonExistent
+ ReleaseNameReplyNotOwner
+)
+
+// RequestNameFlags represents the possible flags for a RequestName call.
+type RequestNameFlags uint32
+
+const (
+ NameFlagAllowReplacement RequestNameFlags = 1 << iota
+ NameFlagReplaceExisting
+ NameFlagDoNotQueue
+)
+
+// RequestNameReply is the reply to a RequestName call.
+type RequestNameReply uint32
+
+const (
+ RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota
+ RequestNameReplyInQueue
+ RequestNameReplyExists
+ RequestNameReplyAlreadyOwner
+)
diff --git a/vendor/github.com/godbus/dbus/v5/go.mod b/vendor/github.com/godbus/dbus/v5/go.mod
new file mode 100644
index 00000000..15b92020
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/go.mod
@@ -0,0 +1,3 @@
+module github.com/godbus/dbus/v5
+
+go 1.12
diff --git a/vendor/github.com/godbus/dbus/v5/go.sum b/vendor/github.com/godbus/dbus/v5/go.sum
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/github.com/godbus/dbus/v5/homedir.go b/vendor/github.com/godbus/dbus/v5/homedir.go
new file mode 100644
index 00000000..0b745f93
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/homedir.go
@@ -0,0 +1,28 @@
+package dbus
+
+import (
+ "os"
+ "sync"
+)
+
+var (
+ homeDir string
+ homeDirLock sync.Mutex
+)
+
+func getHomeDir() string {
+ homeDirLock.Lock()
+ defer homeDirLock.Unlock()
+
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = os.Getenv("HOME")
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = lookupHomeDir()
+ return homeDir
+}
diff --git a/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go b/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go
new file mode 100644
index 00000000..2732081e
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go
@@ -0,0 +1,15 @@
+// +build !static_build
+
+package dbus
+
+import (
+ "os/user"
+)
+
+func lookupHomeDir() string {
+ u, err := user.Current()
+ if err != nil {
+ return "/"
+ }
+ return u.HomeDir
+}
diff --git a/vendor/github.com/godbus/dbus/v5/homedir_static.go b/vendor/github.com/godbus/dbus/v5/homedir_static.go
new file mode 100644
index 00000000..b9d9cb55
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/homedir_static.go
@@ -0,0 +1,45 @@
+// +build static_build
+
+package dbus
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func lookupHomeDir() string {
+ myUid := os.Getuid()
+
+ f, err := os.Open("/etc/passwd")
+ if err != nil {
+ return "/"
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ break
+ }
+
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ parts := strings.Split(line, ":")
+
+ if len(parts) >= 6 {
+ uid, err := strconv.Atoi(parts[2])
+ if err == nil && uid == myUid {
+ return parts[5]
+ }
+ }
+ }
+
+ // Default to / if we can't get a better value
+ return "/"
+}
diff --git a/vendor/github.com/godbus/dbus/v5/match.go b/vendor/github.com/godbus/dbus/v5/match.go
new file mode 100644
index 00000000..086ee336
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/match.go
@@ -0,0 +1,62 @@
+package dbus
+
+import (
+ "strings"
+)
+
+// MatchOption specifies option for dbus routing match rule. Options can be constructed with WithMatch* helpers.
+// For full list of available options consult
+// https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
+type MatchOption struct {
+ key string
+ value string
+}
+
+func formatMatchOptions(options []MatchOption) string {
+ items := make([]string, 0, len(options))
+ for _, option := range options {
+ items = append(items, option.key+"='"+option.value+"'")
+ }
+ return strings.Join(items, ",")
+}
+
+// WithMatchOption creates match option with given key and value
+func WithMatchOption(key, value string) MatchOption {
+ return MatchOption{key, value}
+}
+
+// doesn't make sense to export this option because clients can only
+// subscribe to messages with signal type.
+func withMatchType(typ string) MatchOption {
+ return WithMatchOption("type", typ)
+}
+
+// WithMatchSender sets sender match option.
+func WithMatchSender(sender string) MatchOption {
+ return WithMatchOption("sender", sender)
+}
+
+// WithMatchSender sets interface match option.
+func WithMatchInterface(iface string) MatchOption {
+ return WithMatchOption("interface", iface)
+}
+
+// WithMatchMember sets member match option.
+func WithMatchMember(member string) MatchOption {
+ return WithMatchOption("member", member)
+}
+
+// WithMatchObjectPath creates match option that filters events based on given path
+func WithMatchObjectPath(path ObjectPath) MatchOption {
+ return WithMatchOption("path", string(path))
+}
+
+// WithMatchPathNamespace sets path_namespace match option.
+func WithMatchPathNamespace(namespace ObjectPath) MatchOption {
+ return WithMatchOption("path_namespace", string(namespace))
+}
+
+// WithMatchDestination sets destination match option.
+func WithMatchDestination(destination string) MatchOption {
+ return WithMatchOption("destination", destination)
+}
diff --git a/vendor/github.com/godbus/dbus/v5/message.go b/vendor/github.com/godbus/dbus/v5/message.go
new file mode 100644
index 00000000..6a925367
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/message.go
@@ -0,0 +1,353 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+const protoVersion byte = 1
+
+// Flags represents the possible flags of a D-Bus message.
+type Flags byte
+
+const (
+ // FlagNoReplyExpected signals that the message is not expected to generate
+ // a reply. If this flag is set on outgoing messages, any possible reply
+ // will be discarded.
+ FlagNoReplyExpected Flags = 1 << iota
+ // FlagNoAutoStart signals that the message bus should not automatically
+ // start an application when handling this message.
+ FlagNoAutoStart
+ // FlagAllowInteractiveAuthorization may be set on a method call
+ // message to inform the receiving side that the caller is prepared
+ // to wait for interactive authorization, which might take a
+ // considerable time to complete. For instance, if this flag is set,
+ // it would be appropriate to query the user for passwords or
+ // confirmation via Polkit or a similar framework.
+ FlagAllowInteractiveAuthorization
+)
+
+// Type represents the possible types of a D-Bus message.
+type Type byte
+
+const (
+ TypeMethodCall Type = 1 + iota
+ TypeMethodReply
+ TypeError
+ TypeSignal
+ typeMax
+)
+
+func (t Type) String() string {
+ switch t {
+ case TypeMethodCall:
+ return "method call"
+ case TypeMethodReply:
+ return "reply"
+ case TypeError:
+ return "error"
+ case TypeSignal:
+ return "signal"
+ }
+ return "invalid"
+}
+
+// HeaderField represents the possible byte codes for the headers
+// of a D-Bus message.
+type HeaderField byte
+
+const (
+ FieldPath HeaderField = 1 + iota
+ FieldInterface
+ FieldMember
+ FieldErrorName
+ FieldReplySerial
+ FieldDestination
+ FieldSender
+ FieldSignature
+ FieldUnixFDs
+ fieldMax
+)
+
+// An InvalidMessageError describes the reason why a D-Bus message is regarded as
+// invalid.
+type InvalidMessageError string
+
+func (e InvalidMessageError) Error() string {
+ return "dbus: invalid message: " + string(e)
+}
+
+// fieldType are the types of the various header fields.
+var fieldTypes = [fieldMax]reflect.Type{
+ FieldPath: objectPathType,
+ FieldInterface: stringType,
+ FieldMember: stringType,
+ FieldErrorName: stringType,
+ FieldReplySerial: uint32Type,
+ FieldDestination: stringType,
+ FieldSender: stringType,
+ FieldSignature: signatureType,
+ FieldUnixFDs: uint32Type,
+}
+
+// requiredFields lists the header fields that are required by the different
+// message types.
+var requiredFields = [typeMax][]HeaderField{
+ TypeMethodCall: {FieldPath, FieldMember},
+ TypeMethodReply: {FieldReplySerial},
+ TypeError: {FieldErrorName, FieldReplySerial},
+ TypeSignal: {FieldPath, FieldInterface, FieldMember},
+}
+
+// Message represents a single D-Bus message.
+type Message struct {
+ Type
+ Flags
+ Headers map[HeaderField]Variant
+ Body []interface{}
+
+ serial uint32
+}
+
+type header struct {
+ Field byte
+ Variant
+}
+
+// DecodeMessage tries to decode a single message in the D-Bus wire format
+// from the given reader. The byte order is figured out from the first byte.
+// The possibly returned error can be an error of the underlying reader, an
+// InvalidMessageError or a FormatError.
+func DecodeMessage(rd io.Reader) (msg *Message, err error) {
+ var order binary.ByteOrder
+ var hlength, length uint32
+ var typ, flags, proto byte
+ var headers []header
+
+ b := make([]byte, 1)
+ _, err = rd.Read(b)
+ if err != nil {
+ return
+ }
+ switch b[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+
+ dec := newDecoder(rd, order)
+ dec.pos = 1
+
+ msg = new(Message)
+ vs, err := dec.Decode(Signature{"yyyuu"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
+ return nil, err
+ }
+ msg.Type = Type(typ)
+ msg.Flags = Flags(flags)
+
+ // get the header length separately because we need it later
+ b = make([]byte, 4)
+ _, err = io.ReadFull(rd, b)
+ if err != nil {
+ return nil, err
+ }
+ binary.Read(bytes.NewBuffer(b), order, &hlength)
+ if hlength+length+16 > 1<<27 {
+ return nil, InvalidMessageError("message is too long")
+ }
+ dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
+ dec.pos = 12
+ vs, err = dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &headers); err != nil {
+ return nil, err
+ }
+
+ msg.Headers = make(map[HeaderField]Variant)
+ for _, v := range headers {
+ msg.Headers[HeaderField(v.Field)] = v.Variant
+ }
+
+ dec.align(8)
+ body := make([]byte, int(length))
+ if length != 0 {
+ _, err := io.ReadFull(rd, body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err = msg.IsValid(); err != nil {
+ return nil, err
+ }
+ sig, _ := msg.Headers[FieldSignature].value.(Signature)
+ if sig.str != "" {
+ buf := bytes.NewBuffer(body)
+ dec = newDecoder(buf, order)
+ vs, err := dec.Decode(sig)
+ if err != nil {
+ return nil, err
+ }
+ msg.Body = vs
+ }
+
+ return
+}
+
+// EncodeTo encodes and sends a message to the given writer. The byte order must
+// be either binary.LittleEndian or binary.BigEndian. If the message is not
+// valid or an error occurs when writing, an error is returned.
+func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
+ if err := msg.IsValid(); err != nil {
+ return err
+ }
+ var vs [7]interface{}
+ switch order {
+ case binary.LittleEndian:
+ vs[0] = byte('l')
+ case binary.BigEndian:
+ vs[0] = byte('B')
+ default:
+ return errors.New("dbus: invalid byte order")
+ }
+ body := new(bytes.Buffer)
+ enc := newEncoder(body, order)
+ if len(msg.Body) != 0 {
+ enc.Encode(msg.Body...)
+ }
+ vs[1] = msg.Type
+ vs[2] = msg.Flags
+ vs[3] = protoVersion
+ vs[4] = uint32(len(body.Bytes()))
+ vs[5] = msg.serial
+ headers := make([]header, 0, len(msg.Headers))
+ for k, v := range msg.Headers {
+ headers = append(headers, header{byte(k), v})
+ }
+ vs[6] = headers
+ var buf bytes.Buffer
+ enc = newEncoder(&buf, order)
+ enc.Encode(vs[:]...)
+ enc.align(8)
+ body.WriteTo(&buf)
+ if buf.Len() > 1<<27 {
+ return InvalidMessageError("message is too long")
+ }
+ if _, err := buf.WriteTo(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsValid checks whether msg is a valid message and returns an
+// InvalidMessageError if it is not.
+func (msg *Message) IsValid() error {
+ if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 {
+ return InvalidMessageError("invalid flags")
+ }
+ if msg.Type == 0 || msg.Type >= typeMax {
+ return InvalidMessageError("invalid message type")
+ }
+ for k, v := range msg.Headers {
+ if k == 0 || k >= fieldMax {
+ return InvalidMessageError("invalid header")
+ }
+ if reflect.TypeOf(v.value) != fieldTypes[k] {
+ return InvalidMessageError("invalid type of header field")
+ }
+ }
+ for _, v := range requiredFields[msg.Type] {
+ if _, ok := msg.Headers[v]; !ok {
+ return InvalidMessageError("missing required header")
+ }
+ }
+ if path, ok := msg.Headers[FieldPath]; ok {
+ if !path.value.(ObjectPath).IsValid() {
+ return InvalidMessageError("invalid path name")
+ }
+ }
+ if iface, ok := msg.Headers[FieldInterface]; ok {
+ if !isValidInterface(iface.value.(string)) {
+ return InvalidMessageError("invalid interface name")
+ }
+ }
+ if member, ok := msg.Headers[FieldMember]; ok {
+ if !isValidMember(member.value.(string)) {
+ return InvalidMessageError("invalid member name")
+ }
+ }
+ if errname, ok := msg.Headers[FieldErrorName]; ok {
+ if !isValidInterface(errname.value.(string)) {
+ return InvalidMessageError("invalid error name")
+ }
+ }
+ if len(msg.Body) != 0 {
+ if _, ok := msg.Headers[FieldSignature]; !ok {
+ return InvalidMessageError("missing signature")
+ }
+ }
+ return nil
+}
+
+// Serial returns the message's serial number. The returned value is only valid
+// for messages received by eavesdropping.
+func (msg *Message) Serial() uint32 {
+ return msg.serial
+}
+
+// String returns a string representation of a message similar to the format of
+// dbus-monitor.
+func (msg *Message) String() string {
+ if err := msg.IsValid(); err != nil {
+ return ""
+ }
+ s := msg.Type.String()
+ if v, ok := msg.Headers[FieldSender]; ok {
+ s += " from " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldDestination]; ok {
+ s += " to " + v.value.(string)
+ }
+ s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
+ if v, ok := msg.Headers[FieldReplySerial]; ok {
+ s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldUnixFDs]; ok {
+ s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldPath]; ok {
+ s += " path " + string(v.value.(ObjectPath))
+ }
+ if v, ok := msg.Headers[FieldInterface]; ok {
+ s += " interface " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldErrorName]; ok {
+ s += " error " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldMember]; ok {
+ s += " member " + v.value.(string)
+ }
+ if len(msg.Body) != 0 {
+ s += "\n"
+ }
+ for i, v := range msg.Body {
+ s += " " + MakeVariant(v).String()
+ if i != len(msg.Body)-1 {
+ s += "\n"
+ }
+ }
+ return s
+}
diff --git a/vendor/github.com/godbus/dbus/v5/object.go b/vendor/github.com/godbus/dbus/v5/object.go
new file mode 100644
index 00000000..8acd7fc8
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/object.go
@@ -0,0 +1,211 @@
+package dbus
+
+import (
+ "context"
+ "errors"
+ "strings"
+)
+
+// BusObject is the interface of a remote object on which methods can be
+// invoked.
+type BusObject interface {
+ Call(method string, flags Flags, args ...interface{}) *Call
+ CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call
+ Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call
+ AddMatchSignal(iface, member string, options ...MatchOption) *Call
+ RemoveMatchSignal(iface, member string, options ...MatchOption) *Call
+ GetProperty(p string) (Variant, error)
+ SetProperty(p string, v interface{}) error
+ Destination() string
+ Path() ObjectPath
+}
+
+// Object represents a remote object on which methods can be invoked.
+type Object struct {
+ conn *Conn
+ dest string
+ path ObjectPath
+}
+
+// Call calls a method with (*Object).Go and waits for its reply.
+func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
+ return <-o.createCall(context.Background(), method, flags, make(chan *Call, 1), args...).Done
+}
+
+// CallWithContext acts like Call but takes a context
+func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call {
+ return <-o.createCall(ctx, method, flags, make(chan *Call, 1), args...).Done
+}
+
+// AddMatchSignal subscribes BusObject to signals from specified interface,
+// method (member). Additional filter rules can be added via WithMatch* option constructors.
+// Note: To filter events by object path you have to specify this path via an option.
+//
+// Deprecated: use (*Conn) AddMatchSignal instead.
+func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *Call {
+ base := []MatchOption{
+ withMatchType("signal"),
+ WithMatchInterface(iface),
+ WithMatchMember(member),
+ }
+
+ options = append(base, options...)
+ return o.conn.BusObject().Call(
+ "org.freedesktop.DBus.AddMatch",
+ 0,
+ formatMatchOptions(options),
+ )
+}
+
+// RemoveMatchSignal unsubscribes BusObject from signals from specified interface,
+// method (member). Additional filter rules can be added via WithMatch* option constructors
+//
+// Deprecated: use (*Conn) RemoveMatchSignal instead.
+func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) *Call {
+ base := []MatchOption{
+ withMatchType("signal"),
+ WithMatchInterface(iface),
+ WithMatchMember(member),
+ }
+
+ options = append(base, options...)
+ return o.conn.BusObject().Call(
+ "org.freedesktop.DBus.RemoveMatch",
+ 0,
+ formatMatchOptions(options),
+ )
+}
+
+// Go calls a method with the given arguments asynchronously. It returns a
+// Call structure representing this method call. The passed channel will
+// return the same value once the call is done. If ch is nil, a new channel
+// will be allocated. Otherwise, ch has to be buffered or Go will panic.
+//
+// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
+// is returned with any error in Err and a closed channel in Done containing
+// the returned Call as it's one entry.
+//
+// If the method parameter contains a dot ('.'), the part before the last dot
+// specifies the interface on which the method is called.
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ return o.createCall(context.Background(), method, flags, ch, args...)
+}
+
+// GoWithContext acts like Go but takes a context
+func (o *Object) GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ return o.createCall(ctx, method, flags, ch, args...)
+}
+
+func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ if ctx == nil {
+ panic("nil context")
+ }
+ iface := ""
+ i := strings.LastIndex(method, ".")
+ if i != -1 {
+ iface = method[:i]
+ }
+ method = method[i+1:]
+ msg := new(Message)
+ msg.Type = TypeMethodCall
+ msg.serial = o.conn.getSerial()
+ msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldPath] = MakeVariant(o.path)
+ msg.Headers[FieldDestination] = MakeVariant(o.dest)
+ msg.Headers[FieldMember] = MakeVariant(method)
+ if iface != "" {
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ }
+ msg.Body = args
+ if len(args) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
+ }
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 1)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Object).Go")
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ call := &Call{
+ Destination: o.dest,
+ Path: o.path,
+ Method: method,
+ Args: args,
+ Done: ch,
+ ctxCanceler: cancel,
+ ctx: ctx,
+ }
+ o.conn.calls.track(msg.serial, call)
+ o.conn.sendMessageAndIfClosed(msg, func() {
+ o.conn.calls.handleSendError(msg, ErrClosed)
+ cancel()
+ })
+ go func() {
+ <-ctx.Done()
+ o.conn.calls.handleSendError(msg, ctx.Err())
+ }()
+
+ return call
+ }
+ done := make(chan *Call, 1)
+ call := &Call{
+ Err: nil,
+ Done: done,
+ }
+ defer func() {
+ call.Done <- call
+ close(done)
+ }()
+ o.conn.sendMessageAndIfClosed(msg, func() {
+ call.Err = ErrClosed
+ })
+ return call
+}
+
+// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) GetProperty(p string) (Variant, error) {
+ idx := strings.LastIndex(p, ".")
+ if idx == -1 || idx+1 == len(p) {
+ return Variant{}, errors.New("dbus: invalid property " + p)
+ }
+
+ iface := p[:idx]
+ prop := p[idx+1:]
+
+ result := Variant{}
+ err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
+
+ if err != nil {
+ return Variant{}, err
+ }
+
+ return result, nil
+}
+
+// SetProperty calls org.freedesktop.DBus.Properties.Set on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) SetProperty(p string, v interface{}) error {
+ idx := strings.LastIndex(p, ".")
+ if idx == -1 || idx+1 == len(p) {
+ return errors.New("dbus: invalid property " + p)
+ }
+
+ iface := p[:idx]
+ prop := p[idx+1:]
+
+ return o.Call("org.freedesktop.DBus.Properties.Set", 0, iface, prop, v).Err
+}
+
+// Destination returns the destination that calls on (o *Object) are sent to.
+func (o *Object) Destination() string {
+ return o.dest
+}
+
+// Path returns the path that calls on (o *Object") are sent to.
+func (o *Object) Path() ObjectPath {
+ return o.path
+}
diff --git a/vendor/github.com/godbus/dbus/v5/server_interfaces.go b/vendor/github.com/godbus/dbus/v5/server_interfaces.go
new file mode 100644
index 00000000..79d97edf
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/server_interfaces.go
@@ -0,0 +1,107 @@
+package dbus
+
+// Terminator allows a handler to implement a shutdown mechanism that
+// is called when the connection terminates.
+type Terminator interface {
+ Terminate()
+}
+
+// Handler is the representation of a D-Bus Application.
+//
+// The Handler must have a way to lookup objects given
+// an ObjectPath. The returned object must implement the
+// ServerObject interface.
+type Handler interface {
+ LookupObject(path ObjectPath) (ServerObject, bool)
+}
+
+// ServerObject is the representation of an D-Bus Object.
+//
+// Objects are registered at a path for a given Handler.
+// The Objects implement D-Bus interfaces. The semantics
+// of Interface lookup is up to the implementation of
+// the ServerObject. The ServerObject implementation may
+// choose to implement empty string as a valid interface
+// represeting all methods or not per the D-Bus specification.
+type ServerObject interface {
+ LookupInterface(name string) (Interface, bool)
+}
+
+// An Interface is the representation of a D-Bus Interface.
+//
+// Interfaces are a grouping of methods implemented by the Objects.
+// Interfaces are responsible for routing method calls.
+type Interface interface {
+ LookupMethod(name string) (Method, bool)
+}
+
+// A Method represents the exposed methods on D-Bus.
+type Method interface {
+ // Call requires that all arguments are decoded before being passed to it.
+ Call(args ...interface{}) ([]interface{}, error)
+ NumArguments() int
+ NumReturns() int
+ // ArgumentValue returns a representative value for the argument at position
+ // it should be of the proper type. reflect.Zero would be a good mechanism
+ // to use for this Value.
+ ArgumentValue(position int) interface{}
+ // ReturnValue returns a representative value for the return at position
+ // it should be of the proper type. reflect.Zero would be a good mechanism
+ // to use for this Value.
+ ReturnValue(position int) interface{}
+}
+
+// An Argument Decoder can decode arguments using the non-standard mechanism
+//
+// If a method implements this interface then the non-standard
+// decoder will be used.
+//
+// Method arguments must be decoded from the message.
+// The mechanism for doing this will vary based on the
+// implementation of the method. A normal approach is provided
+// as part of this library, but may be replaced with
+// any other decoding scheme.
+type ArgumentDecoder interface {
+ // To decode the arguments of a method the sender and message are
+ // provided incase the semantics of the implementer provides access
+ // to these as part of the method invocation.
+ DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
+}
+
+// A SignalHandler is responsible for delivering a signal.
+//
+// Signal delivery may be changed from the default channel
+// based approach by Handlers implementing the SignalHandler
+// interface.
+type SignalHandler interface {
+ DeliverSignal(iface, name string, signal *Signal)
+}
+
+// SignalRegistrar manages signal delivery channels.
+//
+// This is an optional set of methods for `SignalHandler`.
+type SignalRegistrar interface {
+ AddSignal(ch chan<- *Signal)
+ RemoveSignal(ch chan<- *Signal)
+}
+
+// A DBusError is used to convert a generic object to a D-Bus error.
+//
+// Any custom error mechanism may implement this interface to provide
+// a custom encoding of the error on D-Bus. By default if a normal
+// error is returned, it will be encoded as the generic
+// "org.freedesktop.DBus.Error.Failed" error. By implementing this
+// interface as well a custom encoding may be provided.
+type DBusError interface {
+ DBusError() (string, []interface{})
+}
+
+// SerialGenerator is responsible for serials generation.
+//
+// Different approaches for the serial generation can be used,
+// maintaining a map guarded with a mutex (the standard way) or
+// simply increment an atomic counter.
+type SerialGenerator interface {
+ GetSerial() uint32
+ RetireSerial(serial uint32)
+}
diff --git a/vendor/github.com/godbus/dbus/v5/sig.go b/vendor/github.com/godbus/dbus/v5/sig.go
new file mode 100644
index 00000000..c1b80920
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/sig.go
@@ -0,0 +1,259 @@
+package dbus
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var sigToType = map[byte]reflect.Type{
+ 'y': byteType,
+ 'b': boolType,
+ 'n': int16Type,
+ 'q': uint16Type,
+ 'i': int32Type,
+ 'u': uint32Type,
+ 'x': int64Type,
+ 't': uint64Type,
+ 'd': float64Type,
+ 's': stringType,
+ 'g': signatureType,
+ 'o': objectPathType,
+ 'v': variantType,
+ 'h': unixFDIndexType,
+}
+
+// Signature represents a correct type signature as specified by the D-Bus
+// specification. The zero value represents the empty signature, "".
+type Signature struct {
+ str string
+}
+
+// SignatureOf returns the concatenation of all the signatures of the given
+// values. It panics if one of them is not representable in D-Bus.
+func SignatureOf(vs ...interface{}) Signature {
+ var s string
+ for _, v := range vs {
+ s += getSignature(reflect.TypeOf(v))
+ }
+ return Signature{s}
+}
+
+// SignatureOfType returns the signature of the given type. It panics if the
+// type is not representable in D-Bus.
+func SignatureOfType(t reflect.Type) Signature {
+ return Signature{getSignature(t)}
+}
+
+// getSignature returns the signature of the given type and panics on unknown types.
+func getSignature(t reflect.Type) string {
+ // handle simple types first
+ switch t.Kind() {
+ case reflect.Uint8:
+ return "y"
+ case reflect.Bool:
+ return "b"
+ case reflect.Int16:
+ return "n"
+ case reflect.Uint16:
+ return "q"
+ case reflect.Int, reflect.Int32:
+ if t == unixFDType {
+ return "h"
+ }
+ return "i"
+ case reflect.Uint, reflect.Uint32:
+ if t == unixFDIndexType {
+ return "h"
+ }
+ return "u"
+ case reflect.Int64:
+ return "x"
+ case reflect.Uint64:
+ return "t"
+ case reflect.Float64:
+ return "d"
+ case reflect.Ptr:
+ return getSignature(t.Elem())
+ case reflect.String:
+ if t == objectPathType {
+ return "o"
+ }
+ return "s"
+ case reflect.Struct:
+ if t == variantType {
+ return "v"
+ } else if t == signatureType {
+ return "g"
+ }
+ var s string
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ s += getSignature(t.Field(i).Type)
+ }
+ }
+ return "(" + s + ")"
+ case reflect.Array, reflect.Slice:
+ return "a" + getSignature(t.Elem())
+ case reflect.Map:
+ if !isKeyType(t.Key()) {
+ panic(InvalidTypeError{t})
+ }
+ return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
+ case reflect.Interface:
+ return "v"
+ }
+ panic(InvalidTypeError{t})
+}
+
+// ParseSignature returns the signature represented by this string, or a
+// SignatureError if the string is not a valid signature.
+func ParseSignature(s string) (sig Signature, err error) {
+ if len(s) == 0 {
+ return
+ }
+ if len(s) > 255 {
+ return Signature{""}, SignatureError{s, "too long"}
+ }
+ sig.str = s
+ for err == nil && len(s) != 0 {
+ err, s = validSingle(s, 0)
+ }
+ if err != nil {
+ sig = Signature{""}
+ }
+
+ return
+}
+
+// ParseSignatureMust behaves like ParseSignature, except that it panics if s
+// is not valid.
+func ParseSignatureMust(s string) Signature {
+ sig, err := ParseSignature(s)
+ if err != nil {
+ panic(err)
+ }
+ return sig
+}
+
+// Empty retruns whether the signature is the empty signature.
+func (s Signature) Empty() bool {
+ return s.str == ""
+}
+
+// Single returns whether the signature represents a single, complete type.
+func (s Signature) Single() bool {
+ err, r := validSingle(s.str, 0)
+ return err != nil && r == ""
+}
+
+// String returns the signature's string representation.
+func (s Signature) String() string {
+ return s.str
+}
+
+// A SignatureError indicates that a signature passed to a function or received
+// on a connection is not a valid signature.
+type SignatureError struct {
+ Sig string
+ Reason string
+}
+
+func (e SignatureError) Error() string {
+ return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
+}
+
+// Try to read a single type from this string. If it was successful, err is nil
+// and rem is the remaining unparsed part. Otherwise, err is a non-nil
+// SignatureError and rem is "". depth is the current recursion depth which may
+// not be greater than 64 and should be given as 0 on the first call.
+func validSingle(s string, depth int) (err error, rem string) {
+ if s == "" {
+ return SignatureError{Sig: s, Reason: "empty signature"}, ""
+ }
+ if depth > 64 {
+ return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
+ }
+ switch s[0] {
+ case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
+ return nil, s[1:]
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ i := findMatching(s[1:], '{', '}')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
+ }
+ i++
+ rem = s[i+1:]
+ s = s[2:i]
+ if err, _ = validSingle(s[:1], depth+1); err != nil {
+ return err, ""
+ }
+ err, nr := validSingle(s[1:], depth+1)
+ if err != nil {
+ return err, ""
+ }
+ if nr != "" {
+ return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
+ }
+ return nil, rem
+ }
+ return validSingle(s[1:], depth+1)
+ case '(':
+ i := findMatching(s, '(', ')')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
+ }
+ rem = s[i+1:]
+ s = s[1:i]
+ for err == nil && s != "" {
+ err, s = validSingle(s, depth+1)
+ }
+ if err != nil {
+ rem = ""
+ }
+ return
+ }
+ return SignatureError{Sig: s, Reason: "invalid type character"}, ""
+}
+
+func findMatching(s string, left, right rune) int {
+ n := 0
+ for i, v := range s {
+ if v == left {
+ n++
+ } else if v == right {
+ n--
+ }
+ if n == 0 {
+ return i
+ }
+ }
+ return -1
+}
+
+// typeFor returns the type of the given signature. It ignores any left over
+// characters and panics if s doesn't start with a valid type signature.
+func typeFor(s string) (t reflect.Type) {
+ err, _ := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ if t, ok := sigToType[s[0]]; ok {
+ return t
+ }
+ switch s[0] {
+ case 'a':
+ if s[1] == '{' {
+ i := strings.LastIndex(s, "}")
+ t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
+ } else {
+ t = reflect.SliceOf(typeFor(s[1:]))
+ }
+ case '(':
+ t = interfacesType
+ }
+ return
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_darwin.go b/vendor/github.com/godbus/dbus/v5/transport_darwin.go
new file mode 100644
index 00000000..1bba0d6b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_darwin.go
@@ -0,0 +1,6 @@
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_generic.go b/vendor/github.com/godbus/dbus/v5/transport_generic.go
new file mode 100644
index 00000000..718a1ff0
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_generic.go
@@ -0,0 +1,50 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+ "unsafe"
+)
+
+var nativeEndian binary.ByteOrder
+
+func detectEndianness() binary.ByteOrder {
+ var x uint32 = 0x01020304
+ if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
+ return binary.BigEndian
+ }
+ return binary.LittleEndian
+}
+
+func init() {
+ nativeEndian = detectEndianness()
+}
+
+type genericTransport struct {
+ io.ReadWriteCloser
+}
+
+func (t genericTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
+
+func (t genericTransport) SupportsUnixFDs() bool {
+ return false
+}
+
+func (t genericTransport) EnableUnixFDs() {}
+
+func (t genericTransport) ReadMessage() (*Message, error) {
+ return DecodeMessage(t)
+}
+
+func (t genericTransport) SendMessage(msg *Message) error {
+ for _, v := range msg.Body {
+ if _, ok := v.(UnixFD); ok {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ }
+ return msg.EncodeTo(t, nativeEndian)
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go b/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go
new file mode 100644
index 00000000..697739ef
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go
@@ -0,0 +1,39 @@
+//+build !windows
+
+package dbus
+
+import (
+ "errors"
+ "io/ioutil"
+ "net"
+)
+
+func init() {
+ transports["nonce-tcp"] = newNonceTcpTransport
+}
+
+func newNonceTcpTransport(keys string) (transport, error) {
+ host := getKey(keys, "host")
+ port := getKey(keys, "port")
+ noncefile := getKey(keys, "noncefile")
+ if host == "" || port == "" || noncefile == "" {
+ return nil, errors.New("dbus: unsupported address (must set host, port and noncefile)")
+ }
+ protocol, err := tcpFamily(keys)
+ if err != nil {
+ return nil, err
+ }
+ socket, err := net.Dial(protocol, net.JoinHostPort(host, port))
+ if err != nil {
+ return nil, err
+ }
+ b, err := ioutil.ReadFile(noncefile)
+ if err != nil {
+ return nil, err
+ }
+ _, err = socket.Write(b)
+ if err != nil {
+ return nil, err
+ }
+ return NewConn(socket)
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_tcp.go b/vendor/github.com/godbus/dbus/v5/transport_tcp.go
new file mode 100644
index 00000000..f91c9b7d
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_tcp.go
@@ -0,0 +1,41 @@
+package dbus
+
+import (
+ "errors"
+ "net"
+)
+
+func init() {
+ transports["tcp"] = newTcpTransport
+}
+
+func tcpFamily(keys string) (string, error) {
+ switch getKey(keys, "family") {
+ case "":
+ return "tcp", nil
+ case "ipv4":
+ return "tcp4", nil
+ case "ipv6":
+ return "tcp6", nil
+ default:
+ return "", errors.New("dbus: invalid tcp family (must be ipv4 or ipv6)")
+ }
+}
+
+func newTcpTransport(keys string) (transport, error) {
+ host := getKey(keys, "host")
+ port := getKey(keys, "port")
+ if host == "" || port == "" {
+ return nil, errors.New("dbus: unsupported address (must set host and port)")
+ }
+
+ protocol, err := tcpFamily(keys)
+ if err != nil {
+ return nil, err
+ }
+ socket, err := net.Dial(protocol, net.JoinHostPort(host, port))
+ if err != nil {
+ return nil, err
+ }
+ return NewConn(socket)
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_unix.go b/vendor/github.com/godbus/dbus/v5/transport_unix.go
new file mode 100644
index 00000000..c7cd02f9
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_unix.go
@@ -0,0 +1,214 @@
+//+build !windows,!solaris
+
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+type oobReader struct {
+ conn *net.UnixConn
+ oob []byte
+ buf [4096]byte
+}
+
+func (o *oobReader) Read(b []byte) (n int, err error) {
+ n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
+ if err != nil {
+ return n, err
+ }
+ if flags&syscall.MSG_CTRUNC != 0 {
+ return n, errors.New("dbus: control data truncated (too many fds received)")
+ }
+ o.oob = append(o.oob, o.buf[:oobn]...)
+ return n, nil
+}
+
+type unixTransport struct {
+ *net.UnixConn
+ rdr *oobReader
+ hasUnixFDs bool
+}
+
+func newUnixTransport(keys string) (transport, error) {
+ var err error
+
+ t := new(unixTransport)
+ abstract := getKey(keys, "abstract")
+ path := getKey(keys, "path")
+ switch {
+ case abstract == "" && path == "":
+ return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
+ case abstract != "" && path == "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ case abstract == "" && path != "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ default:
+ return nil, errors.New("dbus: invalid address (both path and abstract set)")
+ }
+}
+
+func init() {
+ transports["unix"] = newUnixTransport
+}
+
+func (t *unixTransport) EnableUnixFDs() {
+ t.hasUnixFDs = true
+}
+
+func (t *unixTransport) ReadMessage() (*Message, error) {
+ var (
+ blen, hlen uint32
+ csheader [16]byte
+ headers []header
+ order binary.ByteOrder
+ unixfds uint32
+ )
+ // To be sure that all bytes of out-of-band data are read, we use a special
+ // reader that uses ReadUnix on the underlying connection instead of Read
+ // and gathers the out-of-band data in a buffer.
+ if t.rdr == nil {
+ t.rdr = &oobReader{conn: t.UnixConn}
+ } else {
+ t.rdr.oob = nil
+ }
+
+ // read the first 16 bytes (the part of the header that has a constant size),
+ // from which we can figure out the length of the rest of the message
+ if _, err := io.ReadFull(t.rdr, csheader[:]); err != nil {
+ return nil, err
+ }
+ switch csheader[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+ // csheader[4:8] -> length of message body, csheader[12:16] -> length of
+ // header fields (without alignment)
+ binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
+ binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
+ if hlen%8 != 0 {
+ hlen += 8 - (hlen % 8)
+ }
+
+ // decode headers and look for unix fds
+ headerdata := make([]byte, hlen+4)
+ copy(headerdata, csheader[12:])
+ if _, err := io.ReadFull(t.rdr, headerdata[4:]); err != nil {
+ return nil, err
+ }
+ dec := newDecoder(bytes.NewBuffer(headerdata), order)
+ dec.pos = 12
+ vs, err := dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ Store(vs, &headers)
+ for _, v := range headers {
+ if v.Field == byte(FieldUnixFDs) {
+ unixfds, _ = v.Variant.value.(uint32)
+ }
+ }
+ all := make([]byte, 16+hlen+blen)
+ copy(all, csheader[:])
+ copy(all[16:], headerdata[4:])
+ if _, err := io.ReadFull(t.rdr, all[16+hlen:]); err != nil {
+ return nil, err
+ }
+ if unixfds != 0 {
+ if !t.hasUnixFDs {
+ return nil, errors.New("dbus: got unix fds on unsupported transport")
+ }
+ // read the fds from the OOB data
+ scms, err := syscall.ParseSocketControlMessage(t.rdr.oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, errors.New("dbus: received more than one socket control message")
+ }
+ fds, err := syscall.ParseUnixRights(&scms[0])
+ if err != nil {
+ return nil, err
+ }
+ msg, err := DecodeMessage(bytes.NewBuffer(all))
+ if err != nil {
+ return nil, err
+ }
+ // substitute the values in the message body (which are indices for the
+ // array receiver via OOB) with the actual values
+ for i, v := range msg.Body {
+ switch v.(type) {
+ case UnixFDIndex:
+ j := v.(UnixFDIndex)
+ if uint32(j) >= unixfds {
+ return nil, InvalidMessageError("invalid index for unix fd")
+ }
+ msg.Body[i] = UnixFD(fds[j])
+ case []UnixFDIndex:
+ idxArray := v.([]UnixFDIndex)
+ fdArray := make([]UnixFD, len(idxArray))
+ for k, j := range idxArray {
+ if uint32(j) >= unixfds {
+ return nil, InvalidMessageError("invalid index for unix fd")
+ }
+ fdArray[k] = UnixFD(fds[j])
+ }
+ msg.Body[i] = fdArray
+ }
+ }
+ return msg, nil
+ }
+ return DecodeMessage(bytes.NewBuffer(all))
+}
+
+func (t *unixTransport) SendMessage(msg *Message) error {
+ fds := make([]int, 0)
+ for i, v := range msg.Body {
+ if fd, ok := v.(UnixFD); ok {
+ msg.Body[i] = UnixFDIndex(len(fds))
+ fds = append(fds, int(fd))
+ }
+ }
+ if len(fds) != 0 {
+ if !t.hasUnixFDs {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
+ oob := syscall.UnixRights(fds...)
+ buf := new(bytes.Buffer)
+ msg.EncodeTo(buf, nativeEndian)
+ n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() || oobn != len(oob) {
+ return io.ErrShortWrite
+ }
+ } else {
+ if err := msg.EncodeTo(t, nativeEndian); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *unixTransport) SupportsUnixFDs() bool {
+ return true
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go
new file mode 100644
index 00000000..a8cd3939
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go
@@ -0,0 +1,95 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for DragonFly BSD
+
+package dbus
+
+/*
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// http://golang.org/src/pkg/syscall/types_dragonfly.go
+// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
+const (
+ SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+ // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+ //salign := sizeofPtr
+ // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
+ // still require 32-bit aligned access to network subsystem.
+ //if darwin64Bit || dragonfly64Bit {
+ // salign = 4
+ //}
+ salign := 4
+ return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+ b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+ h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = syscall.SOL_SOCKET
+ h.Type = syscall.SCM_CREDS
+ h.SetLen(syscall.CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != syscall.SOL_SOCKET {
+ return nil, syscall.EINVAL
+ }
+ if m.Header.Type != syscall.SCM_CREDS {
+ return nil, syscall.EINVAL
+ }
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+ return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go
new file mode 100644
index 00000000..0fc5b927
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go
@@ -0,0 +1,91 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for FreeBSD
+
+package dbus
+
+/*
+const int sizeofPtr = sizeof(void*);
+#define _WANT_UCRED
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+ "io"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// https://golang.org/src/syscall/ztypes_freebsd_amd64.go
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// https://golang.org/src/syscall/types_freebsd.go
+// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h
+const (
+ SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+ salign := C.sizeofPtr
+
+ return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+ b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+ h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+ h.Level = syscall.SOL_SOCKET
+ h.Type = syscall.SCM_CREDS
+ h.SetLen(syscall.CmsgLen(SizeofUcred))
+ *((*Ucred)(cmsgData(h))) = *ucred
+ return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+ if m.Header.Level != syscall.SOL_SOCKET {
+ return nil, syscall.EINVAL
+ }
+ if m.Header.Type != syscall.SCM_CREDS {
+ return nil, syscall.EINVAL
+ }
+ ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+ return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go
new file mode 100644
index 00000000..d9dfdf69
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go
@@ -0,0 +1,25 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+package dbus
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+func (t *unixTransport) SendNullByte() error {
+ ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+ b := syscall.UnixCredentials(ucred)
+ _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+ if err != nil {
+ return err
+ }
+ if oobn != len(b) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go b/vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go
new file mode 100644
index 00000000..af7bafdf
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go
@@ -0,0 +1,14 @@
+package dbus
+
+import "io"
+
+func (t *unixTransport) SendNullByte() error {
+ n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil)
+ if err != nil {
+ return err
+ }
+ if n != 1 {
+ return io.ErrShortWrite
+ }
+ return nil
+}
diff --git a/vendor/github.com/godbus/dbus/v5/variant.go b/vendor/github.com/godbus/dbus/v5/variant.go
new file mode 100644
index 00000000..5b51828c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/variant.go
@@ -0,0 +1,144 @@
+package dbus
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Variant represents the D-Bus variant type.
+type Variant struct {
+ sig Signature
+ value interface{}
+}
+
+// MakeVariant converts the given value to a Variant. It panics if v cannot be
+// represented as a D-Bus type.
+func MakeVariant(v interface{}) Variant {
+ return MakeVariantWithSignature(v, SignatureOf(v))
+}
+
+// MakeVariantWithSignature converts the given value to a Variant.
+func MakeVariantWithSignature(v interface{}, s Signature) Variant {
+ return Variant{s, v}
+}
+
+// ParseVariant parses the given string as a variant as described at
+// https://developer.gnome.org/glib/stable/gvariant-text.html. If sig is not
+// empty, it is taken to be the expected signature for the variant.
+func ParseVariant(s string, sig Signature) (Variant, error) {
+ tokens := varLex(s)
+ p := &varParser{tokens: tokens}
+ n, err := varMakeNode(p)
+ if err != nil {
+ return Variant{}, err
+ }
+ if sig.str == "" {
+ sig, err = varInfer(n)
+ if err != nil {
+ return Variant{}, err
+ }
+ }
+ v, err := n.Value(sig)
+ if err != nil {
+ return Variant{}, err
+ }
+ return MakeVariant(v), nil
+}
+
+// format returns a formatted version of v and whether this string can be parsed
+// unambigously.
+func (v Variant) format() (string, bool) {
+ switch v.sig.str[0] {
+ case 'b', 'i':
+ return fmt.Sprint(v.value), true
+ case 'n', 'q', 'u', 'x', 't', 'd', 'h':
+ return fmt.Sprint(v.value), false
+ case 's':
+ return strconv.Quote(v.value.(string)), true
+ case 'o':
+ return strconv.Quote(string(v.value.(ObjectPath))), false
+ case 'g':
+ return strconv.Quote(v.value.(Signature).str), false
+ case 'v':
+ s, unamb := v.value.(Variant).format()
+ if !unamb {
+ return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
+ }
+ return "<" + s + ">", true
+ case 'y':
+ return fmt.Sprintf("%#x", v.value.(byte)), false
+ }
+ rv := reflect.ValueOf(v.value)
+ switch rv.Kind() {
+ case reflect.Slice:
+ if rv.Len() == 0 {
+ return "[]", false
+ }
+ unamb := true
+ buf := bytes.NewBuffer([]byte("["))
+ for i := 0; i < rv.Len(); i++ {
+ // TODO: slooow
+ s, b := MakeVariant(rv.Index(i).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ if i != rv.Len()-1 {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String(), unamb
+ case reflect.Map:
+ if rv.Len() == 0 {
+ return "{}", false
+ }
+ unamb := true
+ var buf bytes.Buffer
+ kvs := make([]string, rv.Len())
+ for i, k := range rv.MapKeys() {
+ s, b := MakeVariant(k.Interface()).format()
+ unamb = unamb && b
+ buf.Reset()
+ buf.WriteString(s)
+ buf.WriteString(": ")
+ s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ kvs[i] = buf.String()
+ }
+ buf.Reset()
+ buf.WriteByte('{')
+ sort.Strings(kvs)
+ for i, kv := range kvs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(kv)
+ }
+ buf.WriteByte('}')
+ return buf.String(), unamb
+ }
+ return `"INVALID"`, true
+}
+
+// Signature returns the D-Bus signature of the underlying value of v.
+func (v Variant) Signature() Signature {
+ return v.sig
+}
+
+// String returns the string representation of the underlying value of v as
+// described at https://developer.gnome.org/glib/stable/gvariant-text.html.
+func (v Variant) String() string {
+ s, unamb := v.format()
+ if !unamb {
+ return "@" + v.sig.str + " " + s
+ }
+ return s
+}
+
+// Value returns the underlying value of v.
+func (v Variant) Value() interface{} {
+ return v.value
+}
diff --git a/vendor/github.com/godbus/dbus/v5/variant_lexer.go b/vendor/github.com/godbus/dbus/v5/variant_lexer.go
new file mode 100644
index 00000000..bf1398c8
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/variant_lexer.go
@@ -0,0 +1,284 @@
+package dbus
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Heavily inspired by the lexer from text/template.
+
+type varToken struct {
+ typ varTokenType
+ val string
+}
+
+type varTokenType byte
+
+const (
+ tokEOF varTokenType = iota
+ tokError
+ tokNumber
+ tokString
+ tokBool
+ tokArrayStart
+ tokArrayEnd
+ tokDictStart
+ tokDictEnd
+ tokVariantStart
+ tokVariantEnd
+ tokComma
+ tokColon
+ tokType
+ tokByteString
+)
+
+type varLexer struct {
+ input string
+ start int
+ pos int
+ width int
+ tokens []varToken
+}
+
+type lexState func(*varLexer) lexState
+
+func varLex(s string) []varToken {
+ l := &varLexer{input: s}
+ l.run()
+ return l.tokens
+}
+
+func (l *varLexer) accept(valid string) bool {
+ if strings.ContainsRune(valid, l.next()) {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *varLexer) backup() {
+ l.pos -= l.width
+}
+
+func (l *varLexer) emit(t varTokenType) {
+ l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
+ l.start = l.pos
+}
+
+func (l *varLexer) errorf(format string, v ...interface{}) lexState {
+ l.tokens = append(l.tokens, varToken{
+ tokError,
+ fmt.Sprintf(format, v...),
+ })
+ return nil
+}
+
+func (l *varLexer) ignore() {
+ l.start = l.pos
+}
+
+func (l *varLexer) next() rune {
+ var r rune
+
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return -1
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+func (l *varLexer) run() {
+ for state := varLexNormal; state != nil; {
+ state = state(l)
+ }
+}
+
+func (l *varLexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func varLexNormal(l *varLexer) lexState {
+ for {
+ r := l.next()
+ switch {
+ case r == -1:
+ l.emit(tokEOF)
+ return nil
+ case r == '[':
+ l.emit(tokArrayStart)
+ case r == ']':
+ l.emit(tokArrayEnd)
+ case r == '{':
+ l.emit(tokDictStart)
+ case r == '}':
+ l.emit(tokDictEnd)
+ case r == '<':
+ l.emit(tokVariantStart)
+ case r == '>':
+ l.emit(tokVariantEnd)
+ case r == ':':
+ l.emit(tokColon)
+ case r == ',':
+ l.emit(tokComma)
+ case r == '\'' || r == '"':
+ l.backup()
+ return varLexString
+ case r == '@':
+ l.backup()
+ return varLexType
+ case unicode.IsSpace(r):
+ l.ignore()
+ case unicode.IsNumber(r) || r == '+' || r == '-':
+ l.backup()
+ return varLexNumber
+ case r == 'b':
+ pos := l.start
+ if n := l.peek(); n == '"' || n == '\'' {
+ return varLexByteString
+ }
+ // not a byte string; try to parse it as a type or bool below
+ l.pos = pos + 1
+ l.width = 1
+ fallthrough
+ default:
+ // either a bool or a type. Try bools first.
+ l.backup()
+ if l.pos+4 <= len(l.input) {
+ if l.input[l.pos:l.pos+4] == "true" {
+ l.pos += 4
+ l.emit(tokBool)
+ continue
+ }
+ }
+ if l.pos+5 <= len(l.input) {
+ if l.input[l.pos:l.pos+5] == "false" {
+ l.pos += 5
+ l.emit(tokBool)
+ continue
+ }
+ }
+ // must be a type.
+ return varLexType
+ }
+ }
+}
+
+var varTypeMap = map[string]string{
+ "boolean": "b",
+ "byte": "y",
+ "int16": "n",
+ "uint16": "q",
+ "int32": "i",
+ "uint32": "u",
+ "int64": "x",
+ "uint64": "t",
+ "double": "f",
+ "string": "s",
+ "objectpath": "o",
+ "signature": "g",
+}
+
+func varLexByteString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated bytestring")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokByteString)
+ return varLexNormal
+}
+
+func varLexNumber(l *varLexer) lexState {
+ l.accept("+-")
+ digits := "0123456789"
+ if l.accept("0") {
+ if l.accept("x") {
+ digits = "0123456789abcdefABCDEF"
+ } else {
+ digits = "01234567"
+ }
+ }
+ for strings.ContainsRune(digits, l.next()) {
+ }
+ l.backup()
+ if l.accept(".") {
+ for strings.ContainsRune(digits, l.next()) {
+ }
+ l.backup()
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ for strings.ContainsRune("0123456789", l.next()) {
+ }
+ l.backup()
+ }
+ if r := l.peek(); unicode.IsLetter(r) {
+ l.next()
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokNumber)
+ return varLexNormal
+}
+
+func varLexString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated string")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokString)
+ return varLexNormal
+}
+
+func varLexType(l *varLexer) lexState {
+ at := l.accept("@")
+ for {
+ r := l.next()
+ if r == -1 {
+ break
+ }
+ if unicode.IsSpace(r) {
+ l.backup()
+ break
+ }
+ }
+ if at {
+ if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
+ return l.errorf("%s", err)
+ }
+ } else {
+ if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
+ l.emit(tokType)
+ return varLexNormal
+ }
+ return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokType)
+ return varLexNormal
+}
diff --git a/vendor/github.com/godbus/dbus/v5/variant_parser.go b/vendor/github.com/godbus/dbus/v5/variant_parser.go
new file mode 100644
index 00000000..d20f5da6
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/v5/variant_parser.go
@@ -0,0 +1,817 @@
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type varParser struct {
+ tokens []varToken
+ i int
+}
+
+func (p *varParser) backup() {
+ p.i--
+}
+
+func (p *varParser) next() varToken {
+ if p.i < len(p.tokens) {
+ t := p.tokens[p.i]
+ p.i++
+ return t
+ }
+ return varToken{typ: tokEOF}
+}
+
+type varNode interface {
+ Infer() (Signature, error)
+ String() string
+ Sigs() sigSet
+ Value(Signature) (interface{}, error)
+}
+
+func varMakeNode(p *varParser) (varNode, error) {
+ var sig Signature
+
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokNumber:
+ return varMakeNumNode(t, sig)
+ case tokString:
+ return varMakeStringNode(t, sig)
+ case tokBool:
+ if sig.str != "" && sig.str != "b" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := strconv.ParseBool(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return boolNode(b), nil
+ case tokArrayStart:
+ return varMakeArrayNode(p, sig)
+ case tokVariantStart:
+ return varMakeVariantNode(p, sig)
+ case tokDictStart:
+ return varMakeDictNode(p, sig)
+ case tokType:
+ if sig.str != "" {
+ return nil, errors.New("unexpected type annotation")
+ }
+ if t.val[0] == '@' {
+ sig.str = t.val[1:]
+ } else {
+ sig.str = varTypeMap[t.val]
+ }
+ case tokByteString:
+ if sig.str != "" && sig.str != "ay" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := varParseByteString(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return byteStringNode(b), nil
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+}
+
+type varTypeError struct {
+ val string
+ sig Signature
+}
+
+func (e varTypeError) Error() string {
+ return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
+}
+
+type sigSet map[Signature]bool
+
+func (s sigSet) Empty() bool {
+ return len(s) == 0
+}
+
+func (s sigSet) Intersect(s2 sigSet) sigSet {
+ r := make(sigSet)
+ for k := range s {
+ if s2[k] {
+ r[k] = true
+ }
+ }
+ return r
+}
+
+func (s sigSet) Single() (Signature, bool) {
+ if len(s) == 1 {
+ for k := range s {
+ return k, true
+ }
+ }
+ return Signature{}, false
+}
+
+func (s sigSet) ToArray() sigSet {
+ r := make(sigSet, len(s))
+ for k := range s {
+ r[Signature{"a" + k.str}] = true
+ }
+ return r
+}
+
+type numNode struct {
+ sig Signature
+ str string
+ val interface{}
+}
+
+var numSigSet = sigSet{
+ Signature{"y"}: true,
+ Signature{"n"}: true,
+ Signature{"q"}: true,
+ Signature{"i"}: true,
+ Signature{"u"}: true,
+ Signature{"x"}: true,
+ Signature{"t"}: true,
+ Signature{"d"}: true,
+}
+
+func (n numNode) Infer() (Signature, error) {
+ if strings.ContainsAny(n.str, ".e") {
+ return Signature{"d"}, nil
+ }
+ return Signature{"i"}, nil
+}
+
+func (n numNode) String() string {
+ return n.str
+}
+
+func (n numNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ if strings.ContainsAny(n.str, ".e") {
+ return sigSet{Signature{"d"}: true}
+ }
+ return numSigSet
+}
+
+func (n numNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ return varNumAs(n.str, sig)
+}
+
+func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str == "" {
+ return numNode{str: tok.val}, nil
+ }
+ num, err := varNumAs(tok.val, sig)
+ if err != nil {
+ return nil, err
+ }
+ return numNode{sig: sig, val: num}, nil
+}
+
+func varNumAs(s string, sig Signature) (interface{}, error) {
+ isUnsigned := false
+ size := 32
+ switch sig.str {
+ case "n":
+ size = 16
+ case "i":
+ case "x":
+ size = 64
+ case "y":
+ size = 8
+ isUnsigned = true
+ case "q":
+ size = 16
+ isUnsigned = true
+ case "u":
+ isUnsigned = true
+ case "t":
+ size = 64
+ isUnsigned = true
+ case "d":
+ d, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ default:
+ return nil, varTypeError{s, sig}
+ }
+ base := 10
+ if strings.HasPrefix(s, "0x") {
+ base = 16
+ s = s[2:]
+ }
+ if strings.HasPrefix(s, "0") && len(s) != 1 {
+ base = 8
+ s = s[1:]
+ }
+ if isUnsigned {
+ i, err := strconv.ParseUint(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "y":
+ v = byte(i)
+ case "q":
+ v = uint16(i)
+ case "u":
+ v = uint32(i)
+ }
+ return v, nil
+ }
+ i, err := strconv.ParseInt(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "n":
+ v = int16(i)
+ case "i":
+ v = int32(i)
+ }
+ return v, nil
+}
+
+type stringNode struct {
+ sig Signature
+ str string // parsed
+ val interface{} // has correct type
+}
+
+var stringSigSet = sigSet{
+ Signature{"s"}: true,
+ Signature{"g"}: true,
+ Signature{"o"}: true,
+}
+
+func (n stringNode) Infer() (Signature, error) {
+ return Signature{"s"}, nil
+}
+
+func (n stringNode) String() string {
+ return n.str
+}
+
+func (n stringNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ return stringSigSet
+}
+
+func (n stringNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ switch {
+ case sig.str == "g":
+ return Signature{n.str}, nil
+ case sig.str == "o":
+ return ObjectPath(n.str), nil
+ case sig.str == "s":
+ return n.str, nil
+ default:
+ return nil, varTypeError{n.str, sig}
+ }
+}
+
+func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
+ return nil, fmt.Errorf("invalid type %q for string", sig.str)
+ }
+ s, err := varParseString(tok.val)
+ if err != nil {
+ return nil, err
+ }
+ n := stringNode{str: s}
+ if sig.str == "" {
+ return stringNode{str: s}, nil
+ }
+ n.sig = sig
+ switch sig.str {
+ case "o":
+ n.val = ObjectPath(s)
+ case "g":
+ n.val = Signature{s}
+ case "s":
+ n.val = s
+ }
+ return n, nil
+}
+
+func varParseString(s string) (string, error) {
+ // quotes are guaranteed to be there
+ s = s[1 : len(s)-1]
+ buf := new(bytes.Buffer)
+ for len(s) != 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ if r != '\\' {
+ buf.WriteRune(r)
+ continue
+ }
+ r, size = utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && size == 1 {
+ return "", errors.New("invalid UTF-8")
+ }
+ s = s[size:]
+ switch r {
+ case 'a':
+ buf.WriteRune(0x7)
+ case 'b':
+ buf.WriteRune(0x8)
+ case 'f':
+ buf.WriteRune(0xc)
+ case 'n':
+ buf.WriteRune('\n')
+ case 'r':
+ buf.WriteRune('\r')
+ case 't':
+ buf.WriteRune('\t')
+ case '\n':
+ case 'u':
+ if len(s) < 4 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:4], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[4:]
+ case 'U':
+ if len(s) < 8 {
+ return "", errors.New("short unicode escape")
+ }
+ r, err := strconv.ParseUint(s[:8], 16, 32)
+ if err != nil {
+ return "", err
+ }
+ buf.WriteRune(rune(r))
+ s = s[8:]
+ default:
+ buf.WriteRune(r)
+ }
+ }
+ return buf.String(), nil
+}
+
+var boolSigSet = sigSet{Signature{"b"}: true}
+
+type boolNode bool
+
+func (boolNode) Infer() (Signature, error) {
+ return Signature{"b"}, nil
+}
+
+func (b boolNode) String() string {
+ if b {
+ return "true"
+ }
+ return "false"
+}
+
+func (boolNode) Sigs() sigSet {
+ return boolSigSet
+}
+
+func (b boolNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "b" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return bool(b), nil
+}
+
+type arrayNode struct {
+ set sigSet
+ children []varNode
+ val interface{}
+}
+
+func (n arrayNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ csig, err := varInfer(v)
+ if err != nil {
+ continue
+ }
+ return Signature{"a" + csig.str}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n arrayNode) String() string {
+ s := "["
+ for i, v := range n.children {
+ s += v.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "]"
+}
+
+func (n arrayNode) Sigs() sigSet {
+ return n.set
+}
+
+func (n arrayNode) Value(sig Signature) (interface{}, error) {
+ if n.set.Empty() {
+ // no type information whatsoever, so this must be an empty slice
+ return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
+ }
+ if !n.set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
+ for i, v := range n.children {
+ rv, err := v.Value(Signature{sig.str[1:]})
+ if err != nil {
+ return nil, err
+ }
+ s.Index(i).Set(reflect.ValueOf(rv))
+ }
+ return s.Interface(), nil
+}
+
+func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
+ var n arrayNode
+ if sig.str != "" {
+ n.set = sigSet{sig: true}
+ }
+ if t := p.next(); t.typ == tokArrayEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ cn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if cset := cn.Sigs(); !cset.Empty() {
+ if n.set.Empty() {
+ n.set = cset.ToArray()
+ } else {
+ nset := cset.ToArray().Intersect(n.set)
+ if nset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
+ }
+ n.set = nset
+ }
+ }
+ n.children = append(n.children, cn)
+ switch t := p.next(); t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokArrayEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type variantNode struct {
+ n varNode
+}
+
+var variantSet = sigSet{
+ Signature{"v"}: true,
+}
+
+func (variantNode) Infer() (Signature, error) {
+ return Signature{"v"}, nil
+}
+
+func (n variantNode) String() string {
+ return "<" + n.n.String() + ">"
+}
+
+func (variantNode) Sigs() sigSet {
+ return variantSet
+}
+
+func (n variantNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "v" {
+ return nil, varTypeError{n.String(), sig}
+ }
+ sig, err := varInfer(n.n)
+ if err != nil {
+ return nil, err
+ }
+ v, err := n.n.Value(sig)
+ if err != nil {
+ return nil, err
+ }
+ return MakeVariant(v), nil
+}
+
+func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
+ n, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if t := p.next(); t.typ != tokVariantEnd {
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ vn := variantNode{n}
+ if sig.str != "" && sig.str != "v" {
+ return nil, varTypeError{vn.String(), sig}
+ }
+ return variantNode{n}, nil
+}
+
+type dictEntry struct {
+ key, val varNode
+}
+
+type dictNode struct {
+ kset, vset sigSet
+ children []dictEntry
+ val interface{}
+}
+
+func (n dictNode) Infer() (Signature, error) {
+ for _, v := range n.children {
+ ksig, err := varInfer(v.key)
+ if err != nil {
+ continue
+ }
+ vsig, err := varInfer(v.val)
+ if err != nil {
+ continue
+ }
+ return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
+ }
+ return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+func (n dictNode) String() string {
+ s := "{"
+ for i, v := range n.children {
+ s += v.key.String() + ": " + v.val.String()
+ if i != len(n.children)-1 {
+ s += ", "
+ }
+ }
+ return s + "}"
+}
+
+func (n dictNode) Sigs() sigSet {
+ r := sigSet{}
+ for k := range n.kset {
+ for v := range n.vset {
+ sig := "a{" + k.str + v.str + "}"
+ r[Signature{sig}] = true
+ }
+ }
+ return r
+}
+
+func (n dictNode) Value(sig Signature) (interface{}, error) {
+ set := n.Sigs()
+ if set.Empty() {
+ // no type information -> empty dict
+ return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
+ }
+ if !set[sig] {
+ return nil, varTypeError{n.String(), sig}
+ }
+ m := reflect.MakeMap(typeFor(sig.str))
+ ksig := Signature{sig.str[2:3]}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ for _, v := range n.children {
+ kv, err := v.key.Value(ksig)
+ if err != nil {
+ return nil, err
+ }
+ vv, err := v.val.Value(vsig)
+ if err != nil {
+ return nil, err
+ }
+ m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+ }
+ return m.Interface(), nil
+}
+
+func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
+ var n dictNode
+
+ if sig.str != "" {
+ if len(sig.str) < 5 {
+ return nil, fmt.Errorf("invalid signature %q for dict type", sig)
+ }
+ ksig := Signature{string(sig.str[2])}
+ vsig := Signature{sig.str[3 : len(sig.str)-1]}
+ n.kset = sigSet{ksig: true}
+ n.vset = sigSet{vsig: true}
+ }
+ if t := p.next(); t.typ == tokDictEnd {
+ return n, nil
+ } else {
+ p.backup()
+ }
+Loop:
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ kn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if kset := kn.Sigs(); !kset.Empty() {
+ if n.kset.Empty() {
+ n.kset = kset
+ } else {
+ n.kset = kset.Intersect(n.kset)
+ if n.kset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
+ }
+ }
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokColon:
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ }
+ p.backup()
+ vn, err := varMakeNode(p)
+ if err != nil {
+ return nil, err
+ }
+ if vset := vn.Sigs(); !vset.Empty() {
+ if n.vset.Empty() {
+ n.vset = vset
+ } else {
+ n.vset = n.vset.Intersect(vset)
+ if n.vset.Empty() {
+ return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
+ }
+ }
+ }
+ n.children = append(n.children, dictEntry{kn, vn})
+ t = p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokDictEnd:
+ break Loop
+ case tokComma:
+ continue
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+ return n, nil
+}
+
+type byteStringNode []byte
+
+var byteStringSet = sigSet{
+ Signature{"ay"}: true,
+}
+
+func (byteStringNode) Infer() (Signature, error) {
+ return Signature{"ay"}, nil
+}
+
+func (b byteStringNode) String() string {
+ return string(b)
+}
+
+func (b byteStringNode) Sigs() sigSet {
+ return byteStringSet
+}
+
+func (b byteStringNode) Value(sig Signature) (interface{}, error) {
+ if sig.str != "ay" {
+ return nil, varTypeError{b.String(), sig}
+ }
+ return []byte(b), nil
+}
+
+func varParseByteString(s string) ([]byte, error) {
+ // quotes and b at start are guaranteed to be there
+ b := make([]byte, 0, 1)
+ s = s[2 : len(s)-1]
+ for len(s) != 0 {
+ c := s[0]
+ s = s[1:]
+ if c != '\\' {
+ b = append(b, c)
+ continue
+ }
+ c = s[0]
+ s = s[1:]
+ switch c {
+ case 'a':
+ b = append(b, 0x7)
+ case 'b':
+ b = append(b, 0x8)
+ case 'f':
+ b = append(b, 0xc)
+ case 'n':
+ b = append(b, '\n')
+ case 'r':
+ b = append(b, '\r')
+ case 't':
+ b = append(b, '\t')
+ case 'x':
+ if len(s) < 2 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:2], 16, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[2:]
+ case '0':
+ if len(s) < 3 {
+ return nil, errors.New("short escape")
+ }
+ n, err := strconv.ParseUint(s[:3], 8, 8)
+ if err != nil {
+ return nil, err
+ }
+ b = append(b, byte(n))
+ s = s[3:]
+ default:
+ b = append(b, c)
+ }
+ }
+ return append(b, 0), nil
+}
+
+func varInfer(n varNode) (Signature, error) {
+ if sig, ok := n.Sigs().Single(); ok {
+ return sig, nil
+ }
+ return n.Infer()
+}
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 00000000..e810e6fe
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
+// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including the end group marker).
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and the
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index 3cd3249f..00000000
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "fmt"
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
- in := reflect.ValueOf(src)
- if in.IsNil() {
- return src
- }
- out := reflect.New(in.Type().Elem())
- dst := out.Interface().(Message)
- Merge(dst, src)
- return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
- // Merge merges src into this message.
- // Required and optional fields that are set in src will be set to that value in dst.
- // Elements of repeated fields will be appended.
- //
- // Merge may panic if called with a different argument type than the receiver.
- Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
- XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- if m, ok := dst.(Merger); ok {
- m.Merge(src)
- return
- }
-
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
- }
- if in.IsNil() {
- return // Merge from nil src is a noop
- }
- if m, ok := dst.(generatedMerger); ok {
- m.XXX_Merge(src)
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index 63b0f08b..00000000
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- i := p.index
- buf := p.buf
-
- if i >= len(buf) {
- return 0, io.ErrUnexpectedEOF
- } else if buf[i] < 0x80 {
- p.index++
- return uint64(buf[i]), nil
- } else if len(buf)-i < 10 {
- return p.decodeVarintSlow()
- }
-
- var b uint64
- // we already checked the first byte
- x = uint64(buf[i]) - 0x80
- i++
-
- b = uint64(buf[i])
- i++
- x += b << 7
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 7
-
- b = uint64(buf[i])
- i++
- x += b << 14
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 14
-
- b = uint64(buf[i])
- i++
- x += b << 21
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 21
-
- b = uint64(buf[i])
- i++
- x += b << 28
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 28
-
- b = uint64(buf[i])
- i++
- x += b << 35
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 35
-
- b = uint64(buf[i])
- i++
- x += b << 42
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 42
-
- b = uint64(buf[i])
- i++
- x += b << 49
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 49
-
- b = uint64(buf[i])
- i++
- x += b << 56
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 56
-
- b = uint64(buf[i])
- i++
- x += b << 63
- if b&0x80 == 0 {
- goto done
- }
-
- return 0, errOverflow
-
-done:
- p.index = i
- return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
- XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
- b := p.buf[p.index:]
- x, y := findEndGroup(b)
- if x < 0 {
- return io.ErrUnexpectedEOF
- }
- err := Unmarshal(b[:x], pb)
- p.index += y
- return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(newUnmarshaler); ok {
- err := u.XXX_Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
- if u, ok := pb.(Unmarshaler); ok {
- // NOTE: The history of proto have unfortunately been inconsistent
- // whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- // Slow workaround for messages that aren't Unmarshalers.
- // This includes some hand-coded .pb.go files and
- // bootstrap protos.
- // TODO: fix all of those and then add Unmarshal to
- // the Message interface. Then:
- // The cast above and code below can be deleted.
- // The old unmarshaler can be deleted.
- // Clients can call Unmarshal directly (can already do that, actually).
- var info InternalMessageInfo
- err := info.Unmarshal(pb, p.buf[p.index:])
- p.index = len(p.buf)
- return err
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 00000000..d399bf06
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
index 35b882c0..e8db57e0 100644
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -1,63 +1,113 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-import "errors"
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
-// Deprecated: do not use.
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-// Deprecated: do not use.
+// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
-// Deprecated: do not use.
+// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
-// Deprecated: do not use.
+// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for intenal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for intenal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
index dea2617c..2187e877 100644
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -1,48 +1,13 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
+ "google.golang.org/protobuf/reflect/protoreflect"
)
-type generatedDiscarder interface {
- XXX_DiscardUnknown()
-}
-
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
@@ -51,300 +16,43 @@ type generatedDiscarder interface {
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
- if m, ok := m.(generatedDiscarder); ok {
- m.XXX_DiscardUnknown()
- return
- }
- // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
- // but the master branch has no implementation for InternalMessageInfo,
- // so it would be more work to replicate that approach.
- discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
- di := atomicLoadDiscardInfo(&a.discard)
- if di == nil {
- di = getDiscardInfo(reflect.TypeOf(m).Elem())
- atomicStoreDiscardInfo(&a.discard, di)
- }
- di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []discardFieldInfo
- unrecognized field
-}
-
-type discardFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
- discard func(src pointer)
-}
-
-var (
- discardInfoMap = map[reflect.Type]*discardInfo{}
- discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
- discardInfoLock.Lock()
- defer discardInfoLock.Unlock()
- di := discardInfoMap[t]
- if di == nil {
- di = &discardInfo{typ: t}
- discardInfoMap[t] = di
+ if m != nil {
+ discardUnknown(MessageReflect(m))
}
- return di
}
-func (di *discardInfo) discard(src pointer) {
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&di.initialized) == 0 {
- di.computeDiscardInfo()
- }
-
- for _, fi := range di.fields {
- sfp := src.offset(fi.field)
- fi.discard(sfp)
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
- // Ignore lock since DiscardUnknown is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- DiscardUnknown(m)
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
}
- }
- }
-
- if di.unrecognized.IsValid() {
- *src.offset(di.unrecognized).toBytes() = nil
- }
-}
-
-func (di *discardInfo) computeDiscardInfo() {
- di.lock.Lock()
- defer di.lock.Unlock()
- if di.initialized != 0 {
- return
- }
- t := di.typ
- n := t.NumField()
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- dfi := discardFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
- case isSlice: // E.g., []*pb.T
- di := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sps := src.getPointerSlice()
- for _, sp := range sps {
- if !sp.isNil() {
- di.discard(sp)
- }
- }
- }
- default: // E.g., *pb.T
- di := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- di.discard(sp)
- }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
}
}
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
- default: // E.g., map[K]V
- if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
- dfi.discard = func(src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- DiscardUnknown(val.Interface().(Message))
- }
- }
- } else {
- dfi.discard = func(pointer) {} // Noop
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
- default: // E.g., interface{}
- // TODO: Make this faster?
- dfi.discard = func(src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- DiscardUnknown(sv.Interface().(Message))
- }
- }
- }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
}
- default:
- continue
- }
- di.fields = append(di.fields, dfi)
- }
-
- di.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- di.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
- v := reflect.ValueOf(m)
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return
- }
- t := v.Type()
-
- for i := 0; i < v.NumField(); i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
}
- vf := v.Field(i)
- tf := f.Type
+ return true
+ })
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
- case isSlice: // E.g., []*pb.T
- for j := 0; j < vf.Len(); j++ {
- discardLegacy(vf.Index(j).Interface().(Message))
- }
- default: // E.g., *pb.T
- discardLegacy(vf.Interface().(Message))
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
- default: // E.g., map[K]V
- tv := vf.Type().Elem()
- if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
- for _, key := range vf.MapKeys() {
- val := vf.MapIndex(key)
- discardLegacy(val.Interface().(Message))
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
- default: // E.g., test_proto.isCommunique_Union interface
- if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
- vf = vf.Elem() // E.g., *test_proto.Communique_Msg
- if !vf.IsNil() {
- vf = vf.Elem() // E.g., test_proto.Communique_Msg
- vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
- if vf.Kind() == reflect.Ptr {
- discardLegacy(vf.Interface().(Message))
- }
- }
- }
- }
- }
- }
-
- if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
- if vf.Type() != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- vf.Set(reflect.ValueOf([]byte(nil)))
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(m); err == nil {
- // Ignore lock since discardLegacy is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- discardLegacy(m)
- }
- }
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
}
}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
deleted file mode 100644
index 3abfed2c..00000000
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "errors"
- "reflect"
-)
-
-var (
- // errRepeatedHasNil is the error returned if Marshal is called with
- // a struct with a repeated field containing a nil element.
- errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
- // errOneofHasNil is the error returned if Marshal is called with
- // a struct with a oneof field containing a nil element.
- errOneofHasNil = errors.New("proto: oneof field has nil value")
-
- // ErrNil is the error returned if Marshal is called with nil.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // ErrTooLarge is the error returned if Marshal is called with a
- // message that encodes to >2GB.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- switch {
- case x < 1<<7:
- return 1
- case x < 1<<14:
- return 2
- case x < 1<<21:
- return 3
- case x < 1<<28:
- return 4
- case x < 1<<35:
- return 5
- case x < 1<<42:
- return 6
- case x < 1<<49:
- return 7
- case x < 1<<56:
- return 8
- case x < 1<<63:
- return 9
- }
- return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- siz := Size(pb)
- p.EncodeVarint(uint64(siz))
- return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
deleted file mode 100644
index f9b6e41b..00000000
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
- "bytes"
- "log"
- "reflect"
- "strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
- - Two messages are equal iff they are the same type,
- corresponding fields are equal, unknown field sets
- are equal, and extensions sets are equal.
- - Two set scalar fields are equal iff their values are equal.
- If the fields are of a floating-point type, remember that
- NaN != x for all x, including NaN. If the message is defined
- in a proto3 .proto file, fields are not "set"; specifically,
- zero length proto3 "bytes" fields are equal (nil == {}).
- - Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal. Note a "bytes" field,
- although represented by []byte, is not a repeated field and the
- rule for the scalar fields described above applies.
- - Two unset fields are equal.
- - Two unknown field sets are equal if their current
- encoded state is equal.
- - Two extension sets are equal iff they have corresponding
- elements that are pairwise equal.
- - Two map fields are equal iff their lengths are the same,
- and they contain the same set of elements. Zero-length map
- fields are equal.
- - Every other combination of things are not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
- if a == nil || b == nil {
- return a == b
- }
- v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
- if v1.Type() != v2.Type() {
- return false
- }
- if v1.Kind() == reflect.Ptr {
- if v1.IsNil() {
- return v2.IsNil()
- }
- if v2.IsNil() {
- return false
- }
- v1, v2 = v1.Elem(), v2.Elem()
- }
- if v1.Kind() != reflect.Struct {
- return false
- }
- return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
- sprop := GetProperties(v1.Type())
- for i := 0; i < v1.NumField(); i++ {
- f := v1.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- f1, f2 := v1.Field(i), v2.Field(i)
- if f.Type.Kind() == reflect.Ptr {
- if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
- // both unset
- continue
- } else if n1 != n2 {
- // set/unset mismatch
- return false
- }
- f1, f2 = f1.Elem(), f2.Elem()
- }
- if !equalAny(f1, f2, sprop.Prop[i]) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_InternalExtensions")
- if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_extensions")
- if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
- return false
- }
- }
-
- uf := v1.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return true
- }
-
- u1 := uf.Bytes()
- u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
- if v1.Type() == protoMessageType {
- m1, _ := v1.Interface().(Message)
- m2, _ := v2.Interface().(Message)
- return Equal(m1, m2)
- }
- switch v1.Kind() {
- case reflect.Bool:
- return v1.Bool() == v2.Bool()
- case reflect.Float32, reflect.Float64:
- return v1.Float() == v2.Float()
- case reflect.Int32, reflect.Int64:
- return v1.Int() == v2.Int()
- case reflect.Interface:
- // Probably a oneof field; compare the inner values.
- n1, n2 := v1.IsNil(), v2.IsNil()
- if n1 || n2 {
- return n1 == n2
- }
- e1, e2 := v1.Elem(), v2.Elem()
- if e1.Type() != e2.Type() {
- return false
- }
- return equalAny(e1, e2, nil)
- case reflect.Map:
- if v1.Len() != v2.Len() {
- return false
- }
- for _, key := range v1.MapKeys() {
- val2 := v2.MapIndex(key)
- if !val2.IsValid() {
- // This key was not found in the second map.
- return false
- }
- if !equalAny(v1.MapIndex(key), val2, nil) {
- return false
- }
- }
- return true
- case reflect.Ptr:
- // Maps may have nil values in them, so check for nil.
- if v1.IsNil() && v2.IsNil() {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return equalAny(v1.Elem(), v2.Elem(), prop)
- case reflect.Slice:
- if v1.Type().Elem().Kind() == reflect.Uint8 {
- // short circuit: []byte
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value.
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
- }
-
- if v1.Len() != v2.Len() {
- return false
- }
- for i := 0; i < v1.Len(); i++ {
- if !equalAny(v1.Index(i), v2.Index(i), prop) {
- return false
- }
- }
- return true
- case reflect.String:
- return v1.Interface().(string) == v2.Interface().(string)
- case reflect.Struct:
- return equalStruct(v1, v2)
- case reflect.Uint32, reflect.Uint64:
- return v1.Uint() == v2.Uint()
- }
-
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to compare %v", v1)
- return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
- em1, _ := x1.extensionsRead()
- em2, _ := x2.extensionsRead()
- return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
- if len(em1) != len(em2) {
- return false
- }
-
- for extNum, e1 := range em1 {
- e2, ok := em2[extNum]
- if !ok {
- return false
- }
-
- m1 := extensionAsLegacyType(e1.value)
- m2 := extensionAsLegacyType(e2.value)
-
- if m1 == nil && m2 == nil {
- // Both have only encoded form.
- if bytes.Equal(e1.enc, e2.enc) {
- continue
- }
- // The bytes are different, but the extensions might still be
- // equal. We need to decode them to compare.
- }
-
- if m1 != nil && m2 != nil {
- // Both are unencoded.
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- continue
- }
-
- // At least one is encoded. To do a semantically correct comparison
- // we need to unmarshal them first.
- var desc *ExtensionDesc
- if m := extensionMaps[base]; m != nil {
- desc = m[extNum]
- }
- if desc == nil {
- // If both have only encoded form and the bytes are the same,
- // it is handled above. We get here when the bytes are different.
- // We don't know how to decode it, so just compare them as byte
- // slices.
- log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- return false
- }
- var err error
- if m1 == nil {
- m1, err = decodeExtension(e1.enc, desc)
- }
- if m2 == nil && err == nil {
- m2, err = decodeExtension(e2.enc, desc)
- }
- if err != nil {
- // The encoded form is invalid.
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
- return false
- }
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index fa88add3..42fc120c 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -1,607 +1,356 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
import (
"errors"
"fmt"
- "io"
"reflect"
- "strconv"
- "sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
- Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- extensionsWrite() map[int32]Extension
- extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- ExtensionMap() map[int32]Extension
-}
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
- extendableProtoV1
-}
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
- return e.ExtensionMap()
-}
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- return e.ExtensionMap(), notLocker{}
-}
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
-func (n notLocker) Lock() {}
-func (n notLocker) Unlock() {}
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
- switch p := p.(type) {
- case extendableProto:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return p, nil
- case extendableProtoV1:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return extensionAdapter{p}, nil
- }
- // Don't allocate a specific error containing %T:
- // this is the hot path for Clone and MarshalText.
- return nil, errNotExtendable
-}
+// ErrMissingExtension reports whether the extension was not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-func isNilPtr(x interface{}) bool {
- v := reflect.ValueOf(x)
- return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
- // The struct must be indirect so that if a user inadvertently copies a
- // generated message and its embedded XXX_InternalExtensions, they
- // avoid the mayhem of a copied mutex.
- //
- // The mutex serializes all logically read-only operations to p.extensionMap.
- // It is up to the client to ensure that write operations to p.extensionMap are
- // mutually exclusive with other accesses.
- p *struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
}
-}
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
- if e.p == nil {
- e.p = new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
})
- e.p.extensionMap = make(map[int32]Extension)
}
- return e.p.extensionMap
-}
-// extensionsRead returns the extensions map for read-only use. It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
- if e.p == nil {
- return nil, nil
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
}
- return e.p.extensionMap, &e.p.mu
-}
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
- ExtendedType Message // nil pointer to the type that is being extended
- ExtensionType interface{} // nil pointer to the extension type
- Field int32 // field number
- Name string // fully-qualified name of extension, for text formatting
- Tag string // protobuf tag style
- Filename string // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
- t := reflect.TypeOf(ed.ExtensionType)
- return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
- // When an extension is stored in a message using SetExtension
- // only desc and value are set. When the message is marshaled
- // enc will be set to the encoded form of the message.
- //
- // When a message is unmarshaled and contains extensions, each
- // extension will have only enc set. When such an extension is
- // accessed using GetExtension (or GetExtensions) desc and value
- // will be set.
- desc *ExtensionDesc
-
- // value is a concrete value for the extension field. Let the type of
- // desc.ExtensionType be the "API type" and the type of Extension.value
- // be the "storage type". The API type and storage type are the same except:
- // * For scalars (except []byte), the API type uses *T,
- // while the storage type uses T.
- // * For repeated fields, the API type uses []T, while the storage type
- // uses *[]T.
- //
- // The reason for the divergence is so that the storage type more naturally
- // matches what is expected of when retrieving the values through the
- // protobuf reflection APIs.
- //
- // The value may only be populated if desc is also populated.
- value interface{}
-
- // enc is the raw bytes for the extension field.
- enc []byte
+ return has
}
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
- epb, err := extendable(base)
- if err != nil {
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- extmap := epb.extensionsWrite()
- extmap[id] = Extension{enc: b}
-}
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
- for _, er := range pb.ExtensionRangeArray() {
- if er.Start <= field && field <= er.End {
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
return true
- }
- }
- return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
- var pbi interface{} = pb
- // Check the extended type.
- if ea, ok := pbi.(extensionAdapter); ok {
- pbi = ea.extendableProtoV1
- }
- if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
- }
- // Check the range.
- if !isExtensionField(pb, extension.Field) {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
- base reflect.Type
- field int32
-}
-
-var extProp = struct {
- sync.RWMutex
- m map[extPropKey]*Properties
-}{
- m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
- key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
- extProp.RLock()
- if prop, ok := extProp.m[key]; ok {
- extProp.RUnlock()
- return prop
- }
- extProp.RUnlock()
-
- extProp.Lock()
- defer extProp.Unlock()
- // Check again.
- if prop, ok := extProp.m[key]; ok {
- return prop
- }
-
- prop := new(Properties)
- prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
- extProp.m[key] = prop
- return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
- // TODO: Check types, field numbers, etc.?
- epb, err := extendable(pb)
- if err != nil {
- return false
- }
- extmap, mu := epb.extensionsRead()
- if extmap == nil {
- return false
+ })
}
- mu.Lock()
- _, ok := extmap[extension.Field]
- mu.Unlock()
- return ok
+ clearUnknown(mr, fieldNum(xt.Field))
}
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
- epb, err := extendable(pb)
- if err != nil {
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- // TODO: Check types, field numbers, etc.?
- extmap := epb.extensionsWrite()
- delete(extmap, extension.Field)
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
-// GetExtension retrieves a proto2 extended field from pb.
+// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
-
- if extension.ExtendedType != nil {
- // can only check type if this is a complete descriptor
- if err := checkExtensionTypes(epb, extension); err != nil {
- return nil, err
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
}
+ bi = bi[n:]
}
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return defaultExtensionValue(extension)
- }
- mu.Lock()
- defer mu.Unlock()
- e, ok := emap[extension.Field]
- if !ok {
- // defaultExtensionValue returns the default value or
- // ErrMissingExtension if there is no default.
- return defaultExtensionValue(extension)
- }
-
- if e.value != nil {
- // Already decoded. Check the descriptor, though.
- if e.desc != extension {
- // This shouldn't happen. If it does, it means that
- // GetExtension was called twice with two different
- // descriptors with the same field number.
- return nil, errors.New("proto: descriptor conflict")
- }
- return extensionAsLegacyType(e.value), nil
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
}
- if extension.ExtensionType == nil {
- // incomplete descriptor
- return e.enc, nil
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
-
- v, err := decodeExtension(e.enc, extension)
- if err != nil {
- return nil, err
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
}
- // Remember the decoded version and drop the encoded version.
- // That way it is safe to mutate what we return.
- e.value = extensionAsStorageType(v)
- e.desc = extension
- e.enc = nil
- emap[extension.Field] = e
- return extensionAsLegacyType(e.value), nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
- if extension.ExtensionType == nil {
- // incomplete descriptor, so no default
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
return nil, ErrMissingExtension
}
- t := reflect.TypeOf(extension.ExtensionType)
- props := extensionProperties(extension)
-
- sf, _, err := fieldDefault(t, props)
- if err != nil {
- return nil, err
- }
-
- if sf == nil || sf.value == nil {
- // There is no default value.
- return nil, ErrMissingExtension
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
}
+ return v, nil
+}
- if t.Kind() != reflect.Ptr {
- // We do not need to return a Ptr, we can directly return sf.value.
- return sf.value, nil
- }
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
- // We need to return an interface{} that is a pointer to sf.value.
- value := reflect.New(t).Elem()
- value.Set(reflect.New(value.Type().Elem()))
- if sf.kind == reflect.Int32 {
- // We may have an int32 or an enum, but the underlying data is int32.
- // Since we can't set an int32 into a non int32 reflect.value directly
- // set it as a int32.
- value.Elem().SetInt(int64(sf.value.(int32)))
- } else {
- value.Elem().Set(reflect.ValueOf(sf.value))
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
}
- return value.Interface(), nil
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
}
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- t := reflect.TypeOf(extension.ExtensionType)
- unmarshal := typeUnmarshaler(t, extension.Tag)
-
- // t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate space to store the pointer/slice.
- value := reflect.New(t).Elem()
-
- var err error
- for {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- wire := int(x) & 7
-
- b, err = unmarshal(b, valToPointer(value.Addr()), wire)
- if err != nil {
- return nil, err
- }
-
- if len(b) == 0 {
- break
- }
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
}
- return value.Interface(), nil
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
+// GetExtensions returns a list of the extensions values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
}
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
if err != nil {
- return
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
}
+ vs[i] = v
}
- return
+ return vs, nil
}
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
}
- registeredExtensions := RegisteredExtensions(pb)
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return nil, nil
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
- mu.Lock()
- defer mu.Unlock()
- extensions := make([]*ExtensionDesc, 0, len(emap))
- for extid, e := range emap {
- desc := e.desc
- if desc == nil {
- desc = registeredExtensions[extid]
- if desc == nil {
- desc = &ExtensionDesc{Field: extid}
- }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
}
-
- extensions = append(extensions, desc)
}
- return extensions, nil
-}
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, err := extendable(pb)
- if err != nil {
- return err
- }
- if err := checkExtensionTypes(epb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
return nil
}
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
- epb, err := extendable(pb)
- if err != nil {
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
return
}
- m := epb.extensionsWrite()
- for k := range m {
- delete(m, k)
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
}
-}
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the later case, an type incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
}
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
}
- m[desc.Field] = desc
+ return xts, nil
}
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
-// extensionAsLegacyType converts an value in the storage type as the API type.
-// See Extension.value.
-func extensionAsLegacyType(v interface{}) interface{} {
- switch rv := reflect.ValueOf(v); rv.Kind() {
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- // Represent primitive types as a pointer to the value.
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- case reflect.Ptr:
- // Represent slice types as the value itself.
- switch rv.Type().Elem().Kind() {
- case reflect.Slice:
- if rv.IsNil() {
- v = reflect.Zero(rv.Type().Elem()).Interface()
- } else {
- v = rv.Elem().Interface()
- }
- }
+ return true
+ default:
+ return false
}
- return v
}
-// extensionAsStorageType converts an value in the API type as the storage type.
-// See Extension.value.
-func extensionAsStorageType(v interface{}) interface{} {
- switch rv := reflect.ValueOf(v); rv.Kind() {
- case reflect.Ptr:
- // Represent slice types as the value itself.
- switch rv.Type().Elem().Kind() {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- if rv.IsNil() {
- v = reflect.Zero(rv.Type().Elem()).Interface()
- } else {
- v = rv.Elem().Interface()
- }
- }
- case reflect.Slice:
- // Represent slice types as a pointer to the value.
- if rv.Type().Elem().Kind() != reflect.Uint8 {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
}
+ bi = bi[n:]
}
- return v
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index fdd328bb..00000000
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,965 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and a Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/golang/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
- if e.field == "" {
- return fmt.Sprintf("proto: required field not set")
- }
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
- if e.field == "" {
- return "proto: invalid UTF-8 detected"
- }
- return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
- return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or a InvalidUTF8 error.
-func isNonFatal(err error) bool {
- if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
- return true
- }
- if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
- return true
- }
- return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether it was successful.
-// Otherwise it returns false for any fatal non-nil errors.
-func (nf *nonFatal) Merge(err error) (ok bool) {
- if err == nil {
- return true // not an error
- }
- if !isNonFatal(err) {
- return false // fatal error
- }
- if nf.E == nil {
- nf.E = err // store first instance of non-fatal error
- }
- return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
- p.deterministic = deterministic
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- index := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- v = v.Elem()
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or []*T or map[T]*T
- switch f.Kind() {
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to a slice of the fields,
- // with its scalar fields set to their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{vs: vs}
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- case reflect.Bool:
- s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
- case reflect.String:
- s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
- default:
- panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-const (
- // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- ProtoPackageIsVersion3 = true
-
- // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- ProtoPackageIsVersion2 = true
-
- // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- ProtoPackageIsVersion1 = true
-)
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
- marshal *marshalInfo
- unmarshal *unmarshalInfo
- merge *mergeInfo
- discard *discardInfo
-}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index f48a7567..00000000
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "errors"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func unmarshalMessageSet(buf []byte, exts interface{}) error {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m = exts.extensionsWrite()
- case map[int32]Extension:
- m = exts
- default:
- return errors.New("proto: not an extension map")
- }
-
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index 94fa9194..00000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
- "sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
- v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
- v := reflect.ValueOf(*i)
- u := reflect.New(v.Type())
- u.Elem().Set(v)
- if deref {
- u = u.Elem()
- }
- return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
- return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
- n, m := s.Len(), s.Cap()
- if n < m {
- s.SetLen(n + 1)
- } else {
- s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
- }
- return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
- return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
- return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return p.v.Interface().(**int32)
-}
- func (p pointer) toInt32Slice() *[]int32 {
- return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().(*int32)
- }
- // an enum
- return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
- // Allocate value in a *int32. Possibly convert that to a *enum.
- // Then assign it to a **int32 or **enum.
- // Note: we can convert *int32 to *enum, but we can't convert
- // **int32 to **enum!
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().([]int32)
- }
- // an enum
- // Allocate a []int32, then assign []enum's values into it.
- // Note: we can't convert []enum to []int32.
- slice := p.v.Elem()
- s := make([]int32, slice.Len())
- for i := 0; i < slice.Len(); i++ {
- s[i] = int32(slice.Index(i).Int())
- }
- return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- p.v.Elem().Set(reflect.ValueOf(v))
- return
- }
- // an enum
- // Allocate a []enum, then assign []int32's values into it.
- // Note: we can't convert []enum to []int32.
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
- for i, x := range v {
- slice.Index(i).SetInt(int64(x))
- }
- p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
- grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
- return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
- return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
- return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
- return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
- return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
- return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
- return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
- return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
- return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
- return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
- return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
- p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
- grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
- if v == nil {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
- return
- }
- s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
- for _, p := range v {
- s = reflect.Append(s, p.v)
- }
- p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- if p.v.Elem().IsNil() {
- return pointer{v: p.v.Elem()}
- }
- return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- // TODO: check that p.v.Type().Elem() == t?
- return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-
-var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index dbfffe07..00000000
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "sync/atomic"
- "unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
- p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- // Super-tricky - read pointer out of data word of interface value.
- // Saves ~25ns over the equivalent:
- // return valToPointer(reflect.ValueOf(*i))
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
- // Super-tricky - read or get the address of data word of interface value.
- if isptr {
- // The interface is of pointer type, thus it is a direct interface.
- // The data word is the pointer data itself. We take its address.
- p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
- } else {
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
- }
- if deref {
- p.p = *(*unsafe.Pointer)(p.p)
- }
- return p
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- // For safety, we should panic if !f.IsValid, however calling panic causes
- // this to no longer be inlineable, which is a serious performance cost.
- /*
- if !f.IsValid() {
- panic("invalid field")
- }
- */
- return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
- return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
- return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
- return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return (**int32)(p.p)
- }
- func (p pointer) toInt32Slice() *[]int32 {
- return (*[]int32)(p.p)
- }
-*/
-func (p pointer) getInt32Ptr() *int32 {
- return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
- *(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
- return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
- *(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
- s := (*[]int32)(p.p)
- *s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
- return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
- return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
- return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
- return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
- return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
- return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
- return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
- return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
- return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
- return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We load it as []pointer.
- return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We store it as []pointer.
- *(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
- return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
- *(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
- s := (*[]unsafe.Pointer)(p.p)
- *s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- // Super-tricky - read pointer out of data word of interface value.
- return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index a4b8c0cd..dcdc2202 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -1,162 +1,104 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package proto
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
import (
"fmt"
- "log"
"reflect"
- "sort"
"strconv"
"strings"
"sync"
-)
-
-const debug bool = false
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
)
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
+ // It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
WireType int
- Tag int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
Required bool
+ // Optional reports whether this is a optional field.
Optional bool
+ // Repeated reports whether this is a repeated field.
Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
-
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field for a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field for a map field.
+ MapValProp *Properties
+}
- mtype reflect.Type // set for map types only
- MapKeyProp *Properties // set for map types only
- MapValProp *Properties // set for map types only
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
- s += ","
- s += strconv.Itoa(p.Tag)
+ s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
@@ -170,18 +112,21 @@ func (p *Properties) String() string {
s += ",packed"
}
s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
+ if p.JSONName != "" {
s += ",json=" + p.JSONName
}
- if p.proto3 {
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
s += ",proto3"
}
- if p.oneof {
+ if p.Oneof {
s += ",oneof"
}
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
if p.HasDefault {
s += ",def=" + p.Default
}
@@ -189,356 +134,173 @@ func (p *Properties) String() string {
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- log.Printf("proto: tag has too few fields: %q", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- case "fixed32":
- p.WireType = WireFixed32
- case "fixed64":
- p.WireType = WireFixed64
- case "zigzag32":
- p.WireType = WireVarint
- case "zigzag64":
- p.WireType = WireVarint
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- log.Printf("proto: tag has unknown wire type: %q", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
-outer:
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
p.Optional = true
- case f == "rep":
+ case s == "req":
+ p.Required = true
+ case s == "rep":
p.Repeated = true
- case f == "packed":
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break outer
- }
- }
- }
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- switch t1 := typ; t1.Kind() {
- case reflect.Ptr:
- if t1.Elem().Kind() == reflect.Struct {
- p.stype = t1.Elem()
- }
-
- case reflect.Slice:
- if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
- p.stype = t2.Elem()
- }
-
- case reflect.Map:
- p.mtype = t1
- p.MapKeyProp = &Properties{}
- p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.MapValProp = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
- p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
-
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
+ p.Default, i = tag[len("def="):], len(tag)
}
+ tag = strings.TrimPrefix(tag[i:], ",")
}
}
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
- p.setFieldProps(typ, f, lockGetProp)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
}
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- return sprop
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
}
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
}
-type (
- oneofFuncsIface interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- oneofWrappersIface interface {
- XXX_OneofWrappers() []interface{}
- }
-)
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- return prop
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
+ var hasOneof bool
prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
+ // Construct a list of properties for each field in the struct.
for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
}
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
}
+
+ prop.Prop = append(prop.Prop, p)
}
- // Re-order prop.order.
- sort.Sort(prop)
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
- var oots []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oots = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oots = m.XXX_OneofWrappers()
- }
- if len(oots) > 0 {
- // Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
Prop: new(Properties),
}
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
}
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
}
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
}
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
}
- prop.reqCount = reqCount
return prop
}
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
- protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypedNils[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
- // Generated code always calls RegisterType with nil x.
- // This check is just for extra safety.
- protoTypedNils[name] = x
- } else {
- protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
- }
- revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
- if reflect.TypeOf(x).Kind() != reflect.Map {
- panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
- }
- if _, ok := protoMapTypes[name]; ok {
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoMapTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
- type xname interface {
- XXX_MessageName() string
- }
- if m, ok := x.(xname); ok {
- return m.XXX_MessageName()
- }
- return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
- if t, ok := protoTypedNils[name]; ok {
- return reflect.TypeOf(t)
- }
- return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
- protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
- protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 00000000..5aee89c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface as it lacks any method to programatically interact
+// with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+ // The provided buffer is only valid for during method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src is appended to the corresponded
+// list fields in dst. The entries of every map field in src is copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown fields values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 00000000..1e7ff642
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,323 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
+ b = fd.ProtoLegacyRawDesc()
+ } else {
+ // TODO: Use protodesc.ToFileDescriptorProto to construct
+ // a descriptorpb.FileDescriptorProto and marshal it.
+ // However, doing so causes the proto package to have a dependency
+ // on descriptorpb, leading to cyclic dependency issues.
+ }
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of just the proto package that the
+// enum is declared within followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
deleted file mode 100644
index 5cb11fa9..00000000
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ /dev/null
@@ -1,2776 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// a sizer takes a pointer to a field and the size of its tag, computes the size of
-// the encoded data.
-type sizer func(pointer, int) int
-
-// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
-// marshals the field to the end of the slice, returns the slice and error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
- typ reflect.Type
- fields []*marshalFieldInfo
- unrecognized field // offset of XXX_unrecognized
- extensions field // offset of XXX_InternalExtensions
- v1extensions field // offset of XXX_extensions
- sizecache field // offset of XXX_sizecache
- initialized int32 // 0 -- only typ is set, 1 -- fully initialized
- messageset bool // uses message set wire format
- hasmarshaler bool // has custom marshaler
- sync.RWMutex // protect extElems map, also for initialization
- extElems map[int32]*marshalElemInfo // info of extension elements
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
- field field
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isPointer bool
- required bool // field is required
- name string // name of the field, for error reporting
- oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
- deref bool // dereference the pointer before operating on it; implies isptr
-}
-
-var (
- marshalInfoMap = map[reflect.Type]*marshalInfo{}
- marshalInfoLock sync.Mutex
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns may not necessarily initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
- marshalInfoLock.Lock()
- u, ok := marshalInfoMap[t]
- if !ok {
- u = &marshalInfo{typ: t}
- marshalInfoMap[t] = u
- }
- marshalInfoLock.Unlock()
- return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want crash in this case.
- return 0
- }
- return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want crash in this case.
- return b, ErrNil
- }
- return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
- // u := a.marshal, but atomically.
- // We use an atomic here to ensure memory consistency.
- u := atomicLoadMarshalInfo(&a.marshal)
- if u == nil {
- // Get marshal information from type of message.
- t := reflect.ValueOf(msg).Type()
- if t.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
- }
- u = getMarshalInfo(t.Elem())
- // Store it in the cache for later users.
- // a.marshal = u, but atomically.
- atomicStoreMarshalInfo(&a.marshal, u)
- }
- return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b, _ := m.Marshal()
- return len(b)
- }
-
- n := 0
- for _, f := range u.fields {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- n += f.sizer(ptr.offset(f.field), f.tagsize)
- }
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- n += u.sizeMessageSet(e)
- } else {
- n += u.sizeExtensions(e)
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- n += u.sizeV1Extensions(m)
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- n += len(s)
- }
- // cache the result for use in marshal
- if u.sizecache.IsValid() {
- atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
- }
- return n
-}
-
-// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
-// fall back to compute the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
- if u.sizecache.IsValid() {
- return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
- }
- return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
-// the encoded data to the end of the slice, returns the slice and error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b1, err := m.Marshal()
- b = append(b, b1...)
- return b, err
- }
-
- var err, errLater error
- // The old marshaler encodes extensions at beginning.
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- b, err = u.appendMessageSet(b, e, deterministic)
- } else {
- b, err = u.appendExtensions(b, e, deterministic)
- }
- if err != nil {
- return b, err
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- b, err = u.appendV1Extensions(b, m, deterministic)
- if err != nil {
- return b, err
- }
- }
- for _, f := range u.fields {
- if f.required {
- if ptr.offset(f.field).getPointer().isNil() {
- // Required field is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name}
- }
- continue
- }
- }
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
- if err != nil {
- if err1, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name + "." + err1.field}
- }
- continue
- }
- if err == errRepeatedHasNil {
- err = errors.New("proto: repeated field " + f.name + " has nil element")
- }
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return b, err
- }
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- b = append(b, s...)
- }
- return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
- u.Lock()
- defer u.Unlock()
- if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
- return
- }
-
- t := u.typ
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.v1extensions = invalidField
- u.sizecache = invalidField
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if reflect.PtrTo(t).Implements(marshalerType) {
- u.hasmarshaler = true
- atomic.StoreInt32(&u.initialized, 1)
- return
- }
-
- // get oneof implementers
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
-
- n := t.NumField()
-
- // deal with XXX fields first
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if !strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- switch f.Name {
- case "XXX_sizecache":
- u.sizecache = toField(&f)
- case "XXX_unrecognized":
- u.unrecognized = toField(&f)
- case "XXX_InternalExtensions":
- u.extensions = toField(&f)
- u.messageset = f.Tag.Get("protobuf_messageset") == "1"
- case "XXX_extensions":
- u.v1extensions = toField(&f)
- case "XXX_NoUnkeyedLiteral":
- // nothing to do
- default:
- panic("unknown XXX field: " + f.Name)
- }
- n--
- }
-
- // normal fields
- fields := make([]marshalFieldInfo, n) // batch allocation
- u.fields = make([]*marshalFieldInfo, 0, n)
- for i, j := 0, 0; i < t.NumField(); i++ {
- f := t.Field(i)
-
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- field := &fields[j]
- j++
- field.name = f.Name
- u.fields = append(u.fields, field)
- if f.Tag.Get("protobuf_oneof") != "" {
- field.computeOneofFieldInfo(&f, oneofImplementers)
- continue
- }
- if f.Tag.Get("protobuf") == "" {
- // field has no tag (not in generated message), ignore it
- u.fields = u.fields[:len(u.fields)-1]
- j--
- continue
- }
- field.computeMarshalFieldInfo(&f)
- }
-
- // fields are marshaled in tag order on the wire.
- sort.Sort(byTag(u.fields))
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int { return len(a) }
-func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
- // get from cache first
- u.RLock()
- e, ok := u.extElems[desc.Field]
- u.RUnlock()
- if ok {
- return e
- }
-
- t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
- tags := strings.Split(desc.Tag, ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
- t = t.Elem()
- }
- sizer, marshaler := typeMarshaler(t, tags, false, false)
- var deref bool
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- t = reflect.PtrTo(t)
- deref = true
- }
- e = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizer,
- marshaler: marshaler,
- isptr: t.Kind() == reflect.Ptr,
- deref: deref,
- }
-
- // update cache
- u.Lock()
- if u.extElems == nil {
- u.extElems = make(map[int32]*marshalElemInfo)
- }
- u.extElems[desc.Field] = e
- u.Unlock()
- return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
- // parse protobuf tag of the field.
- // tag has format of "bytes,49,opt,name=foo,def=hello!"
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- if tags[0] == "" {
- return
- }
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if tags[2] == "req" {
- fi.required = true
- }
- fi.setTag(f, tag, wt)
- fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
- fi.field = toField(f)
- fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
- fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
- ityp := f.Type // interface type
- for _, o := range oneofImplementers {
- t := reflect.TypeOf(o)
- if !t.Implements(ityp) {
- continue
- }
- sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
- tags := strings.Split(sf.Tag.Get("protobuf"), ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
- fi.oneofElems[t.Elem()] = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizer,
- marshaler: marshaler,
- }
- }
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
- switch encoding {
- case "fixed32":
- return WireFixed32
- case "fixed64":
- return WireFixed64
- case "varint", "zigzag32", "zigzag64":
- return WireVarint
- case "bytes":
- return WireBytes
- case "group":
- return WireStartGroup
- }
- panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
- fi.field = toField(f)
- fi.wiretag = uint64(tag)<<3 | wt
- fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
- switch f.Type.Kind() {
- case reflect.Map:
- // map field
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeMapMarshaler(f)
- return
- case reflect.Ptr, reflect.Slice:
- fi.isPointer = true
- }
- fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
- encoding := tags[0]
-
- pointer := false
- slice := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- packed := false
- proto3 := false
- validateUTF8 := true
- for i := 2; i < len(tags); i++ {
- if tags[i] == "packed" {
- packed = true
- }
- if tags[i] == "proto3" {
- proto3 = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return sizeBoolPtr, appendBoolPtr
- }
- if slice {
- if packed {
- return sizeBoolPackedSlice, appendBoolPackedSlice
- }
- return sizeBoolSlice, appendBoolSlice
- }
- if nozero {
- return sizeBoolValueNoZero, appendBoolValueNoZero
- }
- return sizeBoolValue, appendBoolValue
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixed32Ptr, appendFixed32Ptr
- }
- if slice {
- if packed {
- return sizeFixed32PackedSlice, appendFixed32PackedSlice
- }
- return sizeFixed32Slice, appendFixed32Slice
- }
- if nozero {
- return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
- }
- return sizeFixed32Value, appendFixed32Value
- case "varint":
- if pointer {
- return sizeVarint32Ptr, appendVarint32Ptr
- }
- if slice {
- if packed {
- return sizeVarint32PackedSlice, appendVarint32PackedSlice
- }
- return sizeVarint32Slice, appendVarint32Slice
- }
- if nozero {
- return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
- }
- return sizeVarint32Value, appendVarint32Value
- }
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixedS32Ptr, appendFixedS32Ptr
- }
- if slice {
- if packed {
- return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
- }
- return sizeFixedS32Slice, appendFixedS32Slice
- }
- if nozero {
- return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
- }
- return sizeFixedS32Value, appendFixedS32Value
- case "varint":
- if pointer {
- return sizeVarintS32Ptr, appendVarintS32Ptr
- }
- if slice {
- if packed {
- return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
- }
- return sizeVarintS32Slice, appendVarintS32Slice
- }
- if nozero {
- return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
- }
- return sizeVarintS32Value, appendVarintS32Value
- case "zigzag32":
- if pointer {
- return sizeZigzag32Ptr, appendZigzag32Ptr
- }
- if slice {
- if packed {
- return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
- }
- return sizeZigzag32Slice, appendZigzag32Slice
- }
- if nozero {
- return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
- }
- return sizeZigzag32Value, appendZigzag32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixed64Ptr, appendFixed64Ptr
- }
- if slice {
- if packed {
- return sizeFixed64PackedSlice, appendFixed64PackedSlice
- }
- return sizeFixed64Slice, appendFixed64Slice
- }
- if nozero {
- return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
- }
- return sizeFixed64Value, appendFixed64Value
- case "varint":
- if pointer {
- return sizeVarint64Ptr, appendVarint64Ptr
- }
- if slice {
- if packed {
- return sizeVarint64PackedSlice, appendVarint64PackedSlice
- }
- return sizeVarint64Slice, appendVarint64Slice
- }
- if nozero {
- return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
- }
- return sizeVarint64Value, appendVarint64Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixedS64Ptr, appendFixedS64Ptr
- }
- if slice {
- if packed {
- return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
- }
- return sizeFixedS64Slice, appendFixedS64Slice
- }
- if nozero {
- return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
- }
- return sizeFixedS64Value, appendFixedS64Value
- case "varint":
- if pointer {
- return sizeVarintS64Ptr, appendVarintS64Ptr
- }
- if slice {
- if packed {
- return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
- }
- return sizeVarintS64Slice, appendVarintS64Slice
- }
- if nozero {
- return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
- }
- return sizeVarintS64Value, appendVarintS64Value
- case "zigzag64":
- if pointer {
- return sizeZigzag64Ptr, appendZigzag64Ptr
- }
- if slice {
- if packed {
- return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
- }
- return sizeZigzag64Slice, appendZigzag64Slice
- }
- if nozero {
- return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
- }
- return sizeZigzag64Value, appendZigzag64Value
- }
- case reflect.Float32:
- if pointer {
- return sizeFloat32Ptr, appendFloat32Ptr
- }
- if slice {
- if packed {
- return sizeFloat32PackedSlice, appendFloat32PackedSlice
- }
- return sizeFloat32Slice, appendFloat32Slice
- }
- if nozero {
- return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
- }
- return sizeFloat32Value, appendFloat32Value
- case reflect.Float64:
- if pointer {
- return sizeFloat64Ptr, appendFloat64Ptr
- }
- if slice {
- if packed {
- return sizeFloat64PackedSlice, appendFloat64PackedSlice
- }
- return sizeFloat64Slice, appendFloat64Slice
- }
- if nozero {
- return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
- }
- return sizeFloat64Value, appendFloat64Value
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return sizeStringPtr, appendUTF8StringPtr
- }
- if slice {
- return sizeStringSlice, appendUTF8StringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendUTF8StringValueNoZero
- }
- return sizeStringValue, appendUTF8StringValue
- }
- if pointer {
- return sizeStringPtr, appendStringPtr
- }
- if slice {
- return sizeStringSlice, appendStringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendStringValueNoZero
- }
- return sizeStringValue, appendStringValue
- case reflect.Slice:
- if slice {
- return sizeBytesSlice, appendBytesSlice
- }
- if oneof {
- // Oneof bytes field may also have "proto3" tag.
- // We want to marshal it as a oneof field. Do this
- // check before the proto3 check.
- return sizeBytesOneof, appendBytesOneof
- }
- if proto3 {
- return sizeBytes3, appendBytes3
- }
- return sizeBytes, appendBytes
- case reflect.Struct:
- switch encoding {
- case "group":
- if slice {
- return makeGroupSliceMarshaler(getMarshalInfo(t))
- }
- return makeGroupMarshaler(getMarshalInfo(t))
- case "bytes":
- if slice {
- return makeMessageSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageMarshaler(getMarshalInfo(t))
- }
- }
- panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(v) + tagsize
- }
- return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
- }
- return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
- }
- return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeBoolValue(_ pointer, tagsize int) int {
- return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toBool()
- if !v {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
- p := *ptr.toBoolPtr()
- if p == nil {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return 0
- }
- return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- if v == "" {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
- p := *ptr.toStringPtr()
- if p == nil {
- return 0
- }
- v := *p
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
- s := *ptr.toStringSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if v == nil {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBytesSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24))
- return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24),
- byte(v>>32),
- byte(v>>40),
- byte(v>>48),
- byte(v>>56))
- return b
-}
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
- // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
- // have non-leaf inliner.
- switch {
- case v < 1<<7:
- b = append(b, byte(v))
- case v < 1<<14:
- b = append(b,
- byte(v&0x7f|0x80),
- byte(v>>7))
- case v < 1<<21:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte(v>>14))
- case v < 1<<28:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte(v>>21))
- case v < 1<<35:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte(v>>28))
- case v < 1<<42:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte(v>>35))
- case v < 1<<49:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte(v>>42))
- case v < 1<<56:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte(v>>49))
- case v < 1<<63:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte(v>>56))
- default:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte((v>>56)&0x7f|0x80),
- 1)
- }
- return b
-}
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, *p)
- return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(*p))
- return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(*p))
- return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, *p)
- return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(*p))
- return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(*p))
- return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, *p)
- return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- if !v {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = append(b, 1)
- return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toBoolPtr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- if *p {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(len(s)))
- for _, v := range s {
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toStringSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- s := *ptr.toStringSlice()
- for _, v := range s {
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if v == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBytesSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- return u.size(p) + 2*tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- var err error
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, p, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- return b, err
- }
-}
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- n += u.size(v) + 2*tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, v, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.size(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(p)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, p, deterministic)
- }
-}
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
- // figure out key and value type
- t := f.Type
- keyType := t.Key()
- valType := t.Elem()
- keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
- valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
- keyWireTag := 1<<3 | wiretype(keyTags[0])
- valWireTag := 2<<3 | wiretype(valTags[0])
-
- // We create an interface to get the addresses of the map key and value.
- // If value is pointer-typed, the interface is a direct interface, the
- // idata itself is the value. Otherwise, the idata is the pointer to the
- // value.
- // Key cannot be pointer-typed.
- valIsPtr := valType.Kind() == reflect.Ptr
-
- // If value is a message with nested maps, calling
- // valSizer in marshal may be quadratic. We should use
- // cached version in marshal (but not in size).
- // If value is not message type, we don't have size cache,
- // but it cannot be nested either. Just use valSizer.
- valCachedSizer := valSizer
- if valIsPtr && valType.Elem().Kind() == reflect.Struct {
- u := getMarshalInfo(valType.Elem())
- valCachedSizer = func(ptr pointer, tagsize int) int {
- // Same as message sizer, but use cache.
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.cachedsize(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- }
- }
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(t).Elem() // the map
- n := 0
- for _, k := range m.MapKeys() {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(t).Elem() // the map
- var err error
- keys := m.MapKeys()
- if len(keys) > 1 && deterministic {
- sort.Sort(mapKeys(keys))
- }
-
- var nerr nonFatal
- for _, k := range keys {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
- b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- b = appendVarint(b, uint64(siz))
- b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
- // Oneof field is an interface. We need to get the actual data type on the fly.
- t := f.Type
- return func(ptr pointer, _ int) int {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return 0
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- e := fi.oneofElems[telem]
- return e.sizer(p, e.tagsize)
- },
- func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return b, nil
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
- return b, errOneofHasNil
- }
- e := fi.oneofElems[telem]
- return e.marshaler(b, p, e.wiretag, deterministic)
- }
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, ei.tagsize)
- }
- mu.Unlock()
- return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- // Not sure this is required, but the old code does it.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// message set format is:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for id, e := range m {
- n += 2 // start group, end group. tag = 1 (size=1)
- n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- siz := len(msgWithLen)
- n += siz + 1 // message, tag = 3 (size=1)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, 1) // message, tag = 3 (size=1)
- }
- mu.Unlock()
- return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for id, e := range m {
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b = append(b, 1<<3|WireEndGroup)
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, id := range keys {
- e := m[int32(id)]
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- b = append(b, 1<<3|WireEndGroup)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
- if m == nil {
- return 0
- }
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- n += ei.sizer(p, ei.tagsize)
- }
- return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
- if m == nil {
- return b, nil
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- var err error
- var nerr nonFatal
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr, ei.deref)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
- XXX_Size() int
- XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
- if m, ok := pb.(newMarshaler); ok {
- return m.XXX_Size()
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, _ := m.Marshal()
- return len(b)
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return 0
- }
- var info InternalMessageInfo
- return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- b := make([]byte, 0, siz)
- return m.XXX_Marshal(b, false)
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- return m.Marshal()
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return nil, ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- b := make([]byte, 0, siz)
- return info.Marshal(b, pb, false)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
- var err error
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
- return err
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, err := m.Marshal()
- p.buf = append(p.buf, b...)
- return err
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
- return err
-}
-
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
- need := len(p.buf) + n
- if need <= cap(p.buf) {
- return
- }
- newCap := len(p.buf) * 2
- if newCap < need {
- newCap = need
- }
- p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
deleted file mode 100644
index 5525def6..00000000
--- a/vendor/github.com/golang/protobuf/proto/table_merge.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
- mi := atomicLoadMergeInfo(&a.merge)
- if mi == nil {
- mi = getMergeInfo(reflect.TypeOf(dst).Elem())
- atomicStoreMergeInfo(&a.merge, mi)
- }
- mi.merge(toPointer(&dst), toPointer(&src))
-}
-
-type mergeInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []mergeFieldInfo
- unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
-
- // isPointer reports whether the value in the field is a pointer.
- // This is true for the following situations:
- // * Pointer to struct
- // * Pointer to basic type (proto2 only)
- // * Slice (first value in slice header is a pointer)
- // * String (first value in string header is a pointer)
- isPointer bool
-
- // basicWidth reports the width of the field assuming that it is directly
- // embedded in the struct (as is the case for basic types in proto3).
- // The possible values are:
- // 0: invalid
- // 1: bool
- // 4: int32, uint32, float32
- // 8: int64, uint64, float64
- basicWidth int
-
- // Where dst and src are pointers to the types being merged.
- merge func(dst, src pointer)
-}
-
-var (
- mergeInfoMap = map[reflect.Type]*mergeInfo{}
- mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
- mergeInfoLock.Lock()
- defer mergeInfoLock.Unlock()
- mi := mergeInfoMap[t]
- if mi == nil {
- mi = &mergeInfo{typ: t}
- mergeInfoMap[t] = mi
- }
- return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
- if dst.isNil() {
- panic("proto: nil destination")
- }
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&mi.initialized) == 0 {
- mi.computeMergeInfo()
- }
-
- for _, fi := range mi.fields {
- sfp := src.offset(fi.field)
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
- continue
- }
- if fi.basicWidth > 0 {
- switch {
- case fi.basicWidth == 1 && !*sfp.toBool():
- continue
- case fi.basicWidth == 4 && *sfp.toUint32() == 0:
- continue
- case fi.basicWidth == 8 && *sfp.toUint64() == 0:
- continue
- }
- }
- }
-
- dfp := dst.offset(fi.field)
- fi.merge(dfp, sfp)
- }
-
- // TODO: Make this faster?
- out := dst.asPointerTo(mi.typ).Elem()
- in := src.asPointerTo(mi.typ).Elem()
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- if mi.unrecognized.IsValid() {
- if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
- *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
- }
- }
-}
-
-func (mi *mergeInfo) computeMergeInfo() {
- mi.lock.Lock()
- defer mi.lock.Unlock()
- if mi.initialized != 0 {
- return
- }
- t := mi.typ
- n := t.NumField()
-
- props := GetProperties(t)
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- mfi := mergeFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- switch tf.Kind() {
- case reflect.Ptr, reflect.Slice, reflect.String:
- // As a special case, we assume slices and strings are pointers
- // since we know that the first field in the SliceSlice or
- // StringHeader is a data pointer.
- mfi.isPointer = true
- case reflect.Bool:
- mfi.basicWidth = 1
- case reflect.Int32, reflect.Uint32, reflect.Float32:
- mfi.basicWidth = 4
- case reflect.Int64, reflect.Uint64, reflect.Float64:
- mfi.basicWidth = 8
- }
- }
-
- // Unwrap tf to get at its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + tf.Name())
- }
-
- switch tf.Kind() {
- case reflect.Int32:
- switch {
- case isSlice: // E.g., []int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
- /*
- sfsp := src.toInt32Slice()
- if *sfsp != nil {
- dfsp := dst.toInt32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- */
- sfs := src.getInt32Slice()
- if sfs != nil {
- dfs := dst.getInt32Slice()
- dfs = append(dfs, sfs...)
- if dfs == nil {
- dfs = []int32{}
- }
- dst.setInt32Slice(dfs)
- }
- }
- case isPointer: // E.g., *int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
- /*
- sfpp := src.toInt32Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt32Ptr()
- if *dfpp == nil {
- *dfpp = Int32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- */
- sfp := src.getInt32Ptr()
- if sfp != nil {
- dfp := dst.getInt32Ptr()
- if dfp == nil {
- dst.setInt32Ptr(*sfp)
- } else {
- *dfp = *sfp
- }
- }
- }
- default: // E.g., int32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt32(); v != 0 {
- *dst.toInt32() = v
- }
- }
- }
- case reflect.Int64:
- switch {
- case isSlice: // E.g., []int64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toInt64Slice()
- if *sfsp != nil {
- dfsp := dst.toInt64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- }
- case isPointer: // E.g., *int64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toInt64Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt64Ptr()
- if *dfpp == nil {
- *dfpp = Int64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., int64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt64(); v != 0 {
- *dst.toInt64() = v
- }
- }
- }
- case reflect.Uint32:
- switch {
- case isSlice: // E.g., []uint32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint32Slice()
- if *sfsp != nil {
- dfsp := dst.toUint32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint32{}
- }
- }
- }
- case isPointer: // E.g., *uint32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint32Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint32Ptr()
- if *dfpp == nil {
- *dfpp = Uint32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint32(); v != 0 {
- *dst.toUint32() = v
- }
- }
- }
- case reflect.Uint64:
- switch {
- case isSlice: // E.g., []uint64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint64Slice()
- if *sfsp != nil {
- dfsp := dst.toUint64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint64{}
- }
- }
- }
- case isPointer: // E.g., *uint64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint64Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint64Ptr()
- if *dfpp == nil {
- *dfpp = Uint64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint64(); v != 0 {
- *dst.toUint64() = v
- }
- }
- }
- case reflect.Float32:
- switch {
- case isSlice: // E.g., []float32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat32Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float32{}
- }
- }
- }
- case isPointer: // E.g., *float32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat32Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat32Ptr()
- if *dfpp == nil {
- *dfpp = Float32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat32(); v != 0 {
- *dst.toFloat32() = v
- }
- }
- }
- case reflect.Float64:
- switch {
- case isSlice: // E.g., []float64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat64Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float64{}
- }
- }
- }
- case isPointer: // E.g., *float64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat64Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat64Ptr()
- if *dfpp == nil {
- *dfpp = Float64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat64(); v != 0 {
- *dst.toFloat64() = v
- }
- }
- }
- case reflect.Bool:
- switch {
- case isSlice: // E.g., []bool
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toBoolSlice()
- if *sfsp != nil {
- dfsp := dst.toBoolSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []bool{}
- }
- }
- }
- case isPointer: // E.g., *bool
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toBoolPtr()
- if *sfpp != nil {
- dfpp := dst.toBoolPtr()
- if *dfpp == nil {
- *dfpp = Bool(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., bool
- mfi.merge = func(dst, src pointer) {
- if v := *src.toBool(); v {
- *dst.toBool() = v
- }
- }
- }
- case reflect.String:
- switch {
- case isSlice: // E.g., []string
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toStringSlice()
- if *sfsp != nil {
- dfsp := dst.toStringSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []string{}
- }
- }
- }
- case isPointer: // E.g., *string
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toStringPtr()
- if *sfpp != nil {
- dfpp := dst.toStringPtr()
- if *dfpp == nil {
- *dfpp = String(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., string
- mfi.merge = func(dst, src pointer) {
- if v := *src.toString(); v != "" {
- *dst.toString() = v
- }
- }
- }
- case reflect.Slice:
- isProto3 := props.Prop[i].proto3
- switch {
- case isPointer:
- panic("bad pointer in byte slice case in " + tf.Name())
- case tf.Elem().Kind() != reflect.Uint8:
- panic("bad element kind in byte slice case in " + tf.Name())
- case isSlice: // E.g., [][]byte
- mfi.merge = func(dst, src pointer) {
- sbsp := src.toBytesSlice()
- if *sbsp != nil {
- dbsp := dst.toBytesSlice()
- for _, sb := range *sbsp {
- if sb == nil {
- *dbsp = append(*dbsp, nil)
- } else {
- *dbsp = append(*dbsp, append([]byte{}, sb...))
- }
- }
- if *dbsp == nil {
- *dbsp = [][]byte{}
- }
- }
- }
- default: // E.g., []byte
- mfi.merge = func(dst, src pointer) {
- sbp := src.toBytes()
- if *sbp != nil {
- dbp := dst.toBytes()
- if !isProto3 || len(*sbp) > 0 {
- *dbp = append([]byte{}, *sbp...)
- }
- }
- }
- }
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("message field %s without pointer", tf))
- case isSlice: // E.g., []*pb.T
- mi := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sps := src.getPointerSlice()
- if sps != nil {
- dps := dst.getPointerSlice()
- for _, sp := range sps {
- var dp pointer
- if !sp.isNil() {
- dp = valToPointer(reflect.New(tf))
- mi.merge(dp, sp)
- }
- dps = append(dps, dp)
- }
- if dps == nil {
- dps = []pointer{}
- }
- dst.setPointerSlice(dps)
- }
- }
- default: // E.g., *pb.T
- mi := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- dp := dst.getPointer()
- if dp.isNil() {
- dp = valToPointer(reflect.New(tf))
- dst.setPointer(dp)
- }
- mi.merge(dp, sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in map case in " + tf.Name())
- default: // E.g., map[K]V
- mfi.merge = func(dst, src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- dm := dst.asPointerTo(tf).Elem()
- if dm.IsNil() {
- dm.Set(reflect.MakeMap(tf))
- }
-
- switch tf.Elem().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(Clone(val.Interface().(Message)))
- dm.SetMapIndex(key, val)
- }
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- dm.SetMapIndex(key, val)
- }
- default: // Basic type (e.g., string)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- dm.SetMapIndex(key, val)
- }
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in interface case in " + tf.Name())
- default: // E.g., interface{}
- // TODO: Make this faster?
- mfi.merge = func(dst, src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- du := dst.asPointerTo(tf).Elem()
- typ := su.Elem().Type()
- if du.IsNil() || du.Elem().Type() != typ {
- du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
- }
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- dv := du.Elem().Elem().Field(0)
- if dv.Kind() == reflect.Ptr && dv.IsNil() {
- dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- Merge(dv.Interface().(Message), sv.Interface().(Message))
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
- default: // Basic type (e.g., string)
- dv.Set(sv)
- }
- }
- }
- }
- default:
- panic(fmt.Sprintf("merger not found for type:%s", tf))
- }
- mi.fields = append(mi.fields, mfi)
- }
-
- mi.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- mi.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&mi.initialized, 1)
-}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index acee2fc5..00000000
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2053 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
- // Load the unmarshal information for this message type.
- // The atomic load ensures memory consistency.
- u := atomicLoadUnmarshalInfo(&a.unmarshal)
- if u == nil {
- // Slow path: find unmarshal info for msg, update a with it.
- u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
- atomicStoreUnmarshalInfo(&a.unmarshal, u)
- }
- // Then do the unmarshaling.
- err := u.unmarshal(toPointer(&msg), b)
- return err
-}
-
-type unmarshalInfo struct {
- typ reflect.Type // type of the protobuf struct
-
- // 0 = only typ field is initialized
- // 1 = completely initialized
- initialized int32
- lock sync.Mutex // prevents double initialization
- dense []unmarshalFieldInfo // fields indexed by tag #
- sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
- reqFields []string // names of required fields
- reqMask uint64 // 1< 0 {
- // Read tag and wire type.
- // Special case 1 and 2 byte varints.
- var x uint64
- if b[0] < 128 {
- x = uint64(b[0])
- b = b[1:]
- } else if len(b) >= 2 && b[1] < 128 {
- x = uint64(b[0]&0x7f) + uint64(b[1])<<7
- b = b[2:]
- } else {
- var n int
- x, n = decodeVarint(b)
- if n == 0 {
- return io.ErrUnexpectedEOF
- }
- b = b[n:]
- }
- tag := x >> 3
- wire := int(x) & 7
-
- // Dispatch on the tag to one of the unmarshal* functions below.
- var f unmarshalFieldInfo
- if tag < uint64(len(u.dense)) {
- f = u.dense[tag]
- } else {
- f = u.sparse[tag]
- }
- if fn := f.unmarshal; fn != nil {
- var err error
- b, err = fn(b, m.offset(f.field), wire)
- if err == nil {
- reqMask |= f.reqMask
- continue
- }
- if r, ok := err.(*RequiredNotSetError); ok {
- // Remember this error, but keep parsing. We need to produce
- // a full parse even if a required field is missing.
- if errLater == nil {
- errLater = r
- }
- reqMask |= f.reqMask
- continue
- }
- if err != errInternalBadWireType {
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return err
- }
- // Fragments with bad wire type are treated as unknown fields.
- }
-
- // Unknown tag.
- if !u.unrecognized.IsValid() {
- // Don't keep unrecognized data; just skip it.
- var err error
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- continue
- }
- // Keep unrecognized data around.
- // maybe in extensions, maybe in the unrecognized field.
- z := m.offset(u.unrecognized).toBytes()
- var emap map[int32]Extension
- var e Extension
- for _, r := range u.extensionRanges {
- if uint64(r.Start) <= tag && tag <= uint64(r.End) {
- if u.extensions.IsValid() {
- mp := m.offset(u.extensions).toExtensions()
- emap = mp.extensionsWrite()
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.oldExtensions.IsValid() {
- p := m.offset(u.oldExtensions).toOldExtensions()
- emap = *p
- if emap == nil {
- emap = map[int32]Extension{}
- *p = emap
- }
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- panic("no extensions field available")
- }
- }
-
- // Use wire type to skip data.
- var err error
- b0 := b
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- *z = encodeVarint(*z, tag<<3|uint64(wire))
- *z = append(*z, b0[:len(b0)-len(b)]...)
-
- if emap != nil {
- emap[int32(tag)] = e
- }
- }
- if reqMask != u.reqMask && errLater == nil {
- // A required field of this message is missing.
- for _, n := range u.reqFields {
- if reqMask&1 == 0 {
- errLater = &RequiredNotSetError{n}
- }
- reqMask >>= 1
- }
- }
- return errLater
-}
-
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
- u.lock.Lock()
- defer u.lock.Unlock()
- if u.initialized != 0 {
- return
- }
- t := u.typ
- n := t.NumField()
-
- // Set up the "not found" value for the unrecognized byte buffer.
- // This is the default for proto3.
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.oldExtensions = invalidField
-
- // List of the generated type and offset for each oneof field.
- type oneofField struct {
- ityp reflect.Type // interface type of oneof field
- field field // offset in containing message
- }
- var oneofFields []oneofField
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if f.Name == "XXX_unrecognized" {
- // The byte slice used to hold unrecognized input is special.
- if f.Type != reflect.TypeOf(([]byte)(nil)) {
- panic("bad type for XXX_unrecognized field: " + f.Type.Name())
- }
- u.unrecognized = toField(&f)
- continue
- }
- if f.Name == "XXX_InternalExtensions" {
- // Ditto here.
- if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
- panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
- }
- u.extensions = toField(&f)
- if f.Tag.Get("protobuf_messageset") == "1" {
- u.isMessageSet = true
- }
- continue
- }
- if f.Name == "XXX_extensions" {
- // An older form of the extensions field.
- if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
- panic("bad type for XXX_extensions field: " + f.Type.Name())
- }
- u.oldExtensions = toField(&f)
- continue
- }
- if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
- continue
- }
-
- oneof := f.Tag.Get("protobuf_oneof")
- if oneof != "" {
- oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
- // The rest of oneof processing happens below.
- continue
- }
-
- tags := f.Tag.Get("protobuf")
- tagArray := strings.Split(tags, ",")
- if len(tagArray) < 2 {
- panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
- }
- tag, err := strconv.Atoi(tagArray[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tagArray[1])
- }
-
- name := ""
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- }
-
- // Extract unmarshaling function from the field (its type and tags).
- unmarshal := fieldUnmarshaler(&f)
-
- // Required field?
- var reqMask uint64
- if tagArray[2] == "req" {
- bit := len(u.reqFields)
- u.reqFields = append(u.reqFields, name)
- reqMask = uint64(1) << uint(bit)
- // TODO: if we have more than 64 required fields, we end up
- // not verifying that all required fields are present.
- // Fix this, perhaps using a count of required fields?
- }
-
- // Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask, name)
- }
-
- // Find any types associated with oneof fields.
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
- for _, v := range oneofImplementers {
- tptr := reflect.TypeOf(v) // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
- }
-
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
- }
-
- }
-
- // Get extension ranges, if any.
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
- if fn.IsValid() {
- if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
- panic("a message with extensions, but no extensions field in " + t.Name())
- }
- u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
- }
-
- // Explicitly disallow tag 0. This will ensure we flag an error
- // when decoding a buffer of all zeros. Without this code, we
- // would decode and skip an all-zero buffer of even length.
- // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
- u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
- return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0, "")
-
- // Set mask for required field check.
- u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
- for len(u.dense) <= tag {
- u.dense = append(u.dense, unmarshalFieldInfo{})
- }
- u.dense[tag] = i
- return
- }
- if u.sparse == nil {
- u.sparse = map[uint64]unmarshalFieldInfo{}
- }
- u.sparse[uint64(tag)] = i
-}
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
- if f.Type.Kind() == reflect.Map {
- return makeUnmarshalMap(f)
- }
- return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
- tagArray := strings.Split(tags, ",")
- encoding := tagArray[0]
- name := "unknown"
- proto3 := false
- validateUTF8 := true
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- if tag == "proto3" {
- proto3 = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- // Figure out packaging (pointer, slice, or both)
- slice := false
- pointer := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- // We'll never have both pointer and slice for basic types.
- if pointer && slice && t.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + t.Name())
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return unmarshalBoolPtr
- }
- if slice {
- return unmarshalBoolSlice
- }
- return unmarshalBoolValue
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixedS32Ptr
- }
- if slice {
- return unmarshalFixedS32Slice
- }
- return unmarshalFixedS32Value
- case "varint":
- // this could be int32 or enum
- if pointer {
- return unmarshalInt32Ptr
- }
- if slice {
- return unmarshalInt32Slice
- }
- return unmarshalInt32Value
- case "zigzag32":
- if pointer {
- return unmarshalSint32Ptr
- }
- if slice {
- return unmarshalSint32Slice
- }
- return unmarshalSint32Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixedS64Ptr
- }
- if slice {
- return unmarshalFixedS64Slice
- }
- return unmarshalFixedS64Value
- case "varint":
- if pointer {
- return unmarshalInt64Ptr
- }
- if slice {
- return unmarshalInt64Slice
- }
- return unmarshalInt64Value
- case "zigzag64":
- if pointer {
- return unmarshalSint64Ptr
- }
- if slice {
- return unmarshalSint64Slice
- }
- return unmarshalSint64Value
- }
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixed32Ptr
- }
- if slice {
- return unmarshalFixed32Slice
- }
- return unmarshalFixed32Value
- case "varint":
- if pointer {
- return unmarshalUint32Ptr
- }
- if slice {
- return unmarshalUint32Slice
- }
- return unmarshalUint32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixed64Ptr
- }
- if slice {
- return unmarshalFixed64Slice
- }
- return unmarshalFixed64Value
- case "varint":
- if pointer {
- return unmarshalUint64Ptr
- }
- if slice {
- return unmarshalUint64Slice
- }
- return unmarshalUint64Value
- }
- case reflect.Float32:
- if pointer {
- return unmarshalFloat32Ptr
- }
- if slice {
- return unmarshalFloat32Slice
- }
- return unmarshalFloat32Value
- case reflect.Float64:
- if pointer {
- return unmarshalFloat64Ptr
- }
- if slice {
- return unmarshalFloat64Slice
- }
- return unmarshalFloat64Value
- case reflect.Map:
- panic("map type in typeUnmarshaler in " + t.Name())
- case reflect.Slice:
- if pointer {
- panic("bad pointer in slice case in " + t.Name())
- }
- if slice {
- return unmarshalBytesSlice
- }
- return unmarshalBytesValue
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return unmarshalUTF8StringPtr
- }
- if slice {
- return unmarshalUTF8StringSlice
- }
- return unmarshalUTF8StringValue
- }
- if pointer {
- return unmarshalStringPtr
- }
- if slice {
- return unmarshalStringSlice
- }
- return unmarshalStringValue
- case reflect.Struct:
- // message or group field
- if !pointer {
- panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
- }
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
- case "group":
- if slice {
- return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
- }
- }
- panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64() = v
- return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32() = v
- return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64() = v
- return b[8:], nil
-}
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64() = v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32() = v
- return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- *f.toInt32() = v
- return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.setInt32Ptr(v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- // Note: any length varint is allowed, even though any sane
- // encoder will use one byte.
- // See https://github.com/golang/protobuf/issues/76
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- // TODO: check if x>1? Tests seem to indicate no.
- v := x != 0
- *f.toBool() = v
- return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- *f.toBoolPtr() = &v
- return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- b = b[n:]
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64() = v
- return b[8:], nil
-}
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32() = v
- return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // The use of append here is a trick which avoids the zeroing
- // that would be required if we used a make/copy pair.
- // We append to emptyBuf instead of nil because we want
- // a non-nil result even when the length is 0.
- v := append(emptyBuf[:], b[:x]...)
- *f.toBytes() = v
- return b[x:], nil
-}
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := append(emptyBuf[:], b[:x]...)
- s := f.toBytesSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[y:], err
- }
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[y:], err
- }
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
- t := f.Type
- kt := t.Key()
- vt := t.Elem()
- unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
- unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // The map entry is a submessage. Figure out how big it is.
- if w != WireBytes {
- return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- r := b[x:] // unused data to return
- b = b[:x] // data for map entry
-
- // Note: we could use #keys * #values ~= 200 functions
- // to do map decoding without reflection. Probably not worth it.
- // Maps will be somewhat slow. Oh well.
-
- // Read key and value from data.
- var nerr nonFatal
- k := reflect.New(kt)
- v := reflect.New(vt)
- for len(b) > 0 {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- wire := int(x) & 7
- b = b[n:]
-
- var err error
- switch x >> 3 {
- case 1:
- b, err = unmarshalKey(b, valToPointer(k), wire)
- case 2:
- b, err = unmarshalVal(b, valToPointer(v), wire)
- default:
- err = errInternalBadWireType // skip unknown tag
- }
-
- if nerr.Merge(err) {
- continue
- }
- if err != errInternalBadWireType {
- return nil, err
- }
-
- // Skip past unknown fields.
- b, err = skipField(b, wire)
- if err != nil {
- return nil, err
- }
- }
-
- // Get map, allocate if needed.
- m := f.asPointerTo(t).Elem() // an addressable map[K]T
- if m.IsNil() {
- m.Set(reflect.MakeMap(t))
- }
-
- // Insert into map.
- m.SetMapIndex(k.Elem(), v.Elem())
-
- return r, nerr.E
- }
-}
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-// oneof F {
-// int64 X = 1;
-// float64 Y = 2;
-// }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
- sf := typ.Field(0)
- field0 := toField(&sf)
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // Allocate holder for value.
- v := reflect.New(typ)
-
- // Unmarshal data into holder.
- // We unmarshal into the first field of the holder object.
- var err error
- var nerr nonFatal
- b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if !nerr.Merge(err) {
- return nil, err
- }
-
- // Write pointer to holder into target field.
- f.asPointerTo(ityp).Elem().Set(v)
-
- return b, nerr.E
- }
-}
-
-// Error used by decode internally.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
- switch wire {
- case WireVarint:
- _, k := decodeVarint(b)
- if k == 0 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[k:]
- case WireFixed32:
- if len(b) < 4 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[4:]
- case WireFixed64:
- if len(b) < 8 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[8:]
- case WireBytes:
- m, k := decodeVarint(b)
- if k == 0 || uint64(len(b)-k) < m {
- return b, io.ErrUnexpectedEOF
- }
- b = b[uint64(k)+m:]
- case WireStartGroup:
- _, i := findEndGroup(b)
- if i == -1 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[i:]
- default:
- return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
- }
- return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
- depth := 1
- i := 0
- for {
- x, n := decodeVarint(b[i:])
- if n == 0 {
- return -1, -1
- }
- j := i
- i += n
- switch x & 7 {
- case WireVarint:
- _, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- case WireFixed32:
- if len(b)-4 < i {
- return -1, -1
- }
- i += 4
- case WireFixed64:
- if len(b)-8 < i {
- return -1, -1
- }
- i += 8
- case WireBytes:
- m, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- if uint64(len(b)-i) < m {
- return -1, -1
- }
- i += int(m)
- case WireStartGroup:
- depth++
- case WireEndGroup:
- depth--
- if depth == 0 {
- return j, i
- }
- default:
- return -1, -1
- }
- }
-}
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
- for x >= 1<<7 {
- b = append(b, byte(x&0x7f|0x80))
- x >>= 7
- }
- return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
- var x, y uint64
- if len(b) == 0 {
- goto bad
- }
- x = uint64(b[0])
- if x < 0x80 {
- return x, 1
- }
- x -= 0x80
-
- if len(b) <= 1 {
- goto bad
- }
- y = uint64(b[1])
- x += y << 7
- if y < 0x80 {
- return x, 2
- }
- x -= 0x80 << 7
-
- if len(b) <= 2 {
- goto bad
- }
- y = uint64(b[2])
- x += y << 14
- if y < 0x80 {
- return x, 3
- }
- x -= 0x80 << 14
-
- if len(b) <= 3 {
- goto bad
- }
- y = uint64(b[3])
- x += y << 21
- if y < 0x80 {
- return x, 4
- }
- x -= 0x80 << 21
-
- if len(b) <= 4 {
- goto bad
- }
- y = uint64(b[4])
- x += y << 28
- if y < 0x80 {
- return x, 5
- }
- x -= 0x80 << 28
-
- if len(b) <= 5 {
- goto bad
- }
- y = uint64(b[5])
- x += y << 35
- if y < 0x80 {
- return x, 6
- }
- x -= 0x80 << 35
-
- if len(b) <= 6 {
- goto bad
- }
- y = uint64(b[6])
- x += y << 42
- if y < 0x80 {
- return x, 7
- }
- x -= 0x80 << 42
-
- if len(b) <= 7 {
- goto bad
- }
- y = uint64(b[7])
- x += y << 49
- if y < 0x80 {
- return x, 8
- }
- x -= 0x80 << 49
-
- if len(b) <= 8 {
- goto bad
- }
- y = uint64(b[8])
- x += y << 56
- if y < 0x80 {
- return x, 9
- }
- x -= 0x80 << 56
-
- if len(b) <= 9 {
- goto bad
- }
- y = uint64(b[9])
- x += y << 63
- if y < 2 {
- return x, 10
- }
-
-bad:
- return 0, 0
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 1aaee725..00000000
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,843 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Print("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if name == "XXX_NoUnkeyedLiteral" {
- continue
- }
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("\n")); err != nil {
- return err
- }
- continue
- }
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.MapValProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
-
- // Enums have a String method, so writeAny will work fine.
- if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv.Addr()
- if _, err := extendable(pv.Interface()); err == nil {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Bytes())); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if v.CanAddr() {
- // Calling v.Interface on a struct causes the reflect package to
- // copy the entire struct. This is racy with the new Marshaler
- // since we atomically update the XXX_sizecache.
- //
- // Thus, we retrieve a pointer to the struct if possible to avoid
- // a race since v.Interface on the pointer doesn't copy the struct.
- //
- // If v is not addressable, then we are not worried about a race
- // since it implies that the binary Marshaler cannot possibly be
- // mutating this value.
- v = v.Addr()
- }
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, err := fmt.Fprintf(w, "/* %v */\n", err)
- return err
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, err := w.Write(endBraceNewline); err != nil {
- return err
- }
- continue
- }
- if _, err := fmt.Fprint(w, tag); err != nil {
- return err
- }
- if wire != WireStartGroup {
- if err := w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err = w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- ep, _ := extendable(pv.Interface())
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
- m, mu := ep.extensionsRead()
- if m == nil {
- return nil
- }
- mu.Lock()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- mu.Unlock()
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(ep, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte(""))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 00000000..47eb3e44
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers that uses
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers that uses
+ // two's complement arithmetic to represent negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 00000000..a31134ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte(""), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+ // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(string(v.String()))
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index bb55a3af..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,880 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(i), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- if fieldSet["type_url"] {
- return p.errorf(anyRepeatedlyUnpacked, "type_url")
- }
- if fieldSet["value"] {
- return p.errorf(anyRepeatedlyUnpacked, "value")
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- fieldSet["type_url"] = true
- fieldSet["value"] = true
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(Message)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- field := sv.Field(oop.Field)
- if !field.IsNil() {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
- }
- field.Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order. See b/28924776 for a time
- // this went wrong.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.MapKeyProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- case "value":
- if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.MapValProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- default:
- p.back()
- return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
-
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // true/1/t/True or false/f/0/False.
- switch tok.value {
- case "true", "1", "t", "True":
- fv.SetBool(true)
- return nil
- case "false", "0", "f", "False":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- return um.UnmarshalText([]byte(s))
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- return newTextParser(s).readStruct(v.Elem(), "")
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 00000000..d7c28da5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 00000000..398e3485
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 70276e8f..e729dcff 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -1,141 +1,165 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements functions to marshal proto.Message to/from
-// google.protobuf.Any message.
-
import (
"fmt"
- "reflect"
"strings"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "github.com/golang/protobuf/ptypes/any"
)
-const googleApis = "type.googleapis.com/"
+const urlPrefix = "type.googleapis.com/"
-// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
-//
-// Note that regular type assertions should be done using the Is
-// function. AnyMessageName is provided for less common use cases like filtering a
-// sequence of Any messages based on a set of allowed message type names.
-func AnyMessageName(any *any.Any) (string, error) {
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+ name, err := anyMessageName(any)
+ return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
- slash := strings.LastIndex(any.TypeUrl, "/")
- if slash < 0 {
+ name := protoreflect.FullName(any.TypeUrl)
+ if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+ name = name[i+len("/"):]
+ }
+ if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
- return any.TypeUrl[slash+1:], nil
+ return name, nil
}
-// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
-func MarshalAny(pb proto.Message) (*any.Any, error) {
- value, err := proto.Marshal(pb)
+// MarshalAny marshals the given message m into an anypb.Any message.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+ switch dm := m.(type) {
+ case DynamicAny:
+ m = dm.Message
+ case *DynamicAny:
+ if dm == nil {
+ return nil, proto.ErrNil
+ }
+ m = dm.Message
+ }
+ b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
- return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in a google.protobuf.Any
-// message. The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
-type DynamicAny struct {
- proto.Message
+ return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
-// Empty returns a new proto.Message of the type specified in a
-// google.protobuf.Any message. It returns an error if corresponding message
-// type isn't linked in.
-func Empty(any *any.Any) (proto.Message, error) {
- aname, err := AnyMessageName(any)
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+func Empty(any *anypb.Any) (proto.Message, error) {
+ name, err := anyMessageName(any)
if err != nil {
return nil, err
}
-
- t := proto.MessageType(aname)
- if t == nil {
- return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+ mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+ if err != nil {
+ return nil, err
}
- return reflect.New(t.Elem()).Interface().(proto.Message), nil
+ return proto.MessageV1(mt.New().Interface()), nil
}
-// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
-// message and places the decoded result in pb. It returns an error if type of
-// contents of Any message does not match type of pb message.
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
//
-// pb can be a proto.Message, or a *DynamicAny.
-func UnmarshalAny(any *any.Any, pb proto.Message) error {
- if d, ok := pb.(*DynamicAny); ok {
- if d.Message == nil {
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+ if dm, ok := m.(*DynamicAny); ok {
+ if dm.Message == nil {
var err error
- d.Message, err = Empty(any)
+ dm.Message, err = Empty(any)
if err != nil {
return err
}
}
- return UnmarshalAny(any, d.Message)
+ m = dm.Message
}
- aname, err := AnyMessageName(any)
+ anyName, err := AnyMessageName(any)
if err != nil {
return err
}
-
- mname := proto.MessageName(pb)
- if aname != mname {
- return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+ msgName := proto.MessageName(m)
+ if anyName != msgName {
+ return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
- return proto.Unmarshal(any.Value, pb)
+ return proto.Unmarshal(any.Value, m)
}
-// Is returns true if any value contains a given message type.
-func Is(any *any.Any, pb proto.Message) bool {
- // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
- // but it avoids scanning TypeUrl for the slash.
- if any == nil {
+// Is reports whether the Any message contains a message of the specified type.
+func Is(any *anypb.Any, m proto.Message) bool {
+ if any == nil || m == nil {
return false
}
- name := proto.MessageName(pb)
- prefix := len(any.TypeUrl) - len(name)
- return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+ name := proto.MessageName(m)
+ if !strings.HasSuffix(any.TypeUrl, name) {
+ return false
+ }
+ return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+ if m.Message == nil {
+ return ""
+ }
+ return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+ if m.Message == nil {
+ return
+ }
+ m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+ return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+ if m.Message == nil {
+ return nil
+ }
+ return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+ return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+ return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+ return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+ return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+ return dynamicAny{t.MessageType.Zero()}
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 78ee5233..0ef27d33 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,200 +1,62 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/any.proto
+// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/any.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Any = anypb.Any
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": ,
-// "lastName":
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-type Any struct {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
- // Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
-func (m *Any) Reset() { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage() {}
-func (*Any) Descriptor() ([]byte, []int) {
- return fileDescriptor_b53526c13ae22eb4, []int{0}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
}
-func (*Any) XXX_WellKnownType() string { return "Any" }
-
-func (m *Any) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Any.Unmarshal(m, b)
-}
-func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Any.Marshal(b, m, deterministic)
-}
-func (m *Any) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Any.Merge(m, src)
-}
-func (m *Any) XXX_Size() int {
- return xxx_messageInfo_Any.Size(m)
-}
-func (m *Any) XXX_DiscardUnknown() {
- xxx_messageInfo_Any.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Any proto.InternalMessageInfo
-
-func (m *Any) GetTypeUrl() string {
- if m != nil {
- return m.TypeUrl
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+ return
}
- return ""
-}
-
-func (m *Any) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Any)(nil), "google.protobuf.Any")
-}
-
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-
-var fileDescriptor_b53526c13ae22eb4 = []byte{
- // 185 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
- 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
- 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
- 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
- 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
- 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
- 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
- 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
- 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
- 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
- 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+ file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
deleted file mode 100644
index 49329425..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ /dev/null
@@ -1,154 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option go_package = "github.com/golang/protobuf/ptypes/any";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "AnyProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := ptypes.MarshalAny(foo)
-// ...
-// foo := &pb.Foo{}
-// if err := ptypes.UnmarshalAny(any, foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": ,
-// "lastName":
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
-message Any {
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- //
- // In practice, teams usually precompile into the binary all types that they
- // expect it to use in the context of Any. However, for URLs which use the
- // scheme `http`, `https`, or no scheme, one can optionally set up a type
- // server that maps type URLs to message definitions as follows:
- //
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
- //
- // Note: this functionality is not currently available in the official
- // protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com.
- //
- // Schemes other than `http`, `https` (or the empty scheme) might be
- // used with implementation specific semantics.
- //
- string type_url = 1;
-
- // Must be a valid serialized protocol buffer of the above specified type.
- bytes value = 2;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
index c0d595da..fb9edd5c 100644
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -1,35 +1,6 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
-/*
-Package ptypes contains code for interacting with well-known types.
-*/
+// Package ptypes provides functionality for interacting with well-known types.
package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 26d1ca2f..6110ae8a 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -1,102 +1,72 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
import (
"errors"
"fmt"
"time"
- durpb "github.com/golang/protobuf/ptypes/duration"
+ durationpb "github.com/golang/protobuf/ptypes/duration"
)
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
const (
- // Range of a durpb.Duration in seconds, as specified in
- // google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
-// validateDuration determines whether the durpb.Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid durpb.Duration
-// may still be too large to fit into a time.Duration (the range of durpb.Duration
-// is about 10,000 years, and the range of time.Duration is about 290).
-func validateDuration(d *durpb.Duration) error {
- if d == nil {
- return errors.New("duration: nil Duration")
- }
- if d.Seconds < minSeconds || d.Seconds > maxSeconds {
- return fmt.Errorf("duration: %v: seconds out of range", d)
- }
- if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
- return fmt.Errorf("duration: %v: nanos out of range", d)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
- return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
- }
- return nil
-}
-
-// Duration converts a durpb.Duration to a time.Duration. Duration
-// returns an error if the durpb.Duration is invalid or is too large to be
-// represented in a time.Duration.
-func Duration(p *durpb.Duration) (time.Duration, error) {
- if err := validateDuration(p); err != nil {
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+ if err := validateDuration(dur); err != nil {
return 0, err
}
- d := time.Duration(p.Seconds) * time.Second
- if int64(d/time.Second) != p.Seconds {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ d := time.Duration(dur.Seconds) * time.Second
+ if int64(d/time.Second) != dur.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
- if p.Nanos != 0 {
- d += time.Duration(p.Nanos) * time.Nanosecond
- if (d < 0) != (p.Nanos < 0) {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ if dur.Nanos != 0 {
+ d += time.Duration(dur.Nanos) * time.Nanosecond
+ if (d < 0) != (dur.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
-// DurationProto converts a time.Duration to a durpb.Duration.
-func DurationProto(d time.Duration) *durpb.Duration {
+// DurationProto converts a time.Duration to a durationpb.Duration.
+func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
- return &durpb.Duration{
- Seconds: secs,
+ return &durationpb.Duration{
+ Seconds: int64(secs),
Nanos: int32(nanos),
}
}
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durpb.Duration may still be too large to fit into a time.Duration
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+ if dur == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", dur)
+ }
+ if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", dur)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 0d681ee2..d0079ee3 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,161 +1,63 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/duration.proto
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/duration.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Duration = durationpb.Duration
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-type Duration struct {
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-func (m *Duration) Reset() { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-func (*Duration) XXX_WellKnownType() string { return "Duration" }
-
-func (m *Duration) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Duration.Unmarshal(m, b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
-}
-func (m *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(m, src)
-}
-func (m *Duration) XXX_Size() int {
- return xxx_messageInfo_Duration.Size(m)
-}
-func (m *Duration) XXX_DiscardUnknown() {
- xxx_messageInfo_Duration.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Duration proto.InternalMessageInfo
-
-func (m *Duration) GetSeconds() int64 {
- if m != nil {
- return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+ return
}
- return 0
-}
-
-func (m *Duration) GetNanos() int32 {
- if m != nil {
- return m.Nanos
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
-}
-
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
-
-var fileDescriptor_23597b2ebd7ac6c5 = []byte{
- // 190 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
- 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
- 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
- 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
- 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
- 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
- 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
- 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
- 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
- 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
- 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
deleted file mode 100644
index 975fce41..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
+++ /dev/null
@@ -1,117 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/duration";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "DurationProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Duration represents a signed, fixed-length span of time represented
-// as a count of seconds and fractions of seconds at nanosecond
-// resolution. It is independent of any calendar and concepts like "day"
-// or "month". It is related to Timestamp in that the difference between
-// two Timestamp values is a Duration and it can be added or subtracted
-// from a Timestamp. Range is approximately +-10,000 years.
-//
-// # Examples
-//
-// Example 1: Compute Duration from two Timestamps in pseudo code.
-//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
-//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
-//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
-//
-// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
-//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
-//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
-//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
-//
-// Example 3: Compute Duration from datetime.timedelta in Python.
-//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
-//
-// # JSON Mapping
-//
-// In JSON format, the Duration type is encoded as a string rather than an
-// object, where the string ends in the suffix "s" (indicating seconds) and
-// is preceded by the number of seconds, with nanoseconds expressed as
-// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
-// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
-// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
-// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
-message Duration {
-
- // Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive. Note: these bounds are computed from:
- // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- int64 seconds = 1;
-
- // Signed fractions of a second at nanosecond resolution of the span
- // of time. Durations less than one second are represented with a 0
- // `seconds` field and a positive or negative `nanos` field. For durations
- // of one second or more, a non-zero value for the `nanos` field must be
- // of the same sign as the `seconds` field. Must be from -999,999,999
- // to +999,999,999 inclusive.
- int32 nanos = 2;
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 8da0df01..026d0d49 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -1,46 +1,18 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package ptypes
-// This file implements operations on google.protobuf.Timestamp.
-
import (
"errors"
"fmt"
"time"
- tspb "github.com/golang/protobuf/ptypes/timestamp"
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
+// Range of google.protobuf.Duration as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
@@ -50,44 +22,18 @@ const (
maxValidSeconds = 253402300800
)
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
-//
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *tspb.Timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
-
-// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
-func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
@@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
-func TimestampNow() *tspb.Timestamp {
+func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
@@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
-func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
- ts := &tspb.Timestamp{
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+ ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
@@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
return ts, nil
}
-// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
-// Timestamps, it returns an error message in parentheses.
-func TimestampString(ts *tspb.Timestamp) string {
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index 31cd846d..a76f8076 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,179 +1,64 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/timestamp.proto
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+// Symbols defined in public import of google/protobuf/timestamp.proto.
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+type Timestamp = timestamppb.Timestamp
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-type Timestamp struct {
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_292007bbfe81227e, []int{0}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+ 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
}
-func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
-
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Timestamp.Unmarshal(m, b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
-}
-func (m *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(m, src)
-}
-func (m *Timestamp) XXX_Size() int {
- return xxx_messageInfo_Timestamp.Size(m)
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
- xxx_messageInfo_Timestamp.DiscardUnknown(m)
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
}
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *Timestamp) GetSeconds() int64 {
- if m != nil {
- return m.Seconds
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+ return
}
- return 0
-}
-
-func (m *Timestamp) GetNanos() int32 {
- if m != nil {
- return m.Nanos
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
-}
-
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-
-var fileDescriptor_292007bbfe81227e = []byte{
- // 191 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
- 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
- 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
- 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
- 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
- 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
- 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
- 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
- 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
- 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
- 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
deleted file mode 100644
index eafb3fa0..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ /dev/null
@@ -1,135 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/timestamp";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "TimestampProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// A Timestamp represents a point in time independent of any time zone
-// or calendar, represented as seconds and fractions of seconds at
-// nanosecond resolution in UTC Epoch time. It is encoded using the
-// Proleptic Gregorian Calendar which extends the Gregorian calendar
-// backwards to year one. It is encoded assuming all minutes are 60
-// seconds long, i.e. leap seconds are "smeared" so that no leap second
-// table is needed for interpretation. Range is from
-// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
-// By restricting to that range, we ensure that we can convert to
-// and from RFC 3339 date strings.
-// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
-//
-// # Examples
-//
-// Example 1: Compute Timestamp from POSIX `time()`.
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
-//
-// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
-//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
-//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
-//
-// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
-//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
-//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
-//
-// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
-//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
-//
-//
-// Example 5: Compute Timestamp from current time in Python.
-//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
-//
-// # JSON Mapping
-//
-// In JSON format, the Timestamp type is encoded as a string in the
-// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
-// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
-// where {year} is always expressed using four digits while {month}, {day},
-// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
-// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
-// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required. A proto3 JSON serializer should always use UTC (as indicated by
-// "Z") when printing the Timestamp type and a proto3 JSON parser should be
-// able to accept both UTC and other timezones (as indicated by an offset).
-//
-// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
-// 01:30 UTC on January 15, 2017.
-//
-// In JavaScript, one can convert a Date object to this format using the
-// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
-// method. In Python, a standard `datetime.datetime` object can be converted
-// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
-// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
-// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
-// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
-message Timestamp {
-
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- int64 seconds = 1;
-
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- int32 nanos = 2;
-}
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
index 50d56ffb..52b111d5 100644
--- a/vendor/github.com/json-iterator/go/README.md
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -1,5 +1,5 @@
[](https://sourcegraph.com/github.com/json-iterator/go?badge)
-[](http://godoc.org/github.com/json-iterator/go)
+[](https://pkg.go.dev/github.com/json-iterator/go)
[](https://travis-ci.org/json-iterator/go)
[](https://codecov.io/gh/json-iterator/go)
[](https://goreportcard.com/report/github.com/json-iterator/go)
@@ -18,16 +18,16 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu
Raw Result (easyjson requires static code generation)
-| | ns/op | allocation bytes | allocation times |
-| --- | --- | --- | --- |
-| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
-| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
-| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
-| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
-| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
-| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+| | ns/op | allocation bytes | allocation times |
+| --------------- | ----------- | ---------------- | ---------------- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
-Always benchmark with your own workload.
+Always benchmark with your own workload.
The result depends heavily on the data input.
# Usage
@@ -41,10 +41,10 @@ import "encoding/json"
json.Marshal(&data)
```
-with
+with
```go
-import "github.com/json-iterator/go"
+import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Marshal(&data)
@@ -60,7 +60,7 @@ json.Unmarshal(input, &data)
with
```go
-import "github.com/json-iterator/go"
+import jsoniter "github.com/json-iterator/go"
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Unmarshal(input, &data)
@@ -78,10 +78,10 @@ go get github.com/json-iterator/go
Contributors
-* [thockin](https://github.com/thockin)
-* [mattn](https://github.com/mattn)
-* [cch123](https://github.com/cch123)
-* [Oleg Shaldybin](https://github.com/olegshaldybin)
-* [Jason Toffaletti](https://github.com/toffaletti)
+- [thockin](https://github.com/thockin)
+- [mattn](https://github.com/mattn)
+- [cch123](https://github.com/cch123)
+- [Oleg Shaldybin](https://github.com/olegshaldybin)
+- [Jason Toffaletti](https://github.com/toffaletti)
Report issue or pull request, or email taowen@gmail.com, or [](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
index a4b93c78..1f12f661 100644
--- a/vendor/github.com/json-iterator/go/any_str.go
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 {
flag := 1
startPos := 0
- endPos := 0
if any.val[0] == '+' || any.val[0] == '-' {
startPos = 1
}
@@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 {
flag = -1
}
+ endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
@@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 {
}
startPos := 0
- endPos := 0
if any.val[0] == '-' {
return 0
@@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 {
startPos = 1
}
+ endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
index 8c58fcba..2adcdc3b 100644
--- a/vendor/github.com/json-iterator/go/config.go
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
rawMessage := *(*json.RawMessage)(ptr)
iter := cfg.BorrowIterator([]byte(rawMessage))
+ defer cfg.ReturnIterator(iter)
iter.Read()
- if iter.Error != nil {
+ if iter.Error != nil && iter.Error != io.EOF {
stream.WriteRaw("null")
} else {
- cfg.ReturnIterator(iter)
stream.WriteRaw(string(rawMessage))
}
}, func(ptr unsafe.Pointer) bool {
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
index b6513711..58ee89c8 100644
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return iter.decrementDepth()
}
- iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+ iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
@@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return iter.decrementDepth()
}
- iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
iter.decrementDepth()
return false
}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
index e27e8d19..74a97bfe 100644
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -341,7 +341,7 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
continue
}
- if tag == "-" {
+ if tag == "-" || field.Name() == "_" {
continue
}
tagParts := strings.Split(tag, ",")
@@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole
fieldNames = []string{tagProvidedFieldName}
}
// private?
- isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+ isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
if isNotExported {
fieldNames = []string{}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
index 08e9a391..58296713 100644
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -49,6 +49,33 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
return decoder
}
}
+
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(textUnmarshalerType) {
+ return &textUnmarshalerDecoder{
+ valType: typ,
+ }
+ }
+
switch typ.Kind() {
case reflect.String:
return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -63,31 +90,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
default:
- ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(unmarshalerType) {
- return &referenceDecoder{
- &unmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(unmarshalerType) {
- return &unmarshalerDecoder{
- valType: typ,
- }
- }
- if ptrType.Implements(textUnmarshalerType) {
- return &referenceDecoder{
- &textUnmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(textUnmarshalerType) {
- return &textUnmarshalerDecoder{
- valType: typ,
- }
- }
return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
}
}
@@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
return encoder
}
}
+
+ if typ == textMarshalerType {
+ return &directTextMarshalerEncoder{
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Implements(textMarshalerType) {
+ return &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+
switch typ.Kind() {
case reflect.String:
return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
@@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
default:
- if typ == textMarshalerType {
- return &directTextMarshalerEncoder{
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
- if typ.Implements(textMarshalerType) {
- return &textMarshalerEncoder{
- valType: typ,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
if typ.Kind() == reflect.Interface {
return &dynamicMapKeyEncoder{ctx, typ}
}
@@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if c == '}' {
return
}
- if c != '"' {
- iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
- return
- }
iter.unreadByte()
key := decoder.keyType.UnsafeNew()
decoder.keyDecoder.Decode(key, iter)
@@ -290,16 +290,17 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteObjectStart()
mapIter := encoder.mapType.UnsafeIterate(ptr)
subStream := stream.cfg.BorrowStream(nil)
+ subStream.Attachment = stream.Attachment
subIter := stream.cfg.BorrowIterator(nil)
keyValues := encodedKeyValues{}
for mapIter.HasNext() {
- subStream.buf = make([]byte, 0, 64)
key, elem := mapIter.UnsafeNext()
+ subStreamIndex := subStream.Buffered()
encoder.keyEncoder.Encode(key, subStream)
if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
stream.Error = subStream.Error
}
- encodedKey := subStream.Buffer()
+ encodedKey := subStream.Buffer()[subStreamIndex:]
subIter.ResetBytes(encodedKey)
decodedKey := subIter.ReadString()
if stream.indention > 0 {
@@ -310,7 +311,7 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.elemEncoder.Encode(elem, subStream)
keyValues = append(keyValues, encodedKV{
key: decodedKey,
- keyValue: subStream.Buffer(),
+ keyValue: subStream.Buffer()[subStreamIndex:],
})
}
sort.Sort(keyValues)
@@ -320,6 +321,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
}
stream.Write(keyValue.keyValue)
}
+ if subStream.Error != nil && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
stream.WriteObjectEnd()
stream.cfg.ReturnStream(subStream)
stream.cfg.ReturnIterator(subIter)
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
index 43ec71d6..fa71f474 100644
--- a/vendor/github.com/json-iterator/go/reflect_optional.go
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -2,7 +2,6 @@ package jsoniter
import (
"github.com/modern-go/reflect2"
- "reflect"
"unsafe"
)
@@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
ptrType := typ.(*reflect2.UnsafePtrType)
elemType := ptrType.Elem()
decoder := decoderOfType(ctx, elemType)
- if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
- return &dereferenceDecoder{elemType, decoder}
- }
return &OptionalDecoder{elemType, decoder}
}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
index 5ad5cc56..d7eb0eb5 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
if c != '}' {
@@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
@@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
break
}
}
- if iter.Error != nil && iter.Error != io.EOF {
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
iter.decrementDepth()
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
index d0759cf6..152e3ef5 100644
--- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -200,6 +200,7 @@ type stringModeStringEncoder struct {
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
+ tempStream.Attachment = stream.Attachment
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
index 17662fde..23d8a3ad 100644
--- a/vendor/github.com/json-iterator/go/stream.go
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -103,14 +103,14 @@ func (stream *Stream) Flush() error {
if stream.Error != nil {
return stream.Error
}
- n, err := stream.out.Write(stream.buf)
+ _, err := stream.out.Write(stream.buf)
if err != nil {
if stream.Error == nil {
stream.Error = err
}
return err
}
- stream.buf = stream.buf[n:]
+ stream.buf = stream.buf[:0]
return nil
}
@@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() {
func (stream *Stream) WriteMore() {
stream.writeByte(',')
stream.writeIndention(0)
- stream.Flush()
}
// WriteArrayStart write [ with possible indention
diff --git a/vendor/github.com/open-policy-agent/opa/LICENSE b/vendor/github.com/open-policy-agent/opa/LICENSE
new file mode 100644
index 00000000..8f71f43f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/vendor/github.com/open-policy-agent/opa/ast/builtins.go
new file mode 100644
index 00000000..362e6311
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/builtins.go
@@ -0,0 +1,2311 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "strings"
+
+ "github.com/open-policy-agent/opa/types"
+)
+
+// Builtins is the registry of built-in functions supported by OPA.
+// Call RegisterBuiltin to add a new built-in.
+var Builtins []*Builtin
+
+// RegisterBuiltin adds a new built-in function to the registry.
+func RegisterBuiltin(b *Builtin) {
+ Builtins = append(Builtins, b)
+ BuiltinMap[b.Name] = b
+ if len(b.Infix) > 0 {
+ BuiltinMap[b.Infix] = b
+ }
+}
+
+// DefaultBuiltins is the registry of built-in functions supported in OPA
+// by default. When adding a new built-in function to OPA, update this
+// list.
+var DefaultBuiltins = [...]*Builtin{
+ // Unification/equality ("=")
+ Equality,
+
+ // Assignment (":=")
+ Assign,
+
+ // Comparisons
+ GreaterThan,
+ GreaterThanEq,
+ LessThan,
+ LessThanEq,
+ NotEqual,
+ Equal,
+
+ // Arithmetic
+ Plus,
+ Minus,
+ Multiply,
+ Divide,
+ Ceil,
+ Floor,
+ Round,
+ Abs,
+ Rem,
+
+ // Bitwise Arithmetic
+ BitsOr,
+ BitsAnd,
+ BitsNegate,
+ BitsXOr,
+ BitsShiftLeft,
+ BitsShiftRight,
+
+ // Binary
+ And,
+ Or,
+
+ // Aggregates
+ Count,
+ Sum,
+ Product,
+ Max,
+ Min,
+ Any,
+ All,
+
+ // Arrays
+ ArrayConcat,
+ ArraySlice,
+
+ // Conversions
+ ToNumber,
+
+ // Casts (DEPRECATED)
+ CastObject,
+ CastNull,
+ CastBoolean,
+ CastString,
+ CastSet,
+ CastArray,
+
+ // Regular Expressions
+ RegexIsValid,
+ RegexMatch,
+ RegexMatchDeprecated,
+ RegexSplit,
+ GlobsMatch,
+ RegexTemplateMatch,
+ RegexFind,
+ RegexFindAllStringSubmatch,
+
+ // Sets
+ SetDiff,
+ Intersection,
+ Union,
+
+ // Strings
+ Concat,
+ FormatInt,
+ IndexOf,
+ Substring,
+ Lower,
+ Upper,
+ Contains,
+ StartsWith,
+ EndsWith,
+ Split,
+ Replace,
+ ReplaceN,
+ Trim,
+ TrimLeft,
+ TrimPrefix,
+ TrimRight,
+ TrimSuffix,
+ TrimSpace,
+ Sprintf,
+
+ // Numbers
+ NumbersRange,
+
+ // Encoding
+ JSONMarshal,
+ JSONUnmarshal,
+ JSONIsValid,
+ Base64Encode,
+ Base64Decode,
+ Base64IsValid,
+ Base64UrlEncode,
+ Base64UrlEncodeNoPad,
+ Base64UrlDecode,
+ URLQueryDecode,
+ URLQueryEncode,
+ URLQueryEncodeObject,
+ URLQueryDecodeObject,
+ YAMLMarshal,
+ YAMLUnmarshal,
+ YAMLIsValid,
+ HexEncode,
+ HexDecode,
+
+ // Object Manipulation
+ ObjectUnion,
+ ObjectRemove,
+ ObjectFilter,
+ ObjectGet,
+
+ // JSON Object Manipulation
+ JSONFilter,
+ JSONRemove,
+ JSONPatch,
+
+ // Tokens
+ JWTDecode,
+ JWTVerifyRS256,
+ JWTVerifyRS384,
+ JWTVerifyRS512,
+ JWTVerifyPS256,
+ JWTVerifyPS384,
+ JWTVerifyPS512,
+ JWTVerifyES256,
+ JWTVerifyES384,
+ JWTVerifyES512,
+ JWTVerifyHS256,
+ JWTVerifyHS384,
+ JWTVerifyHS512,
+ JWTDecodeVerify,
+ JWTEncodeSignRaw,
+ JWTEncodeSign,
+
+ // Time
+ NowNanos,
+ ParseNanos,
+ ParseRFC3339Nanos,
+ ParseDurationNanos,
+ Date,
+ Clock,
+ Weekday,
+ AddDate,
+
+ // Crypto
+ CryptoX509ParseCertificates,
+ CryptoMd5,
+ CryptoSha1,
+ CryptoSha256,
+ CryptoX509ParseCertificateRequest,
+
+ // Graphs
+ WalkBuiltin,
+ ReachableBuiltin,
+
+ // Sort
+ Sort,
+
+ // Types
+ IsNumber,
+ IsString,
+ IsBoolean,
+ IsArray,
+ IsSet,
+ IsObject,
+ IsNull,
+ TypeNameBuiltin,
+
+ // HTTP
+ HTTPSend,
+
+ // Rego
+ RegoParseModule,
+
+ // OPA
+ OPARuntime,
+
+ // Tracing
+ Trace,
+
+ // CIDR
+ NetCIDROverlap,
+ NetCIDRIntersects,
+ NetCIDRContains,
+ NetCIDRContainsMatches,
+ NetCIDRExpand,
+ NetCIDRMerge,
+
+ // Glob
+ GlobMatch,
+ GlobQuoteMeta,
+
+ // Units
+ UnitsParseBytes,
+
+ // UUIDs
+ UUIDRFC4122,
+
+ //SemVers
+ SemVerIsValid,
+ SemVerCompare,
+}
+
+// BuiltinMap provides a convenient mapping of built-in names to
+// built-in definitions.
+var BuiltinMap map[string]*Builtin
+
+// IgnoreDuringPartialEval is a set of built-in functions that should not be
+// evaluated during partial evaluation. These functions are not partially
+// evaluated because they are not pure.
+var IgnoreDuringPartialEval = []*Builtin{
+ NowNanos,
+ HTTPSend,
+ UUIDRFC4122,
+}
+
+/**
+ * Unification
+ */
+
+// Equality represents the "=" operator.
+var Equality = &Builtin{
+ Name: "eq",
+ Infix: "=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+/**
+ * Assignment
+ */
+
+// Assign represents the assignment (":=") operator.
+var Assign = &Builtin{
+ Name: "assign",
+ Infix: ":=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+/**
+ * Comparisons
+ */
+
+// GreaterThan represents the ">" comparison operator.
+var GreaterThan = &Builtin{
+ Name: "gt",
+ Infix: ">",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+// GreaterThanEq represents the ">=" comparison operator.
+var GreaterThanEq = &Builtin{
+ Name: "gte",
+ Infix: ">=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+// LessThan represents the "<" comparison operator.
+var LessThan = &Builtin{
+ Name: "lt",
+ Infix: "<",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+// LessThanEq represents the "<=" comparison operator.
+var LessThanEq = &Builtin{
+ Name: "lte",
+ Infix: "<=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+// NotEqual represents the "!=" comparison operator.
+var NotEqual = &Builtin{
+ Name: "neq",
+ Infix: "!=",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+// Equal represents the "==" comparison operator.
+var Equal = &Builtin{
+ Name: "equal",
+ Infix: "==",
+ Decl: types.NewFunction(
+ types.Args(types.A, types.A),
+ types.B,
+ ),
+}
+
+/**
+ * Arithmetic
+ */
+
+// Plus adds two numbers together.
+var Plus = &Builtin{
+ Name: "plus",
+ Infix: "+",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// Minus subtracts the second number from the first number or computes the diff
+// between two sets.
+var Minus = &Builtin{
+ Name: "minus",
+ Infix: "-",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(types.N, types.NewSet(types.A)),
+ types.NewAny(types.N, types.NewSet(types.A)),
+ ),
+ types.NewAny(types.N, types.NewSet(types.A)),
+ ),
+}
+
+// Multiply multiplies two numbers together.
+var Multiply = &Builtin{
+ Name: "mul",
+ Infix: "*",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// Divide divides the first number by the second number.
+var Divide = &Builtin{
+ Name: "div",
+ Infix: "/",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// Round rounds the number to the nearest integer.
+var Round = &Builtin{
+ Name: "round",
+ Decl: types.NewFunction(
+ types.Args(types.N),
+ types.N,
+ ),
+}
+
+// Ceil rounds the number up to the nearest integer.
+var Ceil = &Builtin{
+ Name: "ceil",
+ Decl: types.NewFunction(
+ types.Args(types.N),
+ types.N,
+ ),
+}
+
+// Floor rounds the number down to the nearest integer.
+var Floor = &Builtin{
+ Name: "floor",
+ Decl: types.NewFunction(
+ types.Args(types.N),
+ types.N,
+ ),
+}
+
+// Abs returns the number without its sign.
+var Abs = &Builtin{
+ Name: "abs",
+ Decl: types.NewFunction(
+ types.Args(types.N),
+ types.N,
+ ),
+}
+
+// Rem returns the remainder for x%y for y != 0.
+var Rem = &Builtin{
+ Name: "rem",
+ Infix: "%",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+/**
+ * Bitwise
+ */
+
+// BitsOr returns the bitwise "or" of two integers.
+var BitsOr = &Builtin{
+ Name: "bits.or",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// BitsAnd returns the bitwise "and" of two integers.
+var BitsAnd = &Builtin{
+ Name: "bits.and",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// BitsNegate returns the bitwise "negation" of an integer (i.e. flips each
+// bit).
+var BitsNegate = &Builtin{
+ Name: "bits.negate",
+ Decl: types.NewFunction(
+ types.Args(types.N),
+ types.N,
+ ),
+}
+
+// BitsXOr returns the bitwise "exclusive-or" of two integers.
+var BitsXOr = &Builtin{
+ Name: "bits.xor",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// BitsShiftLeft returns a new integer with its bits shifted some value to the
+// left.
+var BitsShiftLeft = &Builtin{
+ Name: "bits.lsh",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+// BitsShiftRight returns a new integer with its bits shifted some value to the
+// right.
+var BitsShiftRight = &Builtin{
+ Name: "bits.rsh",
+ Decl: types.NewFunction(
+ types.Args(types.N, types.N),
+ types.N,
+ ),
+}
+
+/**
+ * Sets
+ */
+
+// And performs an intersection operation on sets.
+var And = &Builtin{
+ Name: "and",
+ Infix: "&",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewSet(types.A),
+ types.NewSet(types.A),
+ ),
+ types.NewSet(types.A),
+ ),
+}
+
+// Or performs a union operation on sets.
+var Or = &Builtin{
+ Name: "or",
+ Infix: "|",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewSet(types.A),
+ types.NewSet(types.A),
+ ),
+ types.NewSet(types.A),
+ ),
+}
+
+/**
+ * Aggregates
+ */
+
+// Count takes a collection or string and counts the number of elements in it.
+var Count = &Builtin{
+ Name: "count",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.S,
+ ),
+ ),
+ types.N,
+ ),
+}
+
+// Sum takes an array or set of numbers and sums them.
+var Sum = &Builtin{
+ Name: "sum",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.N),
+ types.NewArray(nil, types.N),
+ ),
+ ),
+ types.N,
+ ),
+}
+
+// Product takes an array or set of numbers and multiplies them.
+var Product = &Builtin{
+ Name: "product",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.N),
+ types.NewArray(nil, types.N),
+ ),
+ ),
+ types.N,
+ ),
+}
+
+// Max returns the maximum value in a collection.
+var Max = &Builtin{
+ Name: "max",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// Min returns the minimum value in a collection.
+var Min = &Builtin{
+ Name: "min",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// All takes a list and returns true if all of the items
+// are true. A collection of length 0 returns true.
+var All = &Builtin{
+ Name: "all",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A),
+ ),
+ ),
+ types.B,
+ ),
+}
+
+// Any takes a collection and returns true if any of the items
+// is true. A collection of length 0 returns false.
+var Any = &Builtin{
+ Name: "any",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A),
+ ),
+ ),
+ types.B,
+ ),
+}
+
+/**
+ * Arrays
+ */
+
+// ArrayConcat returns the result of concatenating two arrays together.
+var ArrayConcat = &Builtin{
+ Name: "array.concat",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewArray(nil, types.A),
+ types.NewArray(nil, types.A),
+ ),
+ types.NewArray(nil, types.A),
+ ),
+}
+
+// ArraySlice returns a slice of a given array
+var ArraySlice = &Builtin{
+ Name: "array.slice",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewArray(nil, types.A),
+ types.NewNumber(),
+ types.NewNumber(),
+ ),
+ types.NewArray(nil, types.A),
+ ),
+}
+
+/**
+ * Conversions
+ */
+
+// ToNumber takes a string, bool, or number value and converts it to a number.
+// Strings are converted to numbers using strconv.Atoi.
+// Boolean false is converted to 0 and boolean true is converted to 1.
+var ToNumber = &Builtin{
+ Name: "to_number",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.N,
+ types.S,
+ types.B,
+ types.NewNull(),
+ ),
+ ),
+ types.N,
+ ),
+}
+
+/**
+ * Regular Expressions
+ */
+
+// RegexMatch takes two strings and evaluates to true if the string in the second
+// position matches the pattern in the first position.
+var RegexMatch = &Builtin{
+ Name: "regex.match",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// RegexIsValid returns true if the regex pattern string is valid, otherwise false.
+var RegexIsValid = &Builtin{
+ Name: "regex.is_valid",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// RegexFindAllStringSubmatch returns an array of all successive matches of the expression.
+// It takes two strings and a number, the pattern, the value and number of matches to
+// return, -1 means all matches.
+var RegexFindAllStringSubmatch = &Builtin{
+ Name: "regex.find_all_string_submatch_n",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ types.N,
+ ),
+ types.NewArray(nil, types.NewArray(nil, types.S)),
+ ),
+}
+
+// RegexTemplateMatch takes two strings and evaluates to true if the string in the second
+// position matches the pattern in the first position.
+var RegexTemplateMatch = &Builtin{
+ Name: "regex.template_match",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// RegexSplit splits the input string by the occurrences of the given pattern.
+var RegexSplit = &Builtin{
+ Name: "regex.split",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.NewArray(nil, types.S),
+ ),
+}
+
+// RegexFind takes two strings and a number, the pattern, the value and number of match values to
+// return, -1 means all match values.
+var RegexFind = &Builtin{
+ Name: "regex.find_n",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ types.N,
+ ),
+ types.NewArray(nil, types.S),
+ ),
+}
+
+// GlobsMatch takes two strings regexp-style strings and evaluates to true if their
+// intersection matches a non-empty set of non-empty strings.
+// Examples:
+// - "a.a." and ".b.b" -> true.
+// - "[a-z]*" and [0-9]+" -> not true.
+var GlobsMatch = &Builtin{
+ Name: "regex.globs_match",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+/**
+ * Strings
+ */
+
+// Concat joins an array of strings with an input string.
+var Concat = &Builtin{
+ Name: "concat",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.NewAny(
+ types.NewSet(types.S),
+ types.NewArray(nil, types.S),
+ ),
+ ),
+ types.S,
+ ),
+}
+
+// FormatInt returns the string representation of the number in the given base after converting it to an integer value.
+var FormatInt = &Builtin{
+ Name: "format_int",
+ Decl: types.NewFunction(
+ types.Args(
+ types.N,
+ types.N,
+ ),
+ types.S,
+ ),
+}
+
+// IndexOf returns the index of a substring contained inside a string
+var IndexOf = &Builtin{
+ Name: "indexof",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.N,
+ ),
+}
+
+// Substring returns the portion of a string for a given start index and a length.
+// If the length is less than zero, then substring returns the remainder of the string.
+var Substring = &Builtin{
+ Name: "substring",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.N,
+ types.N,
+ ),
+ types.S,
+ ),
+}
+
+// Contains returns true if the search string is included in the base string
+var Contains = &Builtin{
+ Name: "contains",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// StartsWith returns true if the search string begins with the base string
+var StartsWith = &Builtin{
+ Name: "startswith",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// EndsWith returns true if the search string begins with the base string
+var EndsWith = &Builtin{
+ Name: "endswith",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// Lower returns the input string but with all characters in lower-case
+var Lower = &Builtin{
+ Name: "lower",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// Upper returns the input string but with all characters in upper-case
+var Upper = &Builtin{
+ Name: "upper",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// Split returns an array containing elements of the input string split on a delimiter.
+var Split = &Builtin{
+ Name: "split",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.NewArray(nil, types.S),
+ ),
+}
+
+// Replace returns the given string with all instances of the second argument replaced
+// by the third.
+var Replace = &Builtin{
+ Name: "replace",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// ReplaceN replaces a string from a list of old, new string pairs.
+// Replacements are performed in the order they appear in the target string, without overlapping matches.
+// The old string comparisons are done in argument order.
+var ReplaceN = &Builtin{
+ Name: "strings.replace_n",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.S,
+ types.S)),
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// Trim returns the given string with all leading or trailing instances of the second
+// argument removed.
+var Trim = &Builtin{
+ Name: "trim",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// TrimLeft returns the given string with all leading instances of second argument removed.
+var TrimLeft = &Builtin{
+ Name: "trim_left",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// TrimPrefix returns the given string without the second argument prefix string.
+// If the given string doesn't start with prefix, it is returned unchanged.
+var TrimPrefix = &Builtin{
+ Name: "trim_prefix",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// TrimRight returns the given string with all trailing instances of second argument removed.
+var TrimRight = &Builtin{
+ Name: "trim_right",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// TrimSuffix returns the given string without the second argument suffix string.
+// If the given string doesn't end with suffix, it is returned unchanged.
+var TrimSuffix = &Builtin{
+ Name: "trim_suffix",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// TrimSpace return the given string with all leading and trailing white space removed.
+var TrimSpace = &Builtin{
+ Name: "trim_space",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// Sprintf returns the given string, formatted.
+var Sprintf = &Builtin{
+ Name: "sprintf",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.NewArray(nil, types.A),
+ ),
+ types.S,
+ ),
+}
+
+/**
+ * Numbers
+ */
+
+// NumbersRange returns an array of numbers in the given inclusive range.
+var NumbersRange = &Builtin{
+ Name: "numbers.range",
+ Decl: types.NewFunction(
+ types.Args(
+ types.N,
+ types.N,
+ ),
+ types.NewArray(nil, types.N),
+ ),
+}
+
+/**
+ * Units
+ */
+
+// UnitsParseBytes converts strings like 10GB, 5K, 4mb, and the like into an
+// integer number of bytes.
+var UnitsParseBytes = &Builtin{
+ Name: "units.parse_bytes",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ ),
+ types.N,
+ ),
+}
+
+//
+/**
+ * Type
+ */
+
+// UUIDRFC4122 returns a version 4 UUID string.
+var UUIDRFC4122 = &Builtin{
+ Name: "uuid.rfc4122",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+/**
+ * JSON
+ */
+
+// JSONMarshal serializes the input term.
+var JSONMarshal = &Builtin{
+ Name: "json.marshal",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.S,
+ ),
+}
+
+// JSONUnmarshal deserializes the input string.
+var JSONUnmarshal = &Builtin{
+ Name: "json.unmarshal",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.A,
+ ),
+}
+
+// JSONIsValid verifies the input string is a valid JSON document.
+var JSONIsValid = &Builtin{
+ Name: "json.is_valid",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.B,
+ ),
+}
+
+// JSONFilter filters the JSON object
+var JSONFilter = &Builtin{
+ Name: "json.filter",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ types.NewAny(
+ types.NewArray(
+ nil,
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ types.NewSet(
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// JSONRemove removes paths in the JSON object
+var JSONRemove = &Builtin{
+ Name: "json.remove",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ types.NewAny(
+ types.NewArray(
+ nil,
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ types.NewSet(
+ types.NewAny(
+ types.S,
+ types.NewArray(
+ nil,
+ types.A,
+ ),
+ ),
+ ),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// JSONPatch patches a JSON object according to RFC6902
+var JSONPatch = &Builtin{
+ Name: "json.patch",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ types.NewArray(
+ nil,
+ types.NewObject(
+ []*types.StaticProperty{
+ {Key: "op", Value: types.S},
+ {Key: "path", Value: types.A},
+ },
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// ObjectGet takes an object and returns the value under its key if
+// present, otherwise it returns the default.
+var ObjectGet = &Builtin{
+ Name: "object.get",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.A,
+ types.A,
+ ),
+ types.A,
+ ),
+}
+
+// ObjectUnion creates a new object that is the asymmetric union of two objects
+var ObjectUnion = &Builtin{
+ Name: "object.union",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// ObjectRemove Removes specified keys from an object
+var ObjectRemove = &Builtin{
+ Name: "object.remove",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ types.NewAny(
+ types.NewArray(nil, types.A),
+ types.NewSet(types.A),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// ObjectFilter filters the object by keeping only specified keys
+var ObjectFilter = &Builtin{
+ Name: "object.filter",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(types.A, types.A),
+ ),
+ types.NewAny(
+ types.NewArray(nil, types.A),
+ types.NewSet(types.A),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ ),
+ ),
+ types.A,
+ ),
+}
+
+// Base64Encode serializes the input string into base64 encoding.
+var Base64Encode = &Builtin{
+ Name: "base64.encode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// Base64Decode deserializes the base64 encoded input string.
+var Base64Decode = &Builtin{
+ Name: "base64.decode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// Base64IsValid verifies the input string is base64 encoded.
+var Base64IsValid = &Builtin{
+ Name: "base64.is_valid",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.B,
+ ),
+}
+
+// Base64UrlEncode serializes the input string into base64url encoding.
+var Base64UrlEncode = &Builtin{
+ Name: "base64url.encode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// Base64UrlEncodeNoPad serializes the input string into base64url encoding without padding.
+var Base64UrlEncodeNoPad = &Builtin{
+ Name: "base64url.encode_no_pad",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// Base64UrlDecode deserializes the base64url encoded input string.
+var Base64UrlDecode = &Builtin{
+ Name: "base64url.decode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// URLQueryDecode decodes a URL encoded input string.
+var URLQueryDecode = &Builtin{
+ Name: "urlquery.decode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// URLQueryEncode encodes the input string into a URL encoded string.
+var URLQueryEncode = &Builtin{
+ Name: "urlquery.encode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// URLQueryEncodeObject encodes the given JSON into a URL encoded query string.
+var URLQueryEncodeObject = &Builtin{
+ Name: "urlquery.encode_object",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.S,
+ types.NewAny(
+ types.S,
+ types.NewArray(nil, types.S),
+ types.NewSet(types.S))))),
+ types.S,
+ ),
+}
+
+// URLQueryDecodeObject decodes the given URL query string into an object.
+var URLQueryDecodeObject = &Builtin{
+ Name: "urlquery.decode_object",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.NewObject(nil, types.NewDynamicProperty(
+ types.S,
+ types.NewArray(nil, types.S))),
+ ),
+}
+
+// YAMLMarshal serializes the input term.
+var YAMLMarshal = &Builtin{
+ Name: "yaml.marshal",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.S,
+ ),
+}
+
+// YAMLUnmarshal deserializes the input string.
+var YAMLUnmarshal = &Builtin{
+ Name: "yaml.unmarshal",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.A,
+ ),
+}
+
+// YAMLIsValid verifies the input string is a valid YAML document.
+var YAMLIsValid = &Builtin{
+ Name: "yaml.is_valid",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.B,
+ ),
+}
+
+// HexEncode serializes the input string into hex encoding.
+var HexEncode = &Builtin{
+ Name: "hex.encode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// HexDecode deserializes the hex encoded input string.
+var HexDecode = &Builtin{
+ Name: "hex.decode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+/**
+ * Tokens
+ */
+
+// JWTDecode decodes a JSON Web Token and outputs it as an Object.
+var JWTDecode = &Builtin{
+ Name: "io.jwt.decode",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.NewArray([]types.Type{
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.S,
+ }, nil),
+ ),
+}
+
+// JWTVerifyRS256 verifies if a RS256 JWT signature is valid or not.
+var JWTVerifyRS256 = &Builtin{
+ Name: "io.jwt.verify_rs256",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyRS384 verifies if a RS384 JWT signature is valid or not.
+var JWTVerifyRS384 = &Builtin{
+ Name: "io.jwt.verify_rs384",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyRS512 verifies if a RS512 JWT signature is valid or not.
+var JWTVerifyRS512 = &Builtin{
+ Name: "io.jwt.verify_rs512",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyPS256 verifies if a PS256 JWT signature is valid or not.
+var JWTVerifyPS256 = &Builtin{
+ Name: "io.jwt.verify_ps256",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyPS384 verifies if a PS384 JWT signature is valid or not.
+var JWTVerifyPS384 = &Builtin{
+ Name: "io.jwt.verify_ps384",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyPS512 verifies if a PS512 JWT signature is valid or not.
+var JWTVerifyPS512 = &Builtin{
+ Name: "io.jwt.verify_ps512",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyES256 verifies if a ES256 JWT signature is valid or not.
+var JWTVerifyES256 = &Builtin{
+ Name: "io.jwt.verify_es256",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyES384 verifies if a ES384 JWT signature is valid or not.
+var JWTVerifyES384 = &Builtin{
+ Name: "io.jwt.verify_es384",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyES512 verifies if a ES512 JWT signature is valid or not.
+var JWTVerifyES512 = &Builtin{
+ Name: "io.jwt.verify_es512",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyHS256 verifies if a HS256 (secret) JWT signature is valid or not.
+var JWTVerifyHS256 = &Builtin{
+ Name: "io.jwt.verify_hs256",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyHS384 verifies if a HS384 (secret) JWT signature is valid or not.
+var JWTVerifyHS384 = &Builtin{
+ Name: "io.jwt.verify_hs384",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTVerifyHS512 verifies if a HS512 (secret) JWT signature is valid or not.
+var JWTVerifyHS512 = &Builtin{
+ Name: "io.jwt.verify_hs512",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// JWTDecodeVerify verifies a JWT signature under parameterized constraints and decodes the claims if it is valid.
+var JWTDecodeVerify = &Builtin{
+ Name: "io.jwt.decode_verify",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ ),
+ types.NewArray([]types.Type{
+ types.B,
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ }, nil),
+ ),
+}
+
+// JWTEncodeSignRaw encodes and optionally signs a JSON Web Token.
+// Inputs are protected headers, payload, secret
+var JWTEncodeSignRaw = &Builtin{
+ Name: "io.jwt.encode_sign_raw",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+// JWTEncodeSign encodes and optionally signs a JSON Web Token.
+// Inputs are protected headers, payload, secret
+var JWTEncodeSign = &Builtin{
+ Name: "io.jwt.encode_sign",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ ),
+ types.S,
+ ),
+}
+
+/**
+ * Time
+ */
+
+// NowNanos returns the current time since epoch in nanoseconds.
+var NowNanos = &Builtin{
+ Name: "time.now_ns",
+ Decl: types.NewFunction(
+ nil,
+ types.N,
+ ),
+}
+
+// ParseNanos returns the time in nanoseconds parsed from the string in the given format.
+var ParseNanos = &Builtin{
+ Name: "time.parse_ns",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.N,
+ ),
+}
+
+// ParseRFC3339Nanos returns the time in nanoseconds parsed from the string in RFC3339 format.
+var ParseRFC3339Nanos = &Builtin{
+ Name: "time.parse_rfc3339_ns",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.N,
+ ),
+}
+
+// ParseDurationNanos returns the duration in nanoseconds represented by a duration string.
+// Duration string is similar to the Go time.ParseDuration string
+var ParseDurationNanos = &Builtin{
+ Name: "time.parse_duration_ns",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.N,
+ ),
+}
+
+// Date returns the [year, month, day] for the nanoseconds since epoch.
+var Date = &Builtin{
+ Name: "time.date",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ ),
+ ),
+ types.NewArray([]types.Type{types.N, types.N, types.N}, nil),
+ ),
+}
+
+// Clock returns the [hour, minute, second] of the day for the nanoseconds since epoch.
+var Clock = &Builtin{
+ Name: "time.clock",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ ),
+ ),
+ types.NewArray([]types.Type{types.N, types.N, types.N}, nil),
+ ),
+}
+
+// Weekday returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.
+var Weekday = &Builtin{
+ Name: "time.weekday",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ ),
+ ),
+ types.S,
+ ),
+}
+
+// AddDate returns the nanoseconds since epoch after adding years, months and days to nanoseconds.
+var AddDate = &Builtin{
+ Name: "time.add_date",
+ Decl: types.NewFunction(
+ types.Args(
+ types.N,
+ types.N,
+ types.N,
+ types.N,
+ ),
+ types.N,
+ ),
+}
+
+/**
+ * Crypto.
+ */
+
+// CryptoX509ParseCertificates returns one or more certificates from the given
+// base64 encoded string containing DER encoded certificates that have been
+// concatenated.
+var CryptoX509ParseCertificates = &Builtin{
+ Name: "crypto.x509.parse_certificates",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))),
+ ),
+}
+
+// CryptoX509ParseCertificateRequest returns a PKCS #10 certificate signing
+// request from the given PEM-encoded PKCS#10 certificate signing request.
+var CryptoX509ParseCertificateRequest = &Builtin{
+ Name: "crypto.x509.parse_certificate_request",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ ),
+}
+
+// CryptoMd5 returns a string representing the input string hashed with the md5 function
+var CryptoMd5 = &Builtin{
+ Name: "crypto.md5",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// CryptoSha1 returns a string representing the input string hashed with the sha1 function
+var CryptoSha1 = &Builtin{
+ Name: "crypto.sha1",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+// CryptoSha256 returns a string representing the input string hashed with the sha256 function
+var CryptoSha256 = &Builtin{
+ Name: "crypto.sha256",
+ Decl: types.NewFunction(
+ types.Args(types.S),
+ types.S,
+ ),
+}
+
+/**
+ * Graphs.
+ */
+
+// WalkBuiltin generates [path, value] tuples for all nested documents
+// (recursively).
+var WalkBuiltin = &Builtin{
+ Name: "walk",
+ Relation: true,
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewArray(
+ []types.Type{
+ types.NewArray(nil, types.A),
+ types.A,
+ },
+ nil,
+ ),
+ ),
+}
+
+// ReachableBuiltin computes the set of reachable nodes in the graph from a set
+// of starting nodes.
+var ReachableBuiltin = &Builtin{
+ Name: "graph.reachable",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.A,
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A)),
+ )),
+ types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A)),
+ ),
+ types.NewSet(types.A),
+ ),
+}
+
+/**
+ * Sorting
+ */
+
+// Sort returns a sorted array.
+var Sort = &Builtin{
+ Name: "sort",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.NewArray(nil, types.A),
+ types.NewSet(types.A),
+ ),
+ ),
+ types.NewArray(nil, types.A),
+ ),
+}
+
+/**
+ * Type
+ */
+
+// IsNumber returns true if the input value is a number
+var IsNumber = &Builtin{
+ Name: "is_number",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// IsString returns true if the input value is a string.
+var IsString = &Builtin{
+ Name: "is_string",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// IsBoolean returns true if the input value is a boolean.
+var IsBoolean = &Builtin{
+ Name: "is_boolean",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// IsArray returns true if the input value is an array.
+var IsArray = &Builtin{
+ Name: "is_array",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// IsSet returns true if the input value is a set.
+var IsSet = &Builtin{
+ Name: "is_set",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// IsObject returns true if the input value is an object.
+var IsObject = &Builtin{
+ Name: "is_object",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// IsNull returns true if the input value is null.
+var IsNull = &Builtin{
+ Name: "is_null",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+/**
+ * Type Name
+ */
+
+// TypeNameBuiltin returns the type of the input.
+var TypeNameBuiltin = &Builtin{
+ Name: "type_name",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewAny(
+ types.A,
+ ),
+ ),
+ types.S,
+ ),
+}
+
+/**
+ * HTTP Request
+ */
+
+// HTTPSend returns a HTTP response to the given HTTP request.
+var HTTPSend = &Builtin{
+ Name: "http.send",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ ),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ ),
+}
+
+/**
+ * Rego
+ */
+
+// RegoParseModule parses the input Rego file and returns a JSON representation
+// of the AST.
+var RegoParseModule = &Builtin{
+ Name: "rego.parse_module",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)), // TODO(tsandall): import AST schema
+ ),
+}
+
+/**
+ * OPA
+ */
+
+// OPARuntime returns an object containing OPA runtime information such as the
+// configuration that OPA was booted with.
+var OPARuntime = &Builtin{
+ Name: "opa.runtime",
+ Decl: types.NewFunction(
+ nil,
+ types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)),
+ ),
+}
+
+/**
+ * Trace
+ */
+
+// Trace prints a note that is included in the query explanation.
+var Trace = &Builtin{
+ Name: "trace",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+/**
+ * Set
+ */
+
+// Intersection returns the intersection of the given input sets
+var Intersection = &Builtin{
+ Name: "intersection",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewSet(types.NewSet(types.A)),
+ ),
+ types.NewSet(types.A),
+ ),
+}
+
+// Union returns the union of the given input sets
+var Union = &Builtin{
+ Name: "union",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewSet(types.NewSet(types.A)),
+ ),
+ types.NewSet(types.A),
+ ),
+}
+
+/**
+ * Glob
+ */
+
+// GlobMatch - not to be confused with regex.globs_match - parses and matches strings against the glob notation.
+var GlobMatch = &Builtin{
+ Name: "glob.match",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.NewArray(nil, types.S),
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// GlobQuoteMeta returns a string which represents a version of the pattern where all asterisks have been escaped.
+var GlobQuoteMeta = &Builtin{
+ Name: "glob.quote_meta",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ ),
+ types.S,
+ ),
+}
+
+/**
+ * Net CIDR
+ */
+
+// NetCIDRIntersects checks if a cidr intersects with another cidr and returns true or false
+var NetCIDRIntersects = &Builtin{
+ Name: "net.cidr_intersects",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// NetCIDRExpand returns a set of hosts inside the specified cidr.
+var NetCIDRExpand = &Builtin{
+ Name: "net.cidr_expand",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ ),
+ types.NewSet(types.S),
+ ),
+}
+
+// NetCIDRContains checks if a cidr or ip is contained within another cidr and returns true or false
+var NetCIDRContains = &Builtin{
+ Name: "net.cidr_contains",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// NetCIDRContainsMatches checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches.
+var NetCIDRContainsMatches = &Builtin{
+ Name: "net.cidr_contains_matches",
+ Decl: types.NewFunction(
+ types.Args(netCidrContainsMatchesOperandType, netCidrContainsMatchesOperandType),
+ types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil)),
+ ),
+}
+
+// NetCIDRMerge merges IP addresses and subnets into the smallest possible list of CIDRs.
+var NetCIDRMerge = &Builtin{
+ Name: "net.cidr_merge",
+ Decl: types.NewFunction(
+ types.Args(netCidrMergeOperandType),
+ types.NewSet(types.S),
+ ),
+}
+
+var netCidrMergeOperandType = types.NewAny(
+ types.NewArray(nil, types.NewAny(types.S)),
+ types.NewSet(types.S),
+)
+
+var netCidrContainsMatchesOperandType = types.NewAny(
+ types.S,
+ types.NewArray(nil, types.NewAny(
+ types.S,
+ types.NewArray(nil, types.A),
+ )),
+ types.NewSet(types.NewAny(
+ types.S,
+ types.NewArray(nil, types.A),
+ )),
+ types.NewObject(nil, types.NewDynamicProperty(
+ types.S,
+ types.NewAny(
+ types.S,
+ types.NewArray(nil, types.A),
+ ),
+ )),
+)
+
+/**
+ * Semantic Versions
+ */
+
+// SemVerIsValid validates that the term is a valid SemVer string, returning
+// false for all other input.
+var SemVerIsValid = &Builtin{
+ Name: "semver.is_valid",
+ Decl: types.NewFunction(
+ types.Args(
+ types.A,
+ ),
+ types.B,
+ ),
+}
+
+// SemVerCompare compares valid SemVer formatted version strings. Given two
+// version strings, if A < B returns -1, if A > B returns 1. If A == B, returns
+// 0
+var SemVerCompare = &Builtin{
+ Name: "semver.compare",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.N,
+ ),
+}
+
+/**
+ * Deprecated built-ins.
+ */
+
+// SetDiff has been replaced by the minus built-in.
+var SetDiff = &Builtin{
+ Name: "set_diff",
+ Decl: types.NewFunction(
+ types.Args(
+ types.NewSet(types.A),
+ types.NewSet(types.A),
+ ),
+ types.NewSet(types.A),
+ ),
+}
+
+// NetCIDROverlap has been replaced by the `net.cidr_contains` built-in.
+var NetCIDROverlap = &Builtin{
+ Name: "net.cidr_overlap",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// CastArray checks the underlying type of the input. If it is array or set, an array
+// containing the values is returned. If it is not an array, an error is thrown.
+var CastArray = &Builtin{
+ Name: "cast_array",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewArray(nil, types.A),
+ ),
+}
+
+// CastSet checks the underlying type of the input.
+// If it is a set, the set is returned.
+// If it is an array, the array is returned in set form (all duplicates removed)
+// If neither, an error is thrown
+var CastSet = &Builtin{
+ Name: "cast_set",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewSet(types.A),
+ ),
+}
+
+// CastString returns input if it is a string; if not returns error.
+// For formatting variables, see sprintf
+var CastString = &Builtin{
+ Name: "cast_string",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.S,
+ ),
+}
+
+// CastBoolean returns input if it is a boolean; if not returns error.
+var CastBoolean = &Builtin{
+ Name: "cast_boolean",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.B,
+ ),
+}
+
+// CastNull returns null if input is null; if not returns error.
+var CastNull = &Builtin{
+ Name: "cast_null",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewNull(),
+ ),
+}
+
+// CastObject returns the given input if it is an object; throws an error otherwise
+var CastObject = &Builtin{
+ Name: "cast_object",
+ Decl: types.NewFunction(
+ types.Args(types.A),
+ types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)),
+ ),
+}
+
+// RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead.
+var RegexMatchDeprecated = &Builtin{
+ Name: "re_match",
+ Decl: types.NewFunction(
+ types.Args(
+ types.S,
+ types.S,
+ ),
+ types.B,
+ ),
+}
+
+// Builtin represents a built-in function supported by OPA. Every built-in
+// function is uniquely identified by a name.
+type Builtin struct {
+	Name     string          `json:"name"`               // Unique name of built-in function, e.g., <name>(arg1,arg2,...,argN)
+ Decl *types.Function `json:"decl"` // Built-in function type declaration.
+ Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset.
+ Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation.
+}
+
+// Expr creates a new expression for the built-in with the given operands.
+func (b *Builtin) Expr(operands ...*Term) *Expr {
+ ts := make([]*Term, len(operands)+1)
+ ts[0] = NewTerm(b.Ref())
+ for i := range operands {
+ ts[i+1] = operands[i]
+ }
+ return &Expr{
+ Terms: ts,
+ }
+}
+
+// Call creates a new term for the built-in with the given operands.
+func (b *Builtin) Call(operands ...*Term) *Term {
+ call := make(Call, len(operands)+1)
+ call[0] = NewTerm(b.Ref())
+ for i := range operands {
+ call[i+1] = operands[i]
+ }
+ return NewTerm(call)
+}
+
+// Ref returns a Ref that refers to the built-in function.
+func (b *Builtin) Ref() Ref {
+ parts := strings.Split(b.Name, ".")
+ ref := make(Ref, len(parts))
+ ref[0] = VarTerm(parts[0])
+ for i := 1; i < len(parts); i++ {
+ ref[i] = StringTerm(parts[i])
+ }
+ return ref
+}
+
+// IsTargetPos returns true if a variable in the i-th position will be bound by
+// evaluating the call expression.
+func (b *Builtin) IsTargetPos(i int) bool {
+ return len(b.Decl.Args()) == i
+}
+
+func init() {
+ BuiltinMap = map[string]*Builtin{}
+ for _, b := range DefaultBuiltins {
+ RegisterBuiltin(b)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/capabilities.go b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go
new file mode 100644
index 00000000..50765b61
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/capabilities.go
@@ -0,0 +1,43 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "io"
+ "sort"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+// Capabilities defines a structure containing data that describes the capabilities
+// or features supported by a particular version of OPA.
+type Capabilities struct {
+ Builtins []*Builtin `json:"builtins"` // builtins is a set of built-in functions that are supported.
+}
+
+// CapabilitiesForThisVersion returns the capabilities of this version of OPA.
+func CapabilitiesForThisVersion() *Capabilities {
+
+ f := &Capabilities{
+ Builtins: []*Builtin{},
+ }
+
+ for _, bi := range Builtins {
+ f.Builtins = append(f.Builtins, bi)
+ }
+
+ sort.Slice(f.Builtins, func(i, j int) bool {
+ return f.Builtins[i].Name < f.Builtins[j].Name
+ })
+
+ return f
+}
+
+// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r.
+func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) {
+ d := util.NewJSONDecoder(r)
+ var c Capabilities
+ return &c, d.Decode(&c)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/check.go b/vendor/github.com/open-policy-agent/opa/ast/check.go
new file mode 100644
index 00000000..b77a291a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/check.go
@@ -0,0 +1,992 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/open-policy-agent/opa/types"
+ "github.com/open-policy-agent/opa/util"
+)
+
+type rewriteVars func(x Ref) Ref
+
+// exprChecker defines the interface for executing type checking on a single
+// expression. The exprChecker must update the provided TypeEnv with inferred
+// types of vars.
+type exprChecker func(*TypeEnv, *Expr) *Error
+
+// typeChecker implements type checking on queries and rules. Errors are
+// accumulated on the typeChecker so that a single run can report multiple
+// issues.
+type typeChecker struct {
+ errs Errors
+ exprCheckers map[string]exprChecker
+ varRewriter rewriteVars
+}
+
+// newTypeChecker returns a new typeChecker object that has no errors.
+func newTypeChecker() *typeChecker {
+ tc := &typeChecker{}
+ tc.exprCheckers = map[string]exprChecker{
+ "eq": tc.checkExprEq,
+ }
+ return tc
+}
+
+func (tc *typeChecker) WithVarRewriter(f rewriteVars) *typeChecker {
+ tc.varRewriter = f
+ return tc
+}
+
+// CheckBody runs type checking on the body and returns a TypeEnv if no errors
+// are found. The resulting TypeEnv wraps the provided one. The resulting
+// TypeEnv will be able to resolve types of vars contained in the body.
+func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) {
+
+ errors := []*Error{}
+
+ if env == nil {
+ env = NewTypeEnv()
+ } else {
+ env = env.wrap()
+ }
+
+ WalkExprs(body, func(expr *Expr) bool {
+
+ closureErrs := tc.checkClosures(env, expr)
+ for _, err := range closureErrs {
+ errors = append(errors, err)
+ }
+
+ hasClosureErrors := len(closureErrs) > 0
+
+ vis := newRefChecker(env, tc.varRewriter)
+ NewGenericVisitor(vis.Visit).Walk(expr)
+ for _, err := range vis.errs {
+ errors = append(errors, err)
+ }
+
+ hasRefErrors := len(vis.errs) > 0
+
+ if err := tc.checkExpr(env, expr); err != nil {
+ // Suppress this error if a more actionable one has occurred. In
+ // this case, if an error occurred in a ref or closure contained in
+ // this expression, and the error is due to a nil type, then it's
+ // likely to be the result of the more specific error.
+ skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err)
+ if !skip {
+ errors = append(errors, err)
+ }
+ }
+ return true
+ })
+
+ tc.err(errors)
+ return env, errors
+}
+
+// CheckTypes runs type checking on the rules returns a TypeEnv if no errors
+// are found. The resulting TypeEnv wraps the provided one. The resulting
+// TypeEnv will be able to resolve types of refs that refer to rules.
+func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T) (*TypeEnv, Errors) {
+ if env == nil {
+ env = NewTypeEnv()
+ } else {
+ env = env.wrap()
+ }
+ for _, s := range sorted {
+ tc.checkRule(env, s.(*Rule))
+ }
+ tc.errs.Sort()
+ return env, tc.errs
+}
+
+func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors {
+ var result Errors
+ WalkClosures(expr, func(x interface{}) bool {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ _, errs := newTypeChecker().WithVarRewriter(tc.varRewriter).CheckBody(env, x.Body)
+ if len(errs) > 0 {
+ result = errs
+ return true
+ }
+ case *SetComprehension:
+ _, errs := newTypeChecker().WithVarRewriter(tc.varRewriter).CheckBody(env, x.Body)
+ if len(errs) > 0 {
+ result = errs
+ return true
+ }
+ case *ObjectComprehension:
+ _, errs := newTypeChecker().WithVarRewriter(tc.varRewriter).CheckBody(env, x.Body)
+ if len(errs) > 0 {
+ result = errs
+ return true
+ }
+ }
+ return false
+ })
+ return result
+}
+
+func (tc *typeChecker) checkLanguageBuiltins(env *TypeEnv, builtins map[string]*Builtin) *TypeEnv {
+ if env == nil {
+ env = NewTypeEnv()
+ } else {
+ env = env.wrap()
+ }
+ for _, bi := range builtins {
+ env.tree.Put(bi.Ref(), bi.Decl)
+ }
+ return env
+}
+
+func (tc *typeChecker) checkRule(env *TypeEnv, rule *Rule) {
+
+ cpy, err := tc.CheckBody(env, rule.Body)
+
+ if len(err) == 0 {
+
+ path := rule.Path()
+ var tpe types.Type
+
+ if len(rule.Head.Args) > 0 {
+
+ // If args are not referred to in body, infer as any.
+ WalkVars(rule.Head.Args, func(v Var) bool {
+ if cpy.Get(v) == nil {
+ cpy.tree.PutOne(v, types.A)
+ }
+ return false
+ })
+
+ // Construct function type.
+ args := make([]types.Type, len(rule.Head.Args))
+ for i := 0; i < len(rule.Head.Args); i++ {
+ args[i] = cpy.Get(rule.Head.Args[i])
+ }
+
+ f := types.NewFunction(args, cpy.Get(rule.Head.Value))
+
+ // Union with existing.
+ exist := env.tree.Get(path)
+ tpe = types.Or(exist, f)
+
+ } else {
+ switch rule.Head.DocKind() {
+ case CompleteDoc:
+ typeV := cpy.Get(rule.Head.Value)
+ if typeV != nil {
+ exist := env.tree.Get(path)
+ tpe = types.Or(typeV, exist)
+ }
+ case PartialObjectDoc:
+ typeK := cpy.Get(rule.Head.Key)
+ typeV := cpy.Get(rule.Head.Value)
+ if typeK != nil && typeV != nil {
+ exist := env.tree.Get(path)
+ typeV = types.Or(types.Values(exist), typeV)
+ typeK = types.Or(types.Keys(exist), typeK)
+ tpe = types.NewObject(nil, types.NewDynamicProperty(typeK, typeV))
+ }
+ case PartialSetDoc:
+ typeK := cpy.Get(rule.Head.Key)
+ if typeK != nil {
+ exist := env.tree.Get(path)
+ typeK = types.Or(types.Keys(exist), typeK)
+ tpe = types.NewSet(typeK)
+ }
+ }
+ }
+
+ if tpe != nil {
+ env.tree.Put(path, tpe)
+ }
+ } else {
+ // if the rule/function contains an error, add it to the type env
+ // so that expressions that refer to this rule/function
+ // do not encounter type errors
+ env.tree.Put(rule.Path(), types.A)
+ }
+}
+
+func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error {
+ if !expr.IsCall() {
+ return nil
+ }
+
+ checker := tc.exprCheckers[expr.Operator().String()]
+ if checker != nil {
+ return checker(env, expr)
+ }
+
+ return tc.checkExprBuiltin(env, expr)
+}
+
+func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error {
+
+ args := expr.Operands()
+ pre := getArgTypes(env, args)
+
+ // NOTE(tsandall): undefined functions will have been caught earlier in the
+ // compiler. We check for undefined functions before the safety check so
+ // that references to non-existent functions result in undefined function
+ // errors as opposed to unsafe var errors.
+ //
+ // We cannot run type checking before the safety check because part of the
+ // type checker relies on reordering (in particular for references to local
+ // vars).
+ name := expr.Operator()
+ tpe := env.Get(name)
+
+ if tpe == nil {
+ return NewError(TypeErr, expr.Location, "undefined function %v", name)
+ }
+
+ // check if the expression refers to a function that contains an error
+ _, ok := tpe.(types.Any)
+ if ok {
+ return nil
+ }
+
+ ftpe, ok := tpe.(*types.Function)
+ if !ok {
+ return NewError(TypeErr, expr.Location, "undefined function %v", name)
+ }
+
+ maxArgs := len(ftpe.Args())
+ expArgs := ftpe.Args()
+
+ if ftpe.Result() != nil {
+ maxArgs++
+ expArgs = append(expArgs, ftpe.Result())
+ }
+
+ if len(args) > maxArgs {
+ return newArgError(expr.Location, name, "too many arguments", pre, expArgs)
+ } else if len(args) < len(ftpe.Args()) {
+ return newArgError(expr.Location, name, "too few arguments", pre, expArgs)
+ }
+
+ for i := range args {
+ if !unify1(env, args[i], expArgs[i], false) {
+ post := make([]types.Type, len(args))
+ for i := range args {
+ post[i] = env.Get(args[i])
+ }
+ return newArgError(expr.Location, name, "invalid argument(s)", post, expArgs)
+ }
+ }
+
+ return nil
+}
+
+func (tc *typeChecker) checkExprEq(env *TypeEnv, expr *Expr) *Error {
+
+ pre := getArgTypes(env, expr.Operands())
+ exp := Equality.Decl.Args()
+
+ if len(pre) < len(exp) {
+ return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, exp)
+ } else if len(exp) < len(pre) {
+ return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, exp)
+ }
+
+ a, b := expr.Operand(0), expr.Operand(1)
+ typeA, typeB := env.Get(a), env.Get(b)
+
+ if !unify2(env, a, typeA, b, typeB) {
+ err := NewError(TypeErr, expr.Location, "match error")
+ err.Details = &UnificationErrDetail{
+ Left: typeA,
+ Right: typeB,
+ }
+ return err
+ }
+
+ return nil
+}
+
+func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
+
+ nilA := types.Nil(typeA)
+ nilB := types.Nil(typeB)
+
+ if nilA && !nilB {
+ return unify1(env, a, typeB, false)
+ } else if nilB && !nilA {
+ return unify1(env, b, typeA, false)
+ } else if !nilA && !nilB {
+ return unifies(typeA, typeB)
+ }
+
+ switch a.Value.(type) {
+ case *Array:
+ return unify2Array(env, a, typeA, b, typeB)
+ case *object:
+ return unify2Object(env, a, typeA, b, typeB)
+ case Var:
+ switch b.Value.(type) {
+ case Var:
+ return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
+ case *Array:
+ return unify2Array(env, b, typeB, a, typeA)
+ case *object:
+ return unify2Object(env, b, typeB, a, typeA)
+ }
+ }
+
+ return false
+}
+
+func unify2Array(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
+ arr := a.Value.(*Array)
+ switch bv := b.Value.(type) {
+ case *Array:
+ if arr.Len() == bv.Len() {
+ for i := 0; i < arr.Len(); i++ {
+ if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) {
+ return false
+ }
+ }
+ return true
+ }
+ case Var:
+ return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
+ }
+ return false
+}
+
+func unify2Object(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool {
+ obj := a.Value.(Object)
+ switch bv := b.Value.(type) {
+ case *object:
+ cv := obj.Intersect(bv)
+ if obj.Len() == bv.Len() && bv.Len() == len(cv) {
+ for i := range cv {
+ if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) {
+ return false
+ }
+ }
+ return true
+ }
+ case Var:
+ return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false)
+ }
+ return false
+}
+
+func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool {
+ switch v := term.Value.(type) {
+ case *Array:
+ switch tpe := tpe.(type) {
+ case *types.Array:
+ return unify1Array(env, v, tpe, union)
+ case types.Any:
+ if types.Compare(tpe, types.A) == 0 {
+ for i := 0; i < v.Len(); i++ {
+ unify1(env, v.Elem(i), types.A, true)
+ }
+ return true
+ }
+ unifies := false
+ for i := range tpe {
+ unifies = unify1(env, term, tpe[i], true) || unifies
+ }
+ return unifies
+ }
+ return false
+ case *object:
+ switch tpe := tpe.(type) {
+ case *types.Object:
+ return unify1Object(env, v, tpe, union)
+ case types.Any:
+ if types.Compare(tpe, types.A) == 0 {
+ v.Foreach(func(key, value *Term) {
+ unify1(env, key, types.A, true)
+ unify1(env, value, types.A, true)
+ })
+ return true
+ }
+ unifies := false
+ for i := range tpe {
+ unifies = unify1(env, term, tpe[i], true) || unifies
+ }
+ return unifies
+ }
+ return false
+ case Set:
+ switch tpe := tpe.(type) {
+ case *types.Set:
+ return unify1Set(env, v, tpe, union)
+ case types.Any:
+ if types.Compare(tpe, types.A) == 0 {
+ v.Foreach(func(elem *Term) {
+ unify1(env, elem, types.A, true)
+ })
+ return true
+ }
+ unifies := false
+ for i := range tpe {
+ unifies = unify1(env, term, tpe[i], true) || unifies
+ }
+ return unifies
+ }
+ return false
+ case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return unifies(env.Get(v), tpe)
+ case Var:
+ if !union {
+ if exist := env.Get(v); exist != nil {
+ return unifies(exist, tpe)
+ }
+ env.tree.PutOne(term.Value, tpe)
+ } else {
+ env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe))
+ }
+ return true
+ default:
+ if !IsConstant(v) {
+ panic("unreachable")
+ }
+ return unifies(env.Get(term), tpe)
+ }
+}
+
+func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool {
+ if val.Len() != tpe.Len() && tpe.Dynamic() == nil {
+ return false
+ }
+ for i := 0; i < val.Len(); i++ {
+ if !unify1(env, val.Elem(i), tpe.Select(i), union) {
+ return false
+ }
+ }
+ return true
+}
+
+func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool {
+ if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil {
+ return false
+ }
+ stop := val.Until(func(k, v *Term) bool {
+ if IsConstant(k.Value) {
+ if child := selectConstant(tpe, k); child != nil {
+ if !unify1(env, v, child, union) {
+ return true
+ }
+ } else {
+ return true
+ }
+ } else {
+ // Inferring type of value under dynamic key would involve unioning
+ // with all property values of tpe whose keys unify. For now, type
+ // these values as Any. We can investigate stricter inference in
+ // the future.
+ unify1(env, v, types.A, union)
+ }
+ return false
+ })
+ return !stop
+}
+
+func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool {
+ of := types.Values(tpe)
+ return !val.Until(func(elem *Term) bool {
+ return !unify1(env, elem, of, union)
+ })
+}
+
+func (tc *typeChecker) err(errors []*Error) {
+ tc.errs = append(tc.errs, errors...)
+}
+
+type refChecker struct {
+ env *TypeEnv
+ errs Errors
+ varRewriter rewriteVars
+}
+
+func newRefChecker(env *TypeEnv, f rewriteVars) *refChecker {
+
+ if f == nil {
+ f = rewriteVarsNop
+ }
+
+ return &refChecker{
+ env: env,
+ errs: nil,
+ varRewriter: f,
+ }
+}
+
+func (rc *refChecker) Visit(x interface{}) bool {
+ switch x := x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return true
+ case *Expr:
+ switch terms := x.Terms.(type) {
+ case []*Term:
+ for i := 1; i < len(terms); i++ {
+ NewGenericVisitor(rc.Visit).Walk(terms[i])
+ }
+ return true
+ case *Term:
+ NewGenericVisitor(rc.Visit).Walk(terms)
+ return true
+ }
+ case Ref:
+ if err := rc.checkApply(rc.env, x); err != nil {
+ rc.errs = append(rc.errs, err)
+ return true
+ }
+ if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil {
+ rc.errs = append(rc.errs, err)
+ }
+ }
+ return false
+}
+
+func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error {
+ if tpe := curr.Get(ref); tpe != nil {
+ if _, ok := tpe.(*types.Function); ok {
+ return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe)
+ }
+ }
+ return nil
+}
+
+func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error {
+
+ if idx == len(ref) {
+ return nil
+ }
+
+ head := ref[idx]
+
+ // Handle constant ref operands, i.e., strings or the ref head.
+ if _, ok := head.Value.(String); ok || idx == 0 {
+
+ child := node.Child(head.Value)
+ if child == nil {
+
+ if curr.next != nil {
+ next := curr.next
+ return rc.checkRef(next, next.tree, ref, 0)
+ }
+
+ if RootDocumentNames.Contains(ref[0]) {
+ return rc.checkRefLeaf(types.A, ref, 1)
+ }
+
+ return rc.checkRefLeaf(types.A, ref, 0)
+ }
+
+ if child.Leaf() {
+ return rc.checkRefLeaf(child.Value(), ref, idx+1)
+ }
+
+ return rc.checkRef(curr, child, ref, idx+1)
+ }
+
+ // Handle dynamic ref operands.
+ switch value := head.Value.(type) {
+
+ case Var:
+
+ if exist := rc.env.Get(value); exist != nil {
+ if !unifies(types.S, exist) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, types.S, getOneOfForNode(node))
+ }
+ } else {
+ rc.env.tree.PutOne(value, types.S)
+ }
+
+ case Ref:
+
+ exist := rc.env.Get(value)
+ if exist == nil {
+ // If ref type is unknown, an error will already be reported so
+ // stop here.
+ return nil
+ }
+
+ if !unifies(types.S, exist) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, types.S, getOneOfForNode(node))
+ }
+
+ // Catch other ref operand types here. Non-leaf nodes must be referred to
+ // with string values.
+ default:
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.S, getOneOfForNode(node))
+ }
+
+ // Run checking on remaining portion of the ref. Note, since the ref
+ // potentially refers to data for which no type information exists,
+ // checking should never fail.
+ node.Children().Iter(func(_, child util.T) bool {
+ rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1)
+ return false
+ })
+
+ return nil
+}
+
+func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error {
+
+ if idx == len(ref) {
+ return nil
+ }
+
+ head := ref[idx]
+
+ keys := types.Keys(tpe)
+ if keys == nil {
+ return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe)
+ }
+
+ switch value := head.Value.(type) {
+
+ case Var:
+ if exist := rc.env.Get(value); exist != nil {
+ if !unifies(exist, keys) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
+ }
+ } else {
+ rc.env.tree.PutOne(value, types.Keys(tpe))
+ }
+
+ case Ref:
+ if exist := rc.env.Get(value); exist != nil {
+ if !unifies(exist, keys) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe))
+ }
+ }
+
+ case *Array, Object, Set:
+ if !unify1(rc.env, head, keys, false) {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil)
+ }
+
+ default:
+ child := selectConstant(tpe, head)
+ if child == nil {
+ return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe))
+ }
+ return rc.checkRefLeaf(child, ref, idx+1)
+ }
+
+ return rc.checkRefLeaf(types.Values(tpe), ref, idx+1)
+}
+
+func unifies(a, b types.Type) bool {
+
+ if a == nil || b == nil {
+ return false
+ }
+
+ anyA, ok1 := a.(types.Any)
+ if ok1 {
+ if unifiesAny(anyA, b) {
+ return true
+ }
+ }
+
+ anyB, ok2 := b.(types.Any)
+ if ok2 {
+ if unifiesAny(anyB, a) {
+ return true
+ }
+ }
+
+ if ok1 || ok2 {
+ return false
+ }
+
+ switch a := a.(type) {
+ case types.Null:
+ _, ok := b.(types.Null)
+ return ok
+ case types.Boolean:
+ _, ok := b.(types.Boolean)
+ return ok
+ case types.Number:
+ _, ok := b.(types.Number)
+ return ok
+ case types.String:
+ _, ok := b.(types.String)
+ return ok
+ case *types.Array:
+ b, ok := b.(*types.Array)
+ if !ok {
+ return false
+ }
+ return unifiesArrays(a, b)
+ case *types.Object:
+ b, ok := b.(*types.Object)
+ if !ok {
+ return false
+ }
+ return unifiesObjects(a, b)
+ case *types.Set:
+ b, ok := b.(*types.Set)
+ if !ok {
+ return false
+ }
+ return unifies(types.Values(a), types.Values(b))
+ case *types.Function:
+ // TODO(tsandall): revisit once functions become first-class values.
+ return false
+ default:
+ panic("unreachable")
+ }
+}
+
+func unifiesAny(a types.Any, b types.Type) bool {
+ if _, ok := b.(*types.Function); ok {
+ return false
+ }
+ for i := range a {
+ if unifies(a[i], b) {
+ return true
+ }
+ }
+ return len(a) == 0
+}
+
+func unifiesArrays(a, b *types.Array) bool {
+
+ if !unifiesArraysStatic(a, b) {
+ return false
+ }
+
+ if !unifiesArraysStatic(b, a) {
+ return false
+ }
+
+ return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic())
+}
+
+func unifiesArraysStatic(a, b *types.Array) bool {
+ if a.Len() != 0 {
+ for i := 0; i < a.Len(); i++ {
+ if !unifies(a.Select(i), b.Select(i)) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func unifiesObjects(a, b *types.Object) bool {
+ if !unifiesObjectsStatic(a, b) {
+ return false
+ }
+
+ if !unifiesObjectsStatic(b, a) {
+ return false
+ }
+
+ return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue())
+}
+
+func unifiesObjectsStatic(a, b *types.Object) bool {
+ for _, k := range a.Keys() {
+ if !unifies(a.Select(k), b.Select(k)) {
+ return false
+ }
+ }
+ return true
+}
+
+// typeErrorCause defines an interface to determine the reason for a type
+// error. The type error details implement this interface so that type checking
+// can report more actionable errors.
+type typeErrorCause interface {
+ nilType() bool
+}
+
+func causedByNilType(err *Error) bool {
+ cause, ok := err.Details.(typeErrorCause)
+ if !ok {
+ return false
+ }
+ return cause.nilType()
+}
+
+// ArgErrDetail represents a generic argument error.
+type ArgErrDetail struct {
+ Have []types.Type `json:"have"`
+ Want []types.Type `json:"want"`
+}
+
+// Lines returns the string representation of the detail.
+func (d *ArgErrDetail) Lines() []string {
+ lines := make([]string, 2)
+ lines[0] = fmt.Sprint("have: ", formatArgs(d.Have))
+ lines[1] = fmt.Sprint("want: ", formatArgs(d.Want))
+ return lines
+}
+
+func (d *ArgErrDetail) nilType() bool {
+ for i := range d.Have {
+ if types.Nil(d.Have[i]) {
+ return true
+ }
+ }
+ return false
+}
+
+// UnificationErrDetail describes a type mismatch error when two values are
+// unified (e.g., x = [1,2,y]).
+type UnificationErrDetail struct {
+ Left types.Type `json:"a"`
+ Right types.Type `json:"b"`
+}
+
+func (a *UnificationErrDetail) nilType() bool {
+ return types.Nil(a.Left) || types.Nil(a.Right)
+}
+
+// Lines returns the string representation of the detail.
+func (a *UnificationErrDetail) Lines() []string {
+ lines := make([]string, 2)
+ lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left))
+ lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right))
+ return lines
+}
+
+// RefErrUnsupportedDetail describes an undefined reference error where the
+// referenced value does not support dereferencing (e.g., scalars).
+type RefErrUnsupportedDetail struct {
+ Ref Ref `json:"ref"` // invalid ref
+ Pos int `json:"pos"` // invalid element
+ Have types.Type `json:"have"` // referenced type
+}
+
+// Lines returns the string representation of the detail.
+func (r *RefErrUnsupportedDetail) Lines() []string {
+ lines := []string{
+ r.Ref.String(),
+ strings.Repeat("^", len(r.Ref[:r.Pos+1].String())),
+ fmt.Sprintf("have: %v", r.Have),
+ }
+ return lines
+}
+
+// RefErrInvalidDetail describes an undefined reference error where the referenced
+// value does not support the reference operand (e.g., missing object key,
+// invalid key type, etc.)
+type RefErrInvalidDetail struct {
+ Ref Ref `json:"ref"` // invalid ref
+ Pos int `json:"pos"` // invalid element
+ Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements)
+ Want types.Type `json:"want"` // allowed type (for non-object values)
+ OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys)
+}
+
+// Lines returns the string representation of the detail.
+func (r *RefErrInvalidDetail) Lines() []string {
+ lines := []string{r.Ref.String()}
+ offset := len(r.Ref[:r.Pos].String()) + 1
+ pad := strings.Repeat(" ", offset)
+ lines = append(lines, fmt.Sprintf("%s^", pad))
+ if r.Have != nil {
+ lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have))
+ } else {
+ lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos]))
+ }
+ if len(r.OneOf) > 0 {
+ lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf))
+ } else {
+ lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want))
+ }
+ return lines
+}
+
+func formatArgs(args []types.Type) string {
+ buf := make([]string, len(args))
+ for i := range args {
+ buf[i] = types.Sprint(args[i])
+ }
+ return "(" + strings.Join(buf, ", ") + ")"
+}
+
+func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error {
+ err := newRefError(loc, ref)
+ err.Details = &RefErrInvalidDetail{
+ Ref: ref,
+ Pos: idx,
+ Have: have,
+ Want: want,
+ OneOf: oneOf,
+ }
+ return err
+}
+
+func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error {
+ err := newRefError(loc, ref)
+ err.Details = &RefErrUnsupportedDetail{
+ Ref: ref,
+ Pos: idx,
+ Have: have,
+ }
+ return err
+}
+
+func newRefError(loc *Location, ref Ref) *Error {
+ return NewError(TypeErr, loc, "undefined ref: %v", ref)
+}
+
+func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want []types.Type) *Error {
+ err := NewError(TypeErr, loc, "%v: %v", builtinName, msg)
+ err.Details = &ArgErrDetail{
+ Have: have,
+ Want: want,
+ }
+ return err
+}
+
+func getOneOfForNode(node *typeTreeNode) (result []Value) {
+ node.Children().Iter(func(k, _ util.T) bool {
+ result = append(result, k.(Value))
+ return false
+ })
+
+ sortValueSlice(result)
+ return result
+}
+
+func getOneOfForType(tpe types.Type) (result []Value) {
+ switch tpe := tpe.(type) {
+ case *types.Object:
+ for _, k := range tpe.Keys() {
+ v, err := InterfaceToValue(k)
+ if err != nil {
+ panic(err)
+ }
+ result = append(result, v)
+ }
+ }
+ sortValueSlice(result)
+ return result
+}
+
+func sortValueSlice(sl []Value) {
+ sort.Slice(sl, func(i, j int) bool {
+ return sl[i].Compare(sl[j]) < 0
+ })
+}
+
+func getArgTypes(env *TypeEnv, args []*Term) []types.Type {
+ pre := make([]types.Type, len(args))
+ for i := range args {
+ pre[i] = env.Get(args[i])
+ }
+ return pre
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compare.go b/vendor/github.com/open-policy-agent/opa/ast/compare.go
new file mode 100644
index 00000000..627ca240
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/compare.go
@@ -0,0 +1,327 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+)
+
+// Compare returns an integer indicating whether two AST values are less than,
+// equal to, or greater than each other.
+//
+// If a is less than b, the return value is negative. If a is greater than b,
+// the return value is positive. If a is equal to b, the return value is zero.
+//
+// Different types are never equal to each other. For comparison purposes, types
+// are sorted as follows:
+//
+// nil < Null < Boolean < Number < String < Var < Ref < Array < Object < Set <
+// ArrayComprehension < ObjectComprehension < SetComprehension < Expr < SomeDecl
+// < With < Body < Rule < Import < Package < Module.
+//
+// Arrays and Refs are equal iff both a and b have the same length and all
+// corresponding elements are equal. If one element is not equal, the return
+// value is the same as for the first differing element. If all elements are
+// equal but a and b have different lengths, the shorter is considered less than
+// the other.
+//
+// Objects are considered equal iff both a and b have the same sorted (key,
+// value) pairs and are of the same length. Other comparisons are consistent but
+// not defined.
+//
+// Sets are considered equal iff the symmetric difference of a and b is empty.
+// Other comparisons are consistent but not defined.
+func Compare(a, b interface{}) int {
+
+ if t, ok := a.(*Term); ok {
+ if t == nil {
+ a = nil
+ } else {
+ a = t.Value
+ }
+ }
+
+ if t, ok := b.(*Term); ok {
+ if t == nil {
+ b = nil
+ } else {
+ b = t.Value
+ }
+ }
+
+ if a == nil {
+ if b == nil {
+ return 0
+ }
+ return -1
+ }
+ if b == nil {
+ return 1
+ }
+
+ sortA := sortOrder(a)
+ sortB := sortOrder(b)
+
+ if sortA < sortB {
+ return -1
+ } else if sortB < sortA {
+ return 1
+ }
+
+ switch a := a.(type) {
+ case Null:
+ return 0
+ case Boolean:
+ b := b.(Boolean)
+ if a.Equal(b) {
+ return 0
+ }
+ if !a {
+ return -1
+ }
+ return 1
+ case Number:
+ if ai, err := json.Number(a).Int64(); err == nil {
+ if bi, err := json.Number(b.(Number)).Int64(); err == nil {
+ if ai == bi {
+ return 0
+ }
+ if ai < bi {
+ return -1
+ }
+ return 1
+ }
+ }
+
+ bigA, ok := new(big.Float).SetString(string(a))
+ if !ok {
+ panic("illegal value")
+ }
+ bigB, ok := new(big.Float).SetString(string(b.(Number)))
+ if !ok {
+ panic("illegal value")
+ }
+ return bigA.Cmp(bigB)
+ case String:
+ b := b.(String)
+ if a.Equal(b) {
+ return 0
+ }
+ if a < b {
+ return -1
+ }
+ return 1
+ case Var:
+ b := b.(Var)
+ if a.Equal(b) {
+ return 0
+ }
+ if a < b {
+ return -1
+ }
+ return 1
+ case Ref:
+ b := b.(Ref)
+ return termSliceCompare(a, b)
+ case *Array:
+ b := b.(*Array)
+ return termSliceCompare(a.elems, b.elems)
+ case *object:
+ b := b.(*object)
+ return a.Compare(b)
+ case Set:
+ b := b.(Set)
+ return a.Compare(b)
+ case *ArrayComprehension:
+ b := b.(*ArrayComprehension)
+ if cmp := Compare(a.Term, b.Term); cmp != 0 {
+ return cmp
+ }
+ return Compare(a.Body, b.Body)
+ case *ObjectComprehension:
+ b := b.(*ObjectComprehension)
+ if cmp := Compare(a.Key, b.Key); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(a.Value, b.Value); cmp != 0 {
+ return cmp
+ }
+ return Compare(a.Body, b.Body)
+ case *SetComprehension:
+ b := b.(*SetComprehension)
+ if cmp := Compare(a.Term, b.Term); cmp != 0 {
+ return cmp
+ }
+ return Compare(a.Body, b.Body)
+ case Call:
+ b := b.(Call)
+ return termSliceCompare(a, b)
+ case *Expr:
+ b := b.(*Expr)
+ return a.Compare(b)
+ case *SomeDecl:
+ b := b.(*SomeDecl)
+ return a.Compare(b)
+ case *With:
+ b := b.(*With)
+ return a.Compare(b)
+ case Body:
+ b := b.(Body)
+ return a.Compare(b)
+ case *Head:
+ b := b.(*Head)
+ return a.Compare(b)
+ case *Rule:
+ b := b.(*Rule)
+ return a.Compare(b)
+ case Args:
+ b := b.(Args)
+ return termSliceCompare(a, b)
+ case *Import:
+ b := b.(*Import)
+ return a.Compare(b)
+ case *Package:
+ b := b.(*Package)
+ return a.Compare(b)
+ case *Module:
+ b := b.(*Module)
+ return a.Compare(b)
+ }
+ panic(fmt.Sprintf("illegal value: %T", a))
+}
+
+type termSlice []*Term
+
+func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 }
+func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
+func (s termSlice) Len() int { return len(s) }
+
+func sortOrder(x interface{}) int {
+ switch x.(type) {
+ case Null:
+ return 0
+ case Boolean:
+ return 1
+ case Number:
+ return 2
+ case String:
+ return 3
+ case Var:
+ return 4
+ case Ref:
+ return 5
+ case *Array:
+ return 6
+ case Object:
+ return 7
+ case Set:
+ return 8
+ case *ArrayComprehension:
+ return 9
+ case *ObjectComprehension:
+ return 10
+ case *SetComprehension:
+ return 11
+ case Call:
+ return 12
+ case Args:
+ return 13
+ case *Expr:
+ return 100
+ case *SomeDecl:
+ return 101
+ case *With:
+ return 110
+ case *Head:
+ return 120
+ case Body:
+ return 200
+ case *Rule:
+ return 1000
+ case *Import:
+ return 1001
+ case *Package:
+ return 1002
+ case *Module:
+ return 10000
+ }
+ panic(fmt.Sprintf("illegal value: %T", x))
+}
+
+func importsCompare(a, b []*Import) int {
+ minLen := len(a)
+ if len(b) < minLen {
+ minLen = len(b)
+ }
+ for i := 0; i < minLen; i++ {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func rulesCompare(a, b []*Rule) int {
+ minLen := len(a)
+ if len(b) < minLen {
+ minLen = len(b)
+ }
+ for i := 0; i < minLen; i++ {
+ if cmp := a[i].Compare(b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ }
+ if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func termSliceCompare(a, b []*Term) int {
+ minLen := len(a)
+ if len(b) < minLen {
+ minLen = len(b)
+ }
+ for i := 0; i < minLen; i++ {
+ if cmp := Compare(a[i], b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ } else if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func withSliceCompare(a, b []*With) int {
+ minLen := len(a)
+ if len(b) < minLen {
+ minLen = len(b)
+ }
+ for i := 0; i < minLen; i++ {
+ if cmp := Compare(a[i], b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ } else if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compile.go b/vendor/github.com/open-policy-agent/opa/ast/compile.go
new file mode 100644
index 00000000..74c3f805
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/compile.go
@@ -0,0 +1,3797 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// CompileErrorLimitDefault is the default number of errors a compiler will allow before
+// exiting.
+const CompileErrorLimitDefault = 10
+
+var errLimitReached = NewError(CompileErr, nil, "error limit reached")
+
+// Compiler contains the state of a compilation process.
+type Compiler struct {
+
+ // Errors contains errors that occurred during the compilation process.
+ // If there are one or more errors, the compilation process is considered
+ // "failed".
+ Errors Errors
+
+ // Modules contains the compiled modules. The compiled modules are the
+ // output of the compilation process. If the compilation process failed,
+ // there is no guarantee about the state of the modules.
+ Modules map[string]*Module
+
+ // ModuleTree organizes the modules into a tree where each node is keyed by
+ // an element in the module's package path. E.g., given modules containing
+ // the following package directives: "a", "a.b", "a.c", and "a.b", the
+ // resulting module tree would be:
+ //
+ // root
+ // |
+ // +--- data (no modules)
+ // |
+ // +--- a (1 module)
+ // |
+ // +--- b (2 modules)
+ // |
+ // +--- c (1 module)
+ //
+ ModuleTree *ModuleTreeNode
+
+ // RuleTree organizes rules into a tree where each node is keyed by an
+ // element in the rule's path. The rule path is the concatenation of the
+ // containing package and the stringified rule name. E.g., given the
+ // following module:
+ //
+ // package ex
+ // p[1] { true }
+ // p[2] { true }
+ // q = true
+ //
+ // root
+ // |
+ // +--- data (no rules)
+ // |
+ // +--- ex (no rules)
+ // |
+ // +--- p (2 rules)
+ // |
+ // +--- q (1 rule)
+ RuleTree *TreeNode
+
+ // Graph contains dependencies between rules. An edge (u,v) is added to the
+ // graph if rule 'u' refers to the virtual document defined by 'v'.
+ Graph *Graph
+
+ // TypeEnv holds type information for values inferred by the compiler.
+ TypeEnv *TypeEnv
+
+ // RewrittenVars is a mapping of variables that have been rewritten
+ // with the key being the generated name and value being the original.
+ RewrittenVars map[Var]Var
+
+ localvargen *localVarGenerator
+ moduleLoader ModuleLoader
+ ruleIndices *util.HashMap
+ stages []struct {
+ name string
+ metricName string
+ f func()
+ }
+ maxErrs int
+ sorted []string // list of sorted module names
+ pathExists func([]string) (bool, error)
+ after map[string][]CompilerStageDefinition
+ metrics metrics.Metrics
+ capabilities *Capabilities // user-supplied capabilities
+ builtins map[string]*Builtin // universe of built-in functions
+ customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities)
+ unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities)
+ comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index
+ initialized bool // indicates if init() has been called
+}
+
+// CompilerStage defines the interface for stages in the compiler.
+type CompilerStage func(*Compiler) *Error
+
+// CompilerStageDefinition defines a compiler stage
+type CompilerStageDefinition struct {
+ Name string
+ MetricName string
+ Stage CompilerStage
+}
+
+// QueryContext contains contextual information for running an ad-hoc query.
+//
+// Ad-hoc queries can be run in the context of a package and imports may be
+// included to provide concise access to data.
+type QueryContext struct {
+ Package *Package
+ Imports []*Import
+}
+
+// NewQueryContext returns a new QueryContext object.
+func NewQueryContext() *QueryContext {
+ return &QueryContext{}
+}
+
+// WithPackage sets the pkg on qc.
+func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext {
+ if qc == nil {
+ qc = NewQueryContext()
+ }
+ qc.Package = pkg
+ return qc
+}
+
+// WithImports sets the imports on qc.
+func (qc *QueryContext) WithImports(imports []*Import) *QueryContext {
+ if qc == nil {
+ qc = NewQueryContext()
+ }
+ qc.Imports = imports
+ return qc
+}
+
+// Copy returns a deep copy of qc.
+func (qc *QueryContext) Copy() *QueryContext {
+ if qc == nil {
+ return nil
+ }
+ cpy := *qc
+ if cpy.Package != nil {
+ cpy.Package = qc.Package.Copy()
+ }
+ cpy.Imports = make([]*Import, len(qc.Imports))
+ for i := range qc.Imports {
+ cpy.Imports[i] = qc.Imports[i].Copy()
+ }
+ return &cpy
+}
+
+// QueryCompiler defines the interface for compiling ad-hoc queries.
+type QueryCompiler interface {
+
+ // Compile should be called to compile ad-hoc queries. The return value is
+ // the compiled version of the query.
+ Compile(q Body) (Body, error)
+
+ // TypeEnv returns the type environment built after running type checking
+ // on the query.
+ TypeEnv() *TypeEnv
+
+ // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls
+ // to Compile will take the QueryContext into account.
+ WithContext(qctx *QueryContext) QueryCompiler
+
+ // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not
+ // allow inside of queries. By default the query compiler inherits the
+ // compiler's unsafe built-in functions. This function allows callers to
+ // override that set. If an empty (non-nil) map is provided, all built-ins
+ // are allowed.
+ WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler
+
+ // WithStageAfter registers a stage to run during query compilation after
+ // the named stage.
+ WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler
+
+ // RewrittenVars maps generated vars in the compiled query to vars from the
+ // parsed query. For example, given the query "input := 1" the rewritten
+ // query would be "__local0__ = 1". The mapping would then be {__local0__: input}.
+ RewrittenVars() map[Var]Var
+
+ // ComprehensionIndex returns an index data structure for the given comprehension
+ // term. If no index is found, returns nil.
+ ComprehensionIndex(term *Term) *ComprehensionIndex
+}
+
+// QueryCompilerStage defines the interface for stages in the query compiler.
+type QueryCompilerStage func(QueryCompiler, Body) (Body, error)
+
+// QueryCompilerStageDefinition defines a QueryCompiler stage
+type QueryCompilerStageDefinition struct {
+ Name string
+ MetricName string
+ Stage QueryCompilerStage
+}
+
+const compileStageMetricPrefex = "ast_compile_stage_"
+
+// NewCompiler returns a new empty compiler.
+//
+// The returned compiler has the fixed list of built-in stages below wired up;
+// callers may append extra stages after any named stage via WithStageAfter.
+func NewCompiler() *Compiler {
+
+	c := &Compiler{
+		Modules:       map[string]*Module{},
+		TypeEnv:       NewTypeEnv(),
+		RewrittenVars: map[Var]Var{},
+		ruleIndices: util.NewHashMap(func(a, b util.T) bool {
+			r1, r2 := a.(Ref), b.(Ref)
+			return r1.Equal(r2)
+		}, func(x util.T) int {
+			return x.(Ref).Hash()
+		}),
+		maxErrs:              CompileErrorLimitDefault,
+		after:                map[string][]CompilerStageDefinition{},
+		unsafeBuiltinsMap:    map[string]struct{}{},
+		comprehensionIndices: map[*Term]*ComprehensionIndex{},
+	}
+
+	c.ModuleTree = NewModuleTree(nil)
+	c.RuleTree = NewRuleTree(c.ModuleTree)
+
+	// Stage order is significant: each stage may rely on invariants
+	// established by the stages before it.
+	c.stages = []struct {
+		name       string
+		metricName string
+		f          func()
+	}{
+		// Reference resolution should run first as it may be used to lazily
+		// load additional modules. If any stages run before resolution, they
+		// need to be re-run after resolution.
+		{"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs},
+		{"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree},
+		{"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree},
+		// The local variable generator must be initialized after references are
+		// resolved and the dynamic module loader has run but before subsequent
+		// stages that need to generate variables.
+		{"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen},
+		{"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars},
+		{"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms},
+		{"SetGraph", "compile_stage_set_graph", c.setGraph},
+		{"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms},
+		{"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead},
+		{"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers},
+		{"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts},
+		{"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs},
+		{"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads},
+		{"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies},
+		{"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals},
+		{"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms},
+		{"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion},
+		{"CheckTypes", "compile_stage_check_types", c.checkTypes},
+		// NOTE(review): metric name below reads "compile_state" (sic, not
+		// "compile_stage"); kept as-is so emitted metric keys stay stable.
+		{"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins},
+		{"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices},
+		{"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices},
+	}
+
+	return c
+}
+
+// SetErrorLimit sets the number of errors the compiler can encounter before it
+// quits. Zero or a negative number indicates no limit.
+func (c *Compiler) SetErrorLimit(limit int) *Compiler {
+	c.maxErrs = limit
+	return c
+}
+
+// WithPathConflictsCheck enables base-virtual document conflict
+// detection. The compiler will check that rules don't overlap with
+// paths that exist as determined by the provided callable.
+func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler {
+	c.pathExists = fn
+	return c
+}
+
+// WithStageAfter registers a stage to run during compilation after
+// the named stage. Stages registered under the same name run in
+// registration order.
+func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler {
+	c.after[after] = append(c.after[after], stage)
+	return c
+}
+
+// WithMetrics will set a metrics.Metrics and be used for profiling
+// the Compiler instance.
+func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler {
+	c.metrics = metrics
+	return c
+}
+
+// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller
+// to specify the set of built-in functions available to the policy. In the future, capabilities
+// may be able to restrict access to other language features. Capabilities allow callers to check
+// if policies are compatible with a particular version of OPA. If policies are a compiled for a
+// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them
+// successfully.
+func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler {
+	c.capabilities = capabilities
+	return c
+}
+
+// WithBuiltins is deprecated. Use WithCapabilities instead.
+// The provided map is copied, so later mutation of the argument does not
+// affect the compiler.
+func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler {
+	c.customBuiltins = make(map[string]*Builtin)
+	for k, v := range builtins {
+		c.customBuiltins[k] = v
+	}
+	return c
+}
+
+// WithUnsafeBuiltins is deprecated. Use WithCapabilities instead.
+// Entries are merged into the compiler's existing unsafe-builtin set rather
+// than replacing it.
+func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler {
+	for name := range unsafeBuiltins {
+		c.unsafeBuiltinsMap[name] = struct{}{}
+	}
+	return c
+}
+
+// QueryCompiler returns a new QueryCompiler object backed by this compiler.
+// init is idempotent, so calling this repeatedly is safe.
+func (c *Compiler) QueryCompiler() QueryCompiler {
+	c.init()
+	return newQueryCompiler(c)
+}
+
+// Compile runs the compilation process on the input modules. The compiled
+// version of the modules and associated data structures are stored on the
+// compiler. If the compilation process fails for any reason, the compiler will
+// contain a slice of errors.
+//
+// The input modules are deep-copied before compilation, so the caller's
+// modules are never mutated.
+func (c *Compiler) Compile(modules map[string]*Module) {
+
+	c.init()
+
+	c.Modules = make(map[string]*Module, len(modules))
+
+	for k, v := range modules {
+		c.Modules[k] = v.Copy()
+		c.sorted = append(c.sorted, k)
+	}
+
+	// Sort module names so subsequent stages iterate deterministically.
+	sort.Strings(c.sorted)
+
+	c.compile()
+}
+
+// Failed returns true if a compilation error has been encountered.
+func (c *Compiler) Failed() bool {
+	return len(c.Errors) > 0
+}
+
+// ComprehensionIndex returns a data structure specifying how to index comprehension
+// results so that callers do not have to recompute the comprehension more than once.
+// If no index is found, returns nil.
+func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
+	return c.comprehensionIndices[term]
+}
+
+// GetArity returns the number of args a function referred to by ref takes. If
+// ref refers to built-in function, the built-in declaration is consulted,
+// otherwise, the ref is used to perform a ruleset lookup.
+//
+// Returns -1 when ref refers to neither a built-in nor a function rule.
+func (c *Compiler) GetArity(ref Ref) int {
+	if bi := c.builtins[ref.String()]; bi != nil {
+		return len(bi.Decl.Args())
+	}
+	rules := c.GetRulesExact(ref)
+	if len(rules) == 0 {
+		return -1
+	}
+	return len(rules[0].Head.Args)
+}
+
+// GetRulesExact returns a slice of rules referred to by the reference.
+//
+// E.g., given the following module:
+//
+//	package a.b.c
+//
+//	p[k] = v { ... } # rule1
+//	p[k1] = v1 { ... } # rule2
+//
+// The following calls yield the rules on the right.
+//
+//	GetRulesExact("data.a.b.c.p")   => [rule1, rule2]
+//	GetRulesExact("data.a.b.c.p.x") => nil
+//	GetRulesExact("data.a.b.c")     => nil
+func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) {
+	node := c.RuleTree
+
+	// Walk the rule tree following every term of the ref exactly.
+	for _, x := range ref {
+		if node = node.Child(x.Value); node == nil {
+			return nil
+		}
+	}
+
+	return extractRules(node.Values)
+}
+
+// GetRulesForVirtualDocument returns a slice of rules that produce the virtual
+// document referred to by the reference.
+//
+// E.g., given the following module:
+//
+//	package a.b.c
+//
+//	p[k] = v { ... } # rule1
+//	p[k1] = v1 { ... } # rule2
+//
+// The following calls yield the rules on the right.
+//
+//	GetRulesForVirtualDocument("data.a.b.c.p")   => [rule1, rule2]
+//	GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2]
+//	GetRulesForVirtualDocument("data.a.b.c")     => nil
+func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) {
+
+	node := c.RuleTree
+
+	for _, x := range ref {
+		if node = node.Child(x.Value); node == nil {
+			return nil
+		}
+		// Unlike GetRulesExact, stop at the first node that holds rules,
+		// which covers refs that extend past the rule's own path.
+		if len(node.Values) > 0 {
+			return extractRules(node.Values)
+		}
+	}
+
+	return extractRules(node.Values)
+}
+
+// GetRulesWithPrefix returns a slice of rules that share the prefix ref.
+//
+// E.g., given the following module:
+//
+//	package a.b.c
+//
+//	p[x] = y { ... } # rule1
+//	p[k] = v { ... } # rule2
+//	q { ... }        # rule3
+//
+// The following calls yield the rules on the right.
+//
+//	GetRulesWithPrefix("data.a.b.c.p")   => [rule1, rule2]
+//	GetRulesWithPrefix("data.a.b.c.p.a") => nil
+//	GetRulesWithPrefix("data.a.b.c")     => [rule1, rule2, rule3]
+func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) {
+
+	node := c.RuleTree
+
+	for _, x := range ref {
+		if node = node.Child(x.Value); node == nil {
+			return nil
+		}
+	}
+
+	// Collect rules from the subtree rooted at the node the prefix reached,
+	// skipping hidden subtrees.
+	var acc func(node *TreeNode)
+
+	acc = func(node *TreeNode) {
+		rules = append(rules, extractRules(node.Values)...)
+		for _, child := range node.Children {
+			if child.Hide {
+				continue
+			}
+			acc(child)
+		}
+	}
+
+	acc(node)
+
+	return rules
+}
+
+// extractRules converts a slice of rule-tree values to concrete *Rule
+// pointers. Values are assumed to hold *Rule (panics otherwise).
+func extractRules(s []util.T) (rules []*Rule) {
+	for _, r := range s {
+		rules = append(rules, r.(*Rule))
+	}
+	return rules
+}
+
+// GetRules returns a slice of rules that are referred to by ref.
+//
+// E.g., given the following module:
+//
+//	package a.b.c
+//
+//	p[x] = y { q[x] = y; ... } # rule1
+//	q[x] = y { ... }           # rule2
+//
+// The following calls yield the rules on the right.
+//
+//	GetRules("data.a.b.c.p")   => [rule1]
+//	GetRules("data.a.b.c.p.x") => [rule1]
+//	GetRules("data.a.b.c.q")   => [rule2]
+//	GetRules("data.a.b.c")     => [rule1, rule2]
+//	GetRules("data.a.b.d")     => nil
+//
+// The result order is unspecified because the union is built via a map.
+func (c *Compiler) GetRules(ref Ref) (rules []*Rule) {
+
+	// Deduplicate the union of the virtual-document and prefix lookups.
+	set := map[*Rule]struct{}{}
+
+	for _, rule := range c.GetRulesForVirtualDocument(ref) {
+		set[rule] = struct{}{}
+	}
+
+	for _, rule := range c.GetRulesWithPrefix(ref) {
+		set[rule] = struct{}{}
+	}
+
+	for rule := range set {
+		rules = append(rules, rule)
+	}
+
+	return rules
+}
+
+// GetRulesDynamic returns a slice of rules that could be referred to by a ref.
+// When parts of the ref are statically known, we use that information to narrow
+// down which rules the ref could refer to, but in the most general case this
+// will be an over-approximation.
+//
+// E.g., given the following modules:
+//
+//	package a.b.c
+//
+//	r1 = 1 # rule1
+//
+// and:
+//
+//	package a.d.c
+//
+//	r2 = 2 # rule2
+//
+// The following calls yield the rules on the right.
+//
+//	GetRulesDynamic("data.a[x].c[y]") => [rule1, rule2]
+//	GetRulesDynamic("data.a[x].c.r2") => [rule2]
+//	GetRulesDynamic("data.a.b[x][y]") => [rule1]
+func (c *Compiler) GetRulesDynamic(ref Ref) (rules []*Rule) {
+	node := c.RuleTree
+
+	set := map[*Rule]struct{}{}
+	var walk func(node *TreeNode, i int)
+	walk = func(node *TreeNode, i int) {
+		if i >= len(ref) {
+			// We've reached the end of the reference and want to collect everything
+			// under this "prefix".
+			node.DepthFirst(func(descendant *TreeNode) bool {
+				insertRules(set, descendant.Values)
+				return descendant.Hide
+			})
+		} else if i == 0 || IsConstant(ref[i].Value) {
+			// The head of the ref is always grounded. In case another part of the
+			// ref is also grounded, we can lookup the exact child. If it's not found
+			// we can immediately return...
+			if child := node.Child(ref[i].Value); child == nil {
+				return
+			} else if len(child.Values) > 0 {
+				// If there are any rules at this position, it's what the ref would
+				// refer to. We can just append those and stop here.
+				insertRules(set, child.Values)
+			} else {
+				// Otherwise, we continue using the child node.
+				walk(child, i+1)
+			}
+		} else {
+			// This part of the ref is a dynamic term. We can't know what it refers
+			// to and will just need to try all of the children.
+			for _, child := range node.Children {
+				if child.Hide {
+					continue
+				}
+				insertRules(set, child.Values)
+				walk(child, i+1)
+			}
+		}
+	}
+
+	walk(node, 0)
+	for rule := range set {
+		rules = append(rules, rule)
+	}
+	return rules
+}
+
+// Utility: add all rule values to the set.
+func insertRules(set map[*Rule]struct{}, rules []util.T) {
+	for _, rule := range rules {
+		set[rule.(*Rule)] = struct{}{}
+	}
+}
+
+// RuleIndex returns a RuleIndex built for the rule set referred to by path.
+// The path must refer to the rule set exactly, i.e., given a rule set at path
+// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a
+// RuleIndex built for the rule.
+//
+// Returns nil when no index was built for the path.
+func (c *Compiler) RuleIndex(path Ref) RuleIndex {
+	r, ok := c.ruleIndices.Get(path)
+	if !ok {
+		return nil
+	}
+	return r.(RuleIndex)
+}
+
+// PassesTypeCheck determines whether the given body passes type checking
+func (c *Compiler) PassesTypeCheck(body Body) bool {
+	checker := newTypeChecker()
+	env := c.TypeEnv
+	_, errs := checker.CheckBody(env, body)
+	return len(errs) == 0
+}
+
+// ModuleLoader defines the interface that callers can implement to enable lazy
+// loading of modules during compilation.
+type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error)
+
+// WithModuleLoader sets f as the ModuleLoader on the compiler.
+//
+// The compiler will invoke the ModuleLoader after resolving all references in
+// the current set of input modules. The ModuleLoader can return a new
+// collection of parsed modules that are to be included in the compilation
+// process. This process will repeat until the ModuleLoader returns an empty
+// collection or an error. If an error is returned, compilation will stop
+// immediately.
+func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler {
+	c.moduleLoader = f
+	return c
+}
+
+// counterAdd increments the named counter metric by n. No-op when metrics
+// collection is disabled (c.metrics == nil).
+func (c *Compiler) counterAdd(name string, n uint64) {
+	if c.metrics == nil {
+		return
+	}
+	c.metrics.Counter(name).Add(n)
+}
+
+// buildRuleIndices constructs base-document equality indices for every rule
+// set in the rule tree and stores them keyed by the rule set's path.
+func (c *Compiler) buildRuleIndices() {
+
+	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
+		if len(node.Values) == 0 {
+			return false
+		}
+		index := newBaseDocEqIndex(func(ref Ref) bool {
+			return isVirtual(c.RuleTree, ref.GroundPrefix())
+		})
+		// Only store the index if Build succeeded for this rule set.
+		if rules := extractRules(node.Values); index.Build(rules) {
+			c.ruleIndices.Put(rules[0].Path(), index)
+		}
+		return false
+	})
+
+}
+
+// buildComprehensionIndices builds comprehension indices for every rule body
+// and records how many were built in a counter metric.
+func (c *Compiler) buildComprehensionIndices() {
+	for _, name := range c.sorted {
+		WalkRules(c.Modules[name], func(r *Rule) bool {
+			candidates := r.Head.Args.Vars()
+			candidates.Update(ReservedVars)
+			n := buildComprehensionIndices(c.GetArity, candidates, r.Body, c.comprehensionIndices)
+			c.counterAdd(compileStageComprehensionIndexBuild, n)
+			return false
+		})
+	}
+}
+
+// checkRecursion ensures that there are no recursive definitions, i.e., there are
+// no cycles in the Graph.
+func (c *Compiler) checkRecursion() {
+	eq := func(a, b util.T) bool {
+		return a.(*Rule) == b.(*Rule)
+	}
+
+	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
+		for _, rule := range node.Values {
+			// Check the rule and each of its else-branches for self-cycles.
+			for node := rule.(*Rule); node != nil; node = node.Else {
+				c.checkSelfPath(node.Loc(), eq, node, node)
+			}
+		}
+		return false
+	})
+}
+
+// checkSelfPath reports a RecursionErr if a path exists from a to b in the
+// dependency graph (a == b detects self-recursion).
+func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) {
+	tr := NewGraphTraversal(c.Graph)
+	if p := util.DFSPath(tr, eq, a, b); len(p) > 0 {
+		n := []string{}
+		for _, x := range p {
+			n = append(n, astNodeToString(x))
+		}
+		c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> ")))
+	}
+}
+
+// astNodeToString renders a graph node for error messages. Only *Rule is
+// expected here; anything else is a programmer error.
+func astNodeToString(x interface{}) string {
+	switch x := x.(type) {
+	case *Rule:
+		return string(x.Head.Name)
+	default:
+		panic("not reached")
+	}
+}
+
+// checkRuleConflicts ensures that rules definitions are not in conflict.
+// It flags: re-declared (":=") rules with multiple definitions, rules of the
+// same name with mixed document kinds or arities, multiple default rules,
+// base/virtual document path conflicts, and packages that collide with rules.
+func (c *Compiler) checkRuleConflicts() {
+	c.RuleTree.DepthFirst(func(node *TreeNode) bool {
+		if len(node.Values) == 0 {
+			return false
+		}
+
+		kinds := map[DocKind]struct{}{}
+		defaultRules := 0
+		arities := map[int]struct{}{}
+		declared := false
+
+		for _, rule := range node.Values {
+			r := rule.(*Rule)
+			kinds[r.Head.DocKind()] = struct{}{}
+			arities[len(r.Head.Args)] = struct{}{}
+			if r.Head.Assign {
+				declared = true
+			}
+			if r.Default {
+				defaultRules++
+			}
+		}
+
+		name := Var(node.Key.(String))
+
+		if declared && len(node.Values) > 1 {
+			c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule named %v redeclared at %v", name, node.Values[1].(*Rule).Loc()))
+		} else if len(kinds) > 1 || len(arities) > 1 {
+			c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules named %v found", name))
+		} else if defaultRules > 1 {
+			c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "multiple default rules named %s found", name))
+		}
+
+		return false
+	})
+
+	// Optional base/virtual conflict detection (see WithPathConflictsCheck).
+	if c.pathExists != nil {
+		for _, err := range CheckPathConflicts(c, c.pathExists) {
+			c.err(err)
+		}
+	}
+
+	// A package path must not extend through a rule name defined by a parent
+	// package.
+	c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool {
+		for _, mod := range node.Modules {
+			for _, rule := range mod.Rules {
+				if childNode, ok := node.Children[String(rule.Head.Name)]; ok {
+					for _, childMod := range childNode.Modules {
+						msg := fmt.Sprintf("%v conflicts with rule defined at %v", childMod.Package, rule.Loc())
+						c.err(NewError(TypeErr, mod.Package.Loc(), msg))
+					}
+				}
+			}
+		}
+		return false
+	})
+}
+
+// checkUndefinedFuncs reports calls to functions that are neither built-ins
+// nor function rules, across all modules.
+func (c *Compiler) checkUndefinedFuncs() {
+	for _, name := range c.sorted {
+		m := c.Modules[name]
+		for _, err := range checkUndefinedFuncs(m, c.GetArity) {
+			c.err(err)
+		}
+	}
+}
+
+// checkUndefinedFuncs walks all call expressions under x and returns an error
+// for each operator whose arity lookup fails (arity < 0 means undefined).
+func checkUndefinedFuncs(x interface{}, arity func(Ref) int) Errors {
+
+	var errs Errors
+
+	WalkExprs(x, func(expr *Expr) bool {
+		if !expr.IsCall() {
+			return false
+		}
+		ref := expr.Operator()
+		if arity(ref) >= 0 {
+			return false
+		}
+		errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref))
+		return true
+	})
+
+	return errs
+}
+
+// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target
+// positions of built-in expressions will be bound when evaluating the rule from left
+// to right, re-ordering as necessary.
+func (c *Compiler) checkSafetyRuleBodies() {
+	for _, name := range c.sorted {
+		m := c.Modules[name]
+		WalkRules(m, func(r *Rule) bool {
+			// Rule arguments are bound by the caller and therefore safe.
+			safe := ReservedVars.Copy()
+			safe.Update(r.Head.Args.Vars())
+			r.Body = c.checkBodySafety(safe, m, r.Body)
+			return false
+		})
+	}
+}
+
+// checkBodySafety reorders b for safety; on failure it records the errors and
+// returns the body unchanged.
+func (c *Compiler) checkBodySafety(safe VarSet, m *Module, b Body) Body {
+	reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b)
+	if errs := safetyErrorSlice(unsafe); len(errs) > 0 {
+		for _, err := range errs {
+			c.err(err)
+		}
+		return b
+	}
+	return reordered
+}
+
+// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting
+// variables during the safety check. This has to be exported because it's relied on
+// by the copy propagation implementation in topdown.
+var SafetyCheckVisitorParams = VarVisitorParams{
+	SkipRefCallHead: true,
+	SkipClosures:    true,
+}
+
+// checkSafetyRuleHeads ensures that variables appearing in the head of a
+// rule also appear in the body.
+func (c *Compiler) checkSafetyRuleHeads() {
+
+	for _, name := range c.sorted {
+		m := c.Modules[name]
+		WalkRules(m, func(r *Rule) bool {
+			safe := r.Body.Vars(SafetyCheckVisitorParams)
+			safe.Update(r.Head.Args.Vars())
+			unsafe := r.Head.Vars().Diff(safe)
+			for v := range unsafe {
+				// Compiler-generated vars are skipped to avoid confusing
+				// errors about names the user never wrote.
+				if !v.IsGenerated() {
+					c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v))
+				}
+			}
+			return false
+		})
+	}
+}
+
+// checkTypes runs the type checker on all rules. The type checker builds a
+// TypeEnv that is stored on the compiler.
+func (c *Compiler) checkTypes() {
+	// Recursion is caught in earlier step, so this cannot fail.
+	sorted, _ := c.Graph.Sort()
+	checker := newTypeChecker().WithVarRewriter(rewriteVarsInRef(c.RewrittenVars))
+	env, errs := checker.CheckTypes(c.TypeEnv, sorted)
+	for _, err := range errs {
+		c.err(err)
+	}
+	c.TypeEnv = env
+}
+
+// checkUnsafeBuiltins reports uses of built-ins listed in the compiler's
+// unsafe-builtin set, across all modules.
+func (c *Compiler) checkUnsafeBuiltins() {
+	for _, name := range c.sorted {
+		errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name])
+		for _, err := range errs {
+			c.err(err)
+		}
+	}
+}
+
+// runStage executes f, wrapping it in a timer metric when metrics are enabled.
+func (c *Compiler) runStage(metricName string, f func()) {
+	if c.metrics != nil {
+		c.metrics.Timer(metricName).Start()
+		defer c.metrics.Timer(metricName).Stop()
+	}
+	f()
+}
+
+// runStageAfter executes a caller-registered stage (see WithStageAfter),
+// wrapping it in a timer metric when metrics are enabled.
+func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error {
+	if c.metrics != nil {
+		c.metrics.Timer(metricName).Start()
+		defer c.metrics.Timer(metricName).Stop()
+	}
+	return s(c)
+}
+
+// compile runs all built-in stages in order, stopping at the first stage that
+// records errors. After each built-in stage, any stages registered for it via
+// WithStageAfter are run.
+func (c *Compiler) compile() {
+
+	// err panics with errLimitReached when the error limit is hit; recover
+	// that sentinel here and re-panic anything else.
+	defer func() {
+		if r := recover(); r != nil && r != errLimitReached {
+			panic(r)
+		}
+	}()
+
+	for _, s := range c.stages {
+		c.runStage(s.metricName, s.f)
+		if c.Failed() {
+			return
+		}
+		for _, s := range c.after[s.name] {
+			err := c.runStageAfter(s.MetricName, s.Stage)
+			if err != nil {
+				c.err(err)
+			}
+		}
+	}
+}
+
+// init lazily builds the built-in function table and the initial type
+// environment. It is idempotent.
+func (c *Compiler) init() {
+
+	if c.initialized {
+		return
+	}
+
+	if c.capabilities == nil {
+		c.capabilities = CapabilitiesForThisVersion()
+	}
+
+	c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins))
+
+	for _, bi := range c.capabilities.Builtins {
+		c.builtins[bi.Name] = bi
+	}
+
+	// Custom built-ins (WithBuiltins) override capability built-ins of the
+	// same name because they are inserted last.
+	for name, bi := range c.customBuiltins {
+		c.builtins[name] = bi
+	}
+
+	tc := newTypeChecker()
+	c.TypeEnv = tc.checkLanguageBuiltins(nil, c.builtins)
+	c.initialized = true
+}
+
+// err records a compilation error. When the error limit (maxErrs) is reached,
+// it appends a sentinel and panics with errLimitReached, which compile()
+// recovers to abort the pipeline.
+func (c *Compiler) err(err *Error) {
+	if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs {
+		c.Errors = append(c.Errors, errLimitReached)
+		panic(errLimitReached)
+	}
+	c.Errors = append(c.Errors, err)
+}
+
+// getExports returns a map (util.HashMap) from package path (Ref) to the
+// []Var of rule names exported by that package. Used by resolveAllRefs to
+// resolve intra-package references.
+func (c *Compiler) getExports() *util.HashMap {
+
+	rules := util.NewHashMap(func(a, b util.T) bool {
+		// BUG FIX: the equality function previously asserted `a` twice
+		// (r2 := a.(Ref)), comparing every key with itself and therefore
+		// always returning true. Compare a against b, as in NewCompiler.
+		r1 := a.(Ref)
+		r2 := b.(Ref)
+		return r1.Equal(r2)
+	}, func(v util.T) int {
+		return v.(Ref).Hash()
+	})
+
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		// Accumulate rule names under the module's package path; multiple
+		// modules may share one package.
+		rv, ok := rules.Get(mod.Package.Path)
+		if !ok {
+			rv = []Var{}
+		}
+		rvs := rv.([]Var)
+
+		for _, rule := range mod.Rules {
+			rvs = append(rvs, rule.Head.Name)
+		}
+		rules.Put(mod.Package.Path, rvs)
+	}
+
+	return rules
+}
+
+// resolveAllRefs resolves references in expressions to their fully qualified values.
+//
+// For instance, given the following module:
+//
+//	package a.b
+//	import data.foo.bar
+//	p[x] { bar[_] = x }
+//
+// The reference "bar[_]" would be resolved to "data.foo.bar[_]".
+func (c *Compiler) resolveAllRefs() {
+
+	rules := c.getExports()
+
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+
+		var ruleExports []Var
+		if x, ok := rules.Get(mod.Package.Path); ok {
+			ruleExports = x.([]Var)
+		}
+
+		globals := getGlobals(mod.Package, ruleExports, mod.Imports)
+
+		WalkRules(mod, func(rule *Rule) bool {
+			err := resolveRefsInRule(globals, rule)
+			if err != nil {
+				c.err(NewError(CompileErr, rule.Location, err.Error()))
+			}
+			return false
+		})
+
+		// Once imports have been resolved, they are no longer needed.
+		mod.Imports = nil
+	}
+
+	if c.moduleLoader != nil {
+
+		parsed, err := c.moduleLoader(c.Modules)
+		if err != nil {
+			c.err(NewError(CompileErr, nil, err.Error()))
+			return
+		}
+
+		if len(parsed) == 0 {
+			return
+		}
+
+		for id, module := range parsed {
+			c.Modules[id] = module.Copy()
+			c.sorted = append(c.sorted, id)
+		}
+
+		sort.Strings(c.sorted)
+		// Recurse so the newly loaded modules are themselves resolved (and may
+		// trigger further loads); terminates when the loader returns nothing.
+		c.resolveAllRefs()
+	}
+}
+
+// initLocalVarGen seeds the local variable generator from the current module
+// set so generated names never collide with existing ones.
+func (c *Compiler) initLocalVarGen() {
+	c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules)
+}
+
+// rewriteComprehensionTerms hoists comprehension terms into equality
+// expressions across all modules.
+func (c *Compiler) rewriteComprehensionTerms() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		rewriteComprehensionTerms(f, mod)
+	}
+}
+
+// rewriteExprTerms normalizes expression terms in rule heads and bodies.
+func (c *Compiler) rewriteExprTerms() {
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		WalkRules(mod, func(rule *Rule) bool {
+			rewriteExprTermsInHead(c.localvargen, rule)
+			rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body)
+			return false
+		})
+	}
+}
+
+// rewriteTermsInHead will rewrite rules so that the head does not contain any
+// terms that require evaluation (e.g., refs or comprehensions). If the key or
+// value contains or more of these terms, the key or value will be moved into
+// the body and assigned to a new variable. The new variable will replace the
+// key or value in the head.
+//
+// For instance, given the following rule:
+//
+//	p[{"foo": data.foo[i]}] { i < 100 }
+//
+// The rule would be re-written as:
+//
+//	p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} }
+func (c *Compiler) rewriteRefsInHead() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		WalkRules(mod, func(rule *Rule) bool {
+			// Key, value, and each argument are handled the same way: generate
+			// an equality expression in the body and substitute the new var.
+			if requiresEval(rule.Head.Key) {
+				expr := f.Generate(rule.Head.Key)
+				rule.Head.Key = expr.Operand(0)
+				rule.Body.Append(expr)
+			}
+			if requiresEval(rule.Head.Value) {
+				expr := f.Generate(rule.Head.Value)
+				rule.Head.Value = expr.Operand(0)
+				rule.Body.Append(expr)
+			}
+			for i := 0; i < len(rule.Head.Args); i++ {
+				if requiresEval(rule.Head.Args[i]) {
+					expr := f.Generate(rule.Head.Args[i])
+					rule.Head.Args[i] = expr.Operand(0)
+					rule.Body.Append(expr)
+				}
+			}
+			return false
+		})
+	}
+}
+
+// rewriteEquals normalizes equality expressions in all modules.
+func (c *Compiler) rewriteEquals() {
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		rewriteEquals(mod)
+	}
+}
+
+// rewriteDynamicTerms rewrites dynamic terms in rule bodies so they are
+// assigned to generated variables before use.
+func (c *Compiler) rewriteDynamicTerms() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		WalkRules(mod, func(rule *Rule) bool {
+			rule.Body = rewriteDynamics(f, rule.Body)
+			return false
+		})
+	}
+}
+
+// rewriteLocalVars rewrites declared (":=") variables in rule heads and
+// bodies to generated local names, recording the mapping in c.RewrittenVars.
+func (c *Compiler) rewriteLocalVars() {
+
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		gen := c.localvargen
+
+		WalkRules(mod, func(rule *Rule) bool {
+
+			// Rewrite assignments contained in head of rule. Assignments can
+			// occur in rule head if they're inside a comprehension. Note,
+			// assigned vars in comprehensions in the head will be rewritten
+			// first to preserve scoping rules. For example:
+			//
+			// p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 }
+			//
+			// This behaviour is consistent scoping inside the body. For example:
+			//
+			// p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] }
+			nestedXform := &rewriteNestedHeadVarLocalTransform{
+				gen:           gen,
+				RewrittenVars: c.RewrittenVars,
+			}
+
+			NewGenericVisitor(nestedXform.Visit).Walk(rule.Head)
+
+			for _, err := range nestedXform.errs {
+				c.err(err)
+			}
+
+			// Rewrite assignments in body.
+			used := NewVarSet()
+
+			if rule.Head.Key != nil {
+				used.Update(rule.Head.Key.Vars())
+			}
+
+			if rule.Head.Value != nil {
+				used.Update(rule.Head.Value.Vars())
+			}
+
+			stack := newLocalDeclaredVars()
+
+			// Rule arguments are rewritten first so body references to them
+			// resolve to the same generated names.
+			c.rewriteLocalArgVars(gen, stack, rule)
+
+			body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body)
+			for _, err := range errs {
+				c.err(err)
+			}
+
+			// For rewritten vars use the collection of all variables that
+			// were in the stack at some point in time.
+			for k, v := range stack.rewritten {
+				c.RewrittenVars[k] = v
+			}
+
+			rule.Body = body
+
+			// Rewrite vars in head that refer to locally declared vars in the body.
+			localXform := rewriteHeadVarLocalTransform{declared: declared}
+
+			for i := range rule.Head.Args {
+				rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i])
+			}
+
+			if rule.Head.Key != nil {
+				rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key)
+			}
+
+			if rule.Head.Value != nil {
+				rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value)
+			}
+
+			return false
+		})
+	}
+}
+
+// rewriteNestedHeadVarLocalTransform rewrites declared vars inside
+// comprehensions that appear in a rule head. Errors and the generated-name
+// mapping are accumulated on the struct as Visit walks the head.
+type rewriteNestedHeadVarLocalTransform struct {
+	gen           *localVarGenerator
+	errs          Errors
+	RewrittenVars map[Var]Var
+}
+
+// Visit implements the generic-visitor callback. Returning true stops the
+// walk into the current term (its children were already handled here).
+func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool {
+
+	if term, ok := x.(*Term); ok {
+
+		stop := false
+		stack := newLocalDeclaredVars()
+
+		switch x := term.Value.(type) {
+		case *object:
+			// Recurse into keys and values manually via copies so the
+			// object's internal hashing stays consistent after rewriting.
+			cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
+				kcpy := k.Copy()
+				NewGenericVisitor(xform.Visit).Walk(kcpy)
+				vcpy := v.Copy()
+				NewGenericVisitor(xform.Visit).Walk(vcpy)
+				return kcpy, vcpy, nil
+			})
+			term.Value = cpy
+			stop = true
+		case *set:
+			cpy, _ := x.Map(func(v *Term) (*Term, error) {
+				vcpy := v.Copy()
+				NewGenericVisitor(xform.Visit).Walk(vcpy)
+				return vcpy, nil
+			})
+			term.Value = cpy
+			stop = true
+		case *ArrayComprehension:
+			xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs)
+			stop = true
+		case *SetComprehension:
+			xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs)
+			stop = true
+		case *ObjectComprehension:
+			xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs)
+			stop = true
+		}
+
+		for k, v := range stack.rewritten {
+			xform.RewrittenVars[k] = v
+		}
+
+		return stop
+	}
+
+	return false
+}
+
+// rewriteHeadVarLocalTransform substitutes head vars with their declared
+// (generated) counterparts.
+type rewriteHeadVarLocalTransform struct {
+	declared map[Var]Var
+}
+
+// Transform replaces a Var with its generated name when one was declared;
+// all other nodes pass through unchanged.
+func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) {
+	if v, ok := x.(Var); ok {
+		if gv, ok := xform.declared[v]; ok {
+			return gv, nil
+		}
+	}
+	return x, nil
+}
+
+// rewriteLocalArgVars rewrites variables appearing in rule arguments to
+// generated local names, recording them on stack so the body rewrite reuses
+// the same names.
+func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) {
+
+	vis := &ruleArgLocalRewriter{
+		stack: stack,
+		gen:   gen,
+	}
+
+	for i := range rule.Head.Args {
+		Walk(vis, rule.Head.Args[i])
+	}
+
+	for i := range vis.errs {
+		c.err(vis.errs[i])
+	}
+}
+
+// ruleArgLocalRewriter is the visitor used by rewriteLocalArgVars.
+type ruleArgLocalRewriter struct {
+	stack *localDeclaredVars
+	gen   *localVarGenerator
+	errs  []*Error
+}
+
+// Visit rewrites a single argument term. Returning nil stops descent;
+// returning vis recurses into children (refs and arrays).
+func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor {
+
+	t, ok := x.(*Term)
+	if !ok {
+		return vis
+	}
+
+	switch v := t.Value.(type) {
+	case Var:
+		// Reuse an existing generated name if this var was already declared.
+		gv, ok := vis.stack.Declared(v)
+		if !ok {
+			gv = vis.gen.Generate()
+			vis.stack.Insert(v, gv, argVar)
+		}
+		t.Value = gv
+		return nil
+	case *object:
+		if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) {
+			vcpy := v.Copy()
+			Walk(vis, vcpy)
+			return k, vcpy, nil
+		}); err != nil {
+			vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error()))
+		} else {
+			t.Value = cpy
+		}
+		return nil
+	case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set:
+		// Scalars are no-ops. Comprehensions are handled above. Sets must not
+		// contain variables.
+		return nil
+	case Call:
+		vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls"))
+		return nil
+	default:
+		// Recurse on refs and arrays. Any embedded
+		// variables can be rewritten.
+		return vis
+	}
+}
+
+// rewriteWithModifiers rewrites `with` modifier values in every rule body so
+// they can be evaluated (hoisting terms that require evaluation).
+func (c *Compiler) rewriteWithModifiers() {
+	f := newEqualityFactory(c.localvargen)
+	for _, name := range c.sorted {
+		mod := c.Modules[name]
+		t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
+			body, ok := x.(Body)
+			if !ok {
+				return x, nil
+			}
+			body, err := rewriteWithModifiersInBody(c, f, body)
+			if err != nil {
+				c.err(err)
+			}
+
+			return body, nil
+		})
+		Transform(t, mod)
+	}
+}
+
+// setModuleTree rebuilds the module tree from the current module set.
+func (c *Compiler) setModuleTree() {
+	c.ModuleTree = NewModuleTree(c.Modules)
+}
+
+// setRuleTree rebuilds the rule tree from the module tree.
+func (c *Compiler) setRuleTree() {
+	c.RuleTree = NewRuleTree(c.ModuleTree)
+}
+
+// setGraph rebuilds the rule dependency graph used by the recursion and type
+// checks.
+func (c *Compiler) setGraph() {
+	c.Graph = NewGraph(c.Modules, c.GetRulesDynamic)
+}
+
+// queryCompiler is the concrete QueryCompiler implementation. It borrows the
+// module compiler's state (built-ins, type env, indices) and layers
+// query-specific context on top.
+type queryCompiler struct {
+	compiler             *Compiler
+	qctx                 *QueryContext
+	typeEnv              *TypeEnv
+	rewritten            map[Var]Var
+	after                map[string][]QueryCompilerStageDefinition
+	unsafeBuiltins       map[string]struct{}
+	comprehensionIndices map[*Term]*ComprehensionIndex
+}
+
+// newQueryCompiler returns a QueryCompiler backed by compiler.
+func newQueryCompiler(compiler *Compiler) QueryCompiler {
+	qc := &queryCompiler{
+		compiler:             compiler,
+		qctx:                 nil,
+		after:                map[string][]QueryCompilerStageDefinition{},
+		comprehensionIndices: map[*Term]*ComprehensionIndex{},
+	}
+	return qc
+}
+
+// WithContext sets the QueryContext used by subsequent Compile calls.
+func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler {
+	qc.qctx = qctx
+	return qc
+}
+
+// WithStageAfter registers a stage to run after the named built-in stage.
+func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler {
+	qc.after[after] = append(qc.after[after], stage)
+	return qc
+}
+
+// WithUnsafeBuiltins overrides the compiler-inherited unsafe-builtin set.
+func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler {
+	qc.unsafeBuiltins = unsafe
+	return qc
+}
+
+// RewrittenVars returns the generated-to-original var mapping from the last
+// Compile call.
+func (qc *queryCompiler) RewrittenVars() map[Var]Var {
+	return qc.rewritten
+}
+
+// ComprehensionIndex looks up a comprehension index, preferring query-local
+// indices over the module compiler's.
+func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex {
+	if result, ok := qc.comprehensionIndices[term]; ok {
+		return result
+	} else if result, ok := qc.compiler.comprehensionIndices[term]; ok {
+		return result
+	}
+	return nil
+}
+
+// runStage executes a built-in query-compiler stage with optional timing.
+func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) {
+	if qc.compiler.metrics != nil {
+		qc.compiler.metrics.Timer(metricName).Start()
+		defer qc.compiler.metrics.Timer(metricName).Stop()
+	}
+	return s(qctx, query)
+}
+
+// runStageAfter executes a caller-registered stage with optional timing.
+func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) {
+	if qc.compiler.metrics != nil {
+		qc.compiler.metrics.Timer(metricName).Start()
+		defer qc.compiler.metrics.Timer(metricName).Stop()
+	}
+	return s(qc, query)
+}
+
// Compile runs the full query compilation pipeline over a copy of query and
// returns the compiled body. Stage order is significant: rewriting stages
// must run before safety and type checking. After each built-in stage, any
// stages registered via WithStageAfter for that stage's name are run. Errors
// are truncated to the parent compiler's error limit.
func (qc *queryCompiler) Compile(query Body) (Body, error) {

	query = query.Copy()

	stages := []struct {
		name       string
		metricName string
		f          func(*QueryContext, Body) (Body, error)
	}{
		{"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs},
		{"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars},
		{"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms},
		{"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms},
		{"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers},
		{"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs},
		{"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety},
		{"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms},
		{"CheckTypes", "query_compile_stage_check_types", qc.checkTypes},
		{"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins},
		{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices},
	}

	// Work on a copy of the context so stages may mutate it (e.g. clearing
	// imports) without affecting the compiler's stored context.
	qctx := qc.qctx.Copy()

	for _, s := range stages {
		var err error
		query, err = qc.runStage(s.metricName, qctx, query, s.f)
		if err != nil {
			return nil, qc.applyErrorLimit(err)
		}
		// Run any user stages hooked after this built-in stage.
		for _, s := range qc.after[s.name] {
			query, err = qc.runStageAfter(s.MetricName, query, s.Stage)
			if err != nil {
				return nil, qc.applyErrorLimit(err)
			}
		}
	}

	return query, nil
}
+
// TypeEnv returns the type environment built during the CheckTypes stage
// (nil until that stage has run).
func (qc *queryCompiler) TypeEnv() *TypeEnv {
	return qc.typeEnv
}
+
+func (qc *queryCompiler) applyErrorLimit(err error) error {
+ if errs, ok := err.(Errors); ok {
+ if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs {
+ err = append(errs[:qc.compiler.maxErrs], errLimitReached)
+ }
+ }
+ return err
+}
+
// resolveRefs rewrites names in the query body into fully-qualified
// references using the query context's package exports and imports.
func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) {

	var globals map[Var]Ref

	if qctx != nil && qctx.Package != nil {
		var ruleExports []Var
		rules := qc.compiler.getExports()
		if exist, ok := rules.Get(qctx.Package.Path); ok {
			ruleExports = exist.([]Var)
		}

		// NOTE(review): imports are read from qc.qctx while the package comes
		// from the qctx copy; equivalent today because qctx is a copy of
		// qc.qctx (see Compile), but worth unifying — confirm.
		globals = getGlobals(qctx.Package, ruleExports, qc.qctx.Imports)
		// Imports are consumed here; clear them so later stages don't re-apply.
		qctx.Imports = nil
	}

	// Vars declared in the body shadow globals and must not be resolved.
	ignore := &declaredVarStack{declaredVars(body)}

	return resolveRefsInBody(globals, ignore, body), nil
}
+
// rewriteComprehensionTerms hoists comprehensions appearing inside terms
// into separate generated equality expressions.
func (qc *queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) {
	gen := newLocalVarGenerator("q", body)
	f := newEqualityFactory(gen)
	node, err := rewriteComprehensionTerms(f, body)
	if err != nil {
		return nil, err
	}
	return node.(Body), nil
}

// rewriteDynamicTerms hoists dynamic terms (e.g. refs in operand positions)
// into generated equality expressions.
func (qc *queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) {
	gen := newLocalVarGenerator("q", body)
	f := newEqualityFactory(gen)
	return rewriteDynamics(f, body), nil
}

// rewriteExprTerms rewrites expressions that appear in term positions within
// the query body.
func (qc *queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) {
	gen := newLocalVarGenerator("q", body)
	return rewriteExprTermsInBody(gen, body), nil
}
+
// rewriteLocalVars rewrites declared (assigned) local variables in the query
// body into generated vars and records the mapping so callers can translate
// results back to the user's names (see RewrittenVars).
func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) {
	gen := newLocalVarGenerator("q", body)
	stack := newLocalDeclaredVars()
	body, _, err := rewriteLocalVars(gen, stack, nil, body)
	if len(err) != 0 {
		return nil, err
	}
	qc.rewritten = make(map[Var]Var, len(stack.rewritten))
	for k, v := range stack.rewritten {
		// The vars returned during the rewrite will include all seen vars,
		// even if they're not declared with an assignment operation. We don't
		// want to include these inside the rewritten set though.
		// NOTE(review): the loop copies every entry of stack.rewritten
		// unconditionally — confirm stack.rewritten holds only declared vars,
		// otherwise this comment and the code disagree.
		qc.rewritten[k] = v
	}
	return body, nil
}
+
// checkUndefinedFuncs returns an error if the query calls functions that are
// not defined (arity lookup fails).
func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) {
	if errs := checkUndefinedFuncs(body, qc.compiler.GetArity); len(errs) > 0 {
		return nil, errs
	}
	return body, nil
}

// checkSafety reorders the query body so it can be evaluated left-to-right
// without unbound variables, or returns safety errors if no ordering exists.
func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) {
	safe := ReservedVars.Copy()
	reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body)
	if errs := safetyErrorSlice(unsafe); len(errs) > 0 {
		return nil, errs
	}
	return reordered, nil
}

// checkTypes type-checks the query body against the compiler's type
// environment and stores the resulting query-level environment on qc.
func (qc *queryCompiler) checkTypes(qctx *QueryContext, body Body) (Body, error) {
	var errs Errors
	checker := newTypeChecker().WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars))
	qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body)
	if len(errs) > 0 {
		return nil, errs
	}
	return body, nil
}
+
+func (qc *queryCompiler) checkUnsafeBuiltins(qctx *QueryContext, body Body) (Body, error) {
+ var unsafe map[string]struct{}
+ if qc.unsafeBuiltins != nil {
+ unsafe = qc.unsafeBuiltins
+ } else {
+ unsafe = qc.compiler.unsafeBuiltinsMap
+ }
+ errs := checkUnsafeBuiltins(unsafe, body)
+ if len(errs) > 0 {
+ return nil, errs
+ }
+ return body, nil
+}
+
// rewriteWithModifiers rewrites `with` modifier values in the query body,
// hoisting values that require evaluation into generated equalities.
func (qc *queryCompiler) rewriteWithModifiers(qctx *QueryContext, body Body) (Body, error) {
	f := newEqualityFactory(newLocalVarGenerator("q", body))
	body, err := rewriteWithModifiersInBody(qc.compiler, f, body)
	if err != nil {
		return nil, Errors{err}
	}
	return body, nil
}

// buildComprehensionIndices computes comprehension indices for the query and
// stores them on qc for retrieval via ComprehensionIndex.
func (qc *queryCompiler) buildComprehensionIndices(qctx *QueryContext, body Body) (Body, error) {
	// NOTE(tsandall): The query compiler does not have a metrics object so we
	// cannot record index metrics currently.
	_ = buildComprehensionIndices(qc.compiler.GetArity, ReservedVars, body, qc.comprehensionIndices)
	return body, nil
}
+
// ComprehensionIndex specifies how the comprehension term can be indexed. The keys
// tell the evaluator what variables to use for indexing. In the future, the index
// could be expanded with more information that would allow the evaluator to index
// a larger fragment of comprehensions (e.g., by closing over variables in the outer
// query.)
type ComprehensionIndex struct {
	Term *Term   // the comprehension term being indexed
	Keys []*Term // sorted variable terms used as the index key set
}
+
+func (ci *ComprehensionIndex) String() string {
+ if ci == nil {
+ return ""
+ }
+ return fmt.Sprintf("", NewArray(ci.Keys...))
+}
+
// buildComprehensionIndices walks all bodies under node and records in result
// an index for each comprehension expression that can be safely indexed.
// candidates seeds the set of variables eligible as index keys. Returns the
// number of indices built.
func buildComprehensionIndices(arity func(Ref) int, candidates VarSet, node interface{}, result map[*Term]*ComprehensionIndex) (n uint64) {
	WalkBodies(node, func(b Body) bool {
		cpy := candidates.Copy()
		for _, expr := range b {
			if index := getComprehensionIndex(arity, cpy, expr); index != nil {
				result[index.Term] = index
				n++
			}
			// Any variables appearing in the expressions leading up to the comprehension
			// are fair-game to be used as index keys.
			cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true}))
		}
		return false
	})
	return n
}
+
// getComprehensionIndex returns a ComprehensionIndex for expr if the
// expression is an indexable `<var> = <comprehension>` (or the reverse)
// equality, or nil when indexing is impossible or would not help. The
// decision procedure is order-sensitive: safety, regression, and nesting
// checks all run before the key set is computed.
func getComprehensionIndex(arity func(Ref) int, candidates VarSet, expr *Expr) *ComprehensionIndex {

	// Ignore everything except = expressions. Extract
	// the comprehension term from the expression.
	if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 {
		return nil
	}

	var term *Term

	lhs, rhs := expr.Operand(0), expr.Operand(1)

	if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) {
		term = rhs
	} else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) {
		term = lhs
	}

	if term == nil {
		return nil
	}

	// Ignore comprehensions that contain expressions that close over variables
	// in the outer body if those variables are not also output variables in the
	// comprehension body. In other words, ignore comprehensions that we cannot
	// safely evaluate without bindings from the outer body. For example:
	//
	//   x = [1]
	//   [true | data.y[z] = x]     # safe to evaluate w/o outer body
	//   [true | data.y[z] = x[0]]  # NOT safe to evaluate because 'x' would be unsafe.
	//
	// By identifying output variables in the body we also know what to index on by
	// intersecting with candidate variables from the outer query.
	//
	// For example:
	//
	//   x = data.foo[_]
	//   _ = [y | data.bar[y] = x]  # index on 'x'
	//
	// This query goes from O(data.foo*data.bar) to O(data.foo+data.bar).
	var body Body

	switch x := term.Value.(type) {
	case *ArrayComprehension:
		body = x.Body
	case *SetComprehension:
		body = x.Body
	case *ObjectComprehension:
		body = x.Body
	}

	outputs := outputVarsForBody(body, arity, ReservedVars)
	unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars)

	if len(unsafe) > 0 {
		return nil
	}

	// Similarly, ignore comprehensions that contain references with output variables
	// that intersect with the candidates. Indexing these comprehensions could worsen
	// performance.
	regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates)
	regressionVis.Walk(body)
	if regressionVis.worse {
		return nil
	}

	// Check if any nested comprehensions close over candidates. If any intersection is found
	// the comprehension cannot be cached because it would require closing over the candidates
	// which the evaluator does not support today.
	nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates)
	nestedVis.Walk(body)
	if nestedVis.found {
		return nil
	}

	// Make a sorted set of variable names that will serve as the index key set.
	// Sort to ensure deterministic indexing. In future this could be relaxed
	// if we can decide that one ordering is better than another. If the set is
	// empty, there is no indexing to do.
	indexVars := candidates.Intersect(outputs)
	if len(indexVars) == 0 {
		return nil
	}

	result := make([]*Term, 0, len(indexVars))

	for v := range indexVars {
		result = append(result, NewTerm(v))
	}

	sort.Slice(result, func(i, j int) bool {
		return result[i].Value.Compare(result[j].Value) < 0
	})

	return &ComprehensionIndex{Term: term, Keys: result}
}
+
// comprehensionIndexRegressionCheckVisitor detects cases where indexing a
// comprehension could make evaluation worse: a reference (or a blacklisted
// built-in call) whose output variables intersect the candidate keys before
// those variables have appeared as plain vars.
type comprehensionIndexRegressionCheckVisitor struct {
	candidates VarSet // candidate index key variables from the outer query
	seen       VarSet // plain vars encountered so far during the walk
	worse      bool   // set once a potential regression is found
}

// TODO(tsandall): Improve this so that users can either supply this list explicitly
// or the information is maintained on the built-in function declaration. What we really
// need to know is whether the built-in function allows callers to push down output
// values or not. It's unlikely that anything outside of OPA does this today so this
// solution is fine for now.
var comprehensionIndexBlacklist = map[string]int{
	WalkBuiltin.Name: len(WalkBuiltin.Decl.Args()),
}

// newComprehensionIndexRegressionCheckVisitor returns a visitor checking
// against the given candidate key set.
func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor {
	return &comprehensionIndexRegressionCheckVisitor{
		candidates: candidates,
		seen:       NewVarSet(),
	}
}

// Walk visits x; traversal short-circuits once a regression is found.
func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) {
	NewGenericVisitor(vis.visit).Walk(x)
}

// visit inspects a single node; returning true stops descent into it.
func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool {
	if !vis.worse {
		switch x := x.(type) {
		case *Expr:
			// For blacklisted built-ins, check the output-position operand.
			operands := x.Operands()
			if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) {
				vis.assertEmptyIntersection(operands[pos].Vars())
			}
		case Ref:
			vis.assertEmptyIntersection(x.OutputVars())
		case Var:
			vis.seen.Add(x)
		// Always skip comprehensions. We do not have to visit their bodies here.
		case *ArrayComprehension, *SetComprehension, *ObjectComprehension:
			return true
		}
	}
	return vis.worse
}

// assertEmptyIntersection marks the walk as worse if any candidate var occurs
// in vs before it was seen as a plain variable.
func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) {
	for v := range vs {
		if vis.candidates.Contains(v) && !vis.seen.Contains(v) {
			vis.worse = true
			return
		}
	}
}
+
// comprehensionIndexNestedCandidateVisitor detects nested comprehensions that
// close over any of the candidate variables, which prevents caching.
type comprehensionIndexNestedCandidateVisitor struct {
	candidates VarSet // candidate index key variables
	nested     bool   // NOTE(review): never read or written in this file — possibly dead; confirm before removing
	found      bool   // set when a nested comprehension closes over a candidate
}

// newComprehensionIndexNestedCandidateVisitor returns a visitor checking
// against the given candidate set.
func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor {
	return &comprehensionIndexNestedCandidateVisitor{
		candidates: candidates,
	}
}

// Walk visits x; traversal short-circuits once an intersection is found.
func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) {
	NewGenericVisitor(vis.visit).Walk(x)
}

// visit inspects a node; comprehension values are scanned for candidate vars
// and never descended into further.
func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool {

	if vis.found {
		return true
	}

	if v, ok := x.(Value); ok && IsComprehension(v) {
		varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true})
		varVis.Walk(v)
		vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0
		return true
	}

	return false
}
+
// ModuleTreeNode represents a node in the module tree. The module
// tree is keyed by the package path.
type ModuleTreeNode struct {
	Key      Value                     // path component for this node
	Modules  []*Module                 // modules whose package path ends at this node
	Children map[Value]*ModuleTreeNode // child nodes keyed by next path component
	Hide     bool                      // true for the data.system subtree
}

// NewModuleTree returns a new ModuleTreeNode that represents the root
// of the module tree populated with the given modules.
func NewModuleTree(mods map[string]*Module) *ModuleTreeNode {
	root := &ModuleTreeNode{
		Children: map[Value]*ModuleTreeNode{},
	}
	for _, m := range mods {
		node := root
		for i, x := range m.Package.Path {
			c, ok := node.Children[x.Value]
			if !ok {
				var hide bool
				// Mark the data.system subtree (path index 1) as hidden.
				if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 {
					hide = true
				}
				c = &ModuleTreeNode{
					Key:      x.Value,
					Children: map[Value]*ModuleTreeNode{},
					Hide:     hide,
				}
				node.Children[x.Value] = c
			}
			node = c
		}
		node.Modules = append(node.Modules, m)
	}
	return root
}

// Size returns the number of modules in the tree.
func (n *ModuleTreeNode) Size() int {
	s := len(n.Modules)
	for _, c := range n.Children {
		s += c.Size()
	}
	return s
}

// DepthFirst performs a depth-first traversal of the module tree rooted at n.
// If f returns true, traversal will not continue to the children of n.
func (n *ModuleTreeNode) DepthFirst(f func(node *ModuleTreeNode) bool) {
	if !f(n) {
		for _, node := range n.Children {
			node.DepthFirst(f)
		}
	}
}
+
// TreeNode represents a node in the rule tree. The rule tree is keyed by
// rule path.
type TreeNode struct {
	Key      Value               // path component for this node
	Values   []util.T            // rules stored at this node (leaf nodes only)
	Children map[Value]*TreeNode // child nodes keyed by next path component
	Sorted   []Value             // child keys in deterministic sorted order
	Hide     bool                // inherited from the module tree (data.system)
}

// NewRuleTree returns a new TreeNode that represents the root
// of the rule tree populated with the given rules.
func NewRuleTree(mtree *ModuleTreeNode) *TreeNode {

	ruleSets := map[String][]util.T{}

	// Build rule sets for this package.
	for _, mod := range mtree.Modules {
		for _, rule := range mod.Rules {
			key := String(rule.Head.Name)
			ruleSets[key] = append(ruleSets[key], rule)
		}
	}

	// Each rule set becomes a leaf node.
	children := map[Value]*TreeNode{}
	sorted := make([]Value, 0, len(ruleSets))

	for key, rules := range ruleSets {
		sorted = append(sorted, key)
		children[key] = &TreeNode{
			Key:      key,
			Children: nil,
			Values:   rules,
		}
	}

	// Each module in subpackage becomes child node.
	for key, child := range mtree.Children {
		sorted = append(sorted, key)
		children[child.Key] = NewRuleTree(child)
	}

	// Sort child keys so traversal order is deterministic.
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].Compare(sorted[j]) < 0
	})

	return &TreeNode{
		Key:      mtree.Key,
		Values:   nil,
		Children: children,
		Sorted:   sorted,
		Hide:     mtree.Hide,
	}
}

// Size returns the number of rules in the tree.
func (n *TreeNode) Size() int {
	s := len(n.Values)
	for _, c := range n.Children {
		s += c.Size()
	}
	return s
}

// Child returns n's child with key k. Only String and Var keys are
// addressable; other value types return nil.
func (n *TreeNode) Child(k Value) *TreeNode {
	switch k.(type) {
	case String, Var:
		return n.Children[k]
	}
	return nil
}

// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If
// f returns true, traversal will not continue to the children of n.
func (n *TreeNode) DepthFirst(f func(node *TreeNode) bool) {
	if !f(n) {
		for _, node := range n.Children {
			node.DepthFirst(f)
		}
	}
}
+
// Graph represents the graph of dependencies between rules.
type Graph struct {
	adj    map[util.T]map[util.T]struct{} // forward edges: rule -> rules it depends on
	radj   map[util.T]map[util.T]struct{} // reverse edges: rule -> rules depending on it
	nodes  map[util.T]struct{}            // all nodes in the graph
	sorted []util.T                       // memoized topological sort (nil until Sort succeeds)
}

// NewGraph returns a new Graph based on modules. The list function must return
// the rules referred to directly by the ref.
func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph {

	graph := &Graph{
		adj:    map[util.T]map[util.T]struct{}{},
		radj:   map[util.T]map[util.T]struct{}{},
		nodes:  map[util.T]struct{}{},
		sorted: nil,
	}

	// Create visitor to walk a rule AST and add edges to the rule graph for
	// each dependency.
	vis := func(a *Rule) *GenericVisitor {
		stop := false
		return NewGenericVisitor(func(x interface{}) bool {
			switch x := x.(type) {
			case Ref:
				// Add an edge to every rule (and its else chain) the ref
				// resolves to.
				for _, b := range list(x) {
					for node := b; node != nil; node = node.Else {
						graph.addDependency(a, node)
					}
				}
			case *Rule:
				if stop {
					// Do not recurse into else clauses (which will be handled
					// by the outer visitor.)
					return true
				}
				stop = true
			}
			return false
		})
	}

	// Walk over all rules, add them to graph, and build adjacency lists.
	for _, module := range modules {
		WalkRules(module, func(a *Rule) bool {
			graph.addNode(a)
			vis(a).Walk(a)
			return false
		})
	}

	return graph
}

// Dependencies returns the set of rules that x depends on.
func (g *Graph) Dependencies(x util.T) map[util.T]struct{} {
	return g.adj[x]
}

// Dependents returns the set of rules that depend on x.
func (g *Graph) Dependents(x util.T) map[util.T]struct{} {
	return g.radj[x]
}

// Sort returns a slice of rules sorted by dependencies. If a cycle is found,
// ok is set to false. The result is memoized on first success.
func (g *Graph) Sort() (sorted []util.T, ok bool) {
	if g.sorted != nil {
		return g.sorted, true
	}

	sort := &graphSort{
		sorted: make([]util.T, 0, len(g.nodes)),
		deps:   g.Dependencies,
		marked: map[util.T]struct{}{},
		temp:   map[util.T]struct{}{},
	}

	for node := range g.nodes {
		if !sort.Visit(node) {
			return nil, false
		}
	}

	g.sorted = sort.sorted
	return g.sorted, true
}

// addDependency records the edge u -> v (u depends on v) in both the forward
// and reverse adjacency maps, adding either node if missing.
func (g *Graph) addDependency(u util.T, v util.T) {

	if _, ok := g.nodes[u]; !ok {
		g.addNode(u)
	}

	if _, ok := g.nodes[v]; !ok {
		g.addNode(v)
	}

	edges, ok := g.adj[u]
	if !ok {
		edges = map[util.T]struct{}{}
		g.adj[u] = edges
	}

	edges[v] = struct{}{}

	edges, ok = g.radj[v]
	if !ok {
		edges = map[util.T]struct{}{}
		g.radj[v] = edges
	}

	edges[u] = struct{}{}
}

// addNode inserts n into the node set.
func (g *Graph) addNode(n util.T) {
	g.nodes[n] = struct{}{}
}
+
// graphSort holds state for a depth-first topological sort with cycle
// detection (the classic three-color DFS: temp = gray, marked = black).
type graphSort struct {
	sorted []util.T                     // output order (dependencies first)
	deps   func(util.T) map[util.T]struct{} // edge lookup
	marked map[util.T]struct{}          // permanently visited nodes
	temp   map[util.T]struct{}          // nodes on the current DFS path
}

// Marked reports whether node has been permanently visited.
func (sort *graphSort) Marked(node util.T) bool {
	_, marked := sort.marked[node]
	return marked
}

// Visit performs DFS from node, appending nodes to sorted postorder. It
// returns false if a cycle is detected (node found on the current path).
func (sort *graphSort) Visit(node util.T) (ok bool) {
	if _, ok := sort.temp[node]; ok {
		return false
	}
	if sort.Marked(node) {
		return true
	}
	sort.temp[node] = struct{}{}
	for other := range sort.deps(node) {
		if !sort.Visit(other) {
			return false
		}
	}
	sort.marked[node] = struct{}{}
	delete(sort.temp, node)
	sort.sorted = append(sort.sorted, node)
	return true
}
+
// GraphTraversal is a Traversal that understands the dependency graph
type GraphTraversal struct {
	graph   *Graph              // graph being traversed
	visited map[util.T]struct{} // nodes already visited
}

// NewGraphTraversal returns a Traversal for the dependency graph
func NewGraphTraversal(graph *Graph) *GraphTraversal {
	return &GraphTraversal{
		graph:   graph,
		visited: map[util.T]struct{}{},
	}
}

// Edges lists all dependency connections for a given node
func (g *GraphTraversal) Edges(x util.T) []util.T {
	r := []util.T{}
	for v := range g.graph.Dependencies(x) {
		r = append(r, v)
	}
	return r
}

// Visited returns whether a node has been visited, setting a node to visited if not
func (g *GraphTraversal) Visited(u util.T) bool {
	_, ok := g.visited[u]
	g.visited[u] = struct{}{}
	return ok
}
+
// unsafePair pairs an expression with the set of unsafe variables it contains.
type unsafePair struct {
	Expr *Expr
	Vars VarSet
}

// unsafeVarLoc pairs an unsafe variable with the earliest location at which
// it was found.
type unsafeVarLoc struct {
	Var Var
	Loc *Location
}

// unsafeVars maps each expression to the set of variables that are unsafe in it.
type unsafeVars map[*Expr]VarSet

// Add records v as unsafe within e.
func (vs unsafeVars) Add(e *Expr, v Var) {
	if u, ok := vs[e]; ok {
		u[v] = struct{}{}
	} else {
		vs[e] = VarSet{v: struct{}{}}
	}
}

// Set replaces the unsafe set for e with s.
func (vs unsafeVars) Set(e *Expr, s VarSet) {
	vs[e] = s
}

// Update merges all entries of o into vs.
func (vs unsafeVars) Update(o unsafeVars) {
	for k, v := range o {
		if _, ok := vs[k]; !ok {
			vs[k] = VarSet{}
		}
		vs[k].Update(v)
	}
}

// Vars flattens vs into a slice of (var, location) pairs, deduplicated per
// var and sorted by location for deterministic error reporting.
func (vs unsafeVars) Vars() (result []unsafeVarLoc) {

	locs := map[Var]*Location{}

	// If var appears in multiple sets then pick first by location.
	for expr, vars := range vs {
		for v := range vars {
			if locs[v].Compare(expr.Location) > 0 {
				locs[v] = expr.Location
			}
		}
	}

	for v, loc := range locs {
		result = append(result, unsafeVarLoc{
			Var: v,
			Loc: loc,
		})
	}

	sort.Slice(result, func(i, j int) bool {
		return result[i].Loc.Compare(result[j].Loc) < 0
	})

	return result
}

// Slice flattens vs into unordered (expr, vars) pairs.
func (vs unsafeVars) Slice() (result []unsafePair) {
	for expr, vs := range vs {
		result = append(result, unsafePair{
			Expr: expr,
			Vars: vs,
		})
	}
	return
}
+
// reorderBodyForSafety returns a copy of the body ordered such that
// left to right evaluation of the body will not encounter unbound variables
// in input positions or negated expressions.
//
// Expressions are added to the re-ordered body as soon as they are considered
// safe. If multiple expressions become safe in the same pass, they are added
// in their original order. This results in minimal re-ordering of the body.
//
// If the body cannot be reordered to ensure safety, the second return value
// contains a mapping of expressions to unsafe variables in those expressions.
func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) {

	// Closures must be ordered after the expressions that bind the vars
	// they close over; bail out early if that alone is impossible.
	body, unsafe := reorderBodyForClosures(arity, globals, body)
	if len(unsafe) != 0 {
		return nil, unsafe
	}

	reordered := Body{}
	safe := VarSet{}

	// Seed the safe/unsafe sets: globals are safe, everything else in a
	// safety-relevant position starts out unsafe.
	for _, e := range body {
		for v := range e.Vars(SafetyCheckVisitorParams) {
			if globals.Contains(v) {
				safe.Add(v)
			} else {
				unsafe.Add(e, v)
			}
		}
	}

	// Fixed-point loop: keep appending expressions whose unsafe set has been
	// emptied until no progress is made.
	for {
		n := len(reordered)

		for _, e := range body {
			if reordered.Contains(e) {
				continue
			}

			safe.Update(outputVarsForExpr(e, arity, safe))

			for v := range unsafe[e] {
				if safe.Contains(v) {
					delete(unsafe[e], v)
				}
			}

			if len(unsafe[e]) == 0 {
				delete(unsafe, e)
				reordered.Append(e)
			}
		}

		if len(reordered) == n {
			break
		}
	}

	// Recursively visit closures and perform the safety checks on them.
	// Update the globals at each expression to include the variables that could
	// be closed over.
	g := globals.Copy()
	for i, e := range reordered {
		if i > 0 {
			g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams))
		}
		xform := &bodySafetyTransformer{
			builtins: builtins,
			arity:    arity,
			current:  e,
			globals:  g,
			unsafe:   unsafe,
		}
		NewGenericVisitor(xform.Visit).Walk(e)
	}

	return reordered, unsafe
}
+
// bodySafetyTransformer reorders comprehension bodies nested inside an
// expression for safety, accumulating any unsafe vars it discovers.
type bodySafetyTransformer struct {
	builtins map[string]*Builtin // built-in function table
	arity    func(Ref) int       // arity lookup for function refs
	current  *Expr               // expression currently being visited
	globals  VarSet              // vars safe to close over at this point
	unsafe   unsafeVars          // accumulator for unsafe vars found
}

// Visit walks a term; object/set elements are copied before visiting so the
// rewritten comprehension bodies replace the originals in place.
func (xform *bodySafetyTransformer) Visit(x interface{}) bool {
	if term, ok := x.(*Term); ok {
		switch x := term.Value.(type) {
		case *object:
			cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) {
				kcpy := k.Copy()
				NewGenericVisitor(xform.Visit).Walk(kcpy)
				vcpy := v.Copy()
				NewGenericVisitor(xform.Visit).Walk(vcpy)
				return kcpy, vcpy, nil
			})
			term.Value = cpy
			return true
		case *set:
			cpy, _ := x.Map(func(v *Term) (*Term, error) {
				vcpy := v.Copy()
				NewGenericVisitor(xform.Visit).Walk(vcpy)
				return vcpy, nil
			})
			term.Value = cpy
			return true
		case *ArrayComprehension:
			xform.reorderArrayComprehensionSafety(x)
			return true
		case *ObjectComprehension:
			xform.reorderObjectComprehensionSafety(x)
			return true
		case *SetComprehension:
			xform.reorderSetComprehensionSafety(x)
			return true
		}
	}
	return false
}

// reorderComprehensionSafety reorders a comprehension body for safety. tv is
// the set of vars appearing in the comprehension's head term(s); any head var
// not bound by the body or globals is recorded as unsafe. On failure the
// original body is returned unchanged.
func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body {
	bv := body.Vars(SafetyCheckVisitorParams)
	bv.Update(xform.globals)
	uv := tv.Diff(bv)
	for v := range uv {
		xform.unsafe.Add(xform.current, v)
	}

	r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body)
	if len(u) == 0 {
		return r
	}

	xform.unsafe.Update(u)
	return body
}

// reorderArrayComprehensionSafety reorders an array comprehension's body.
func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) {
	ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body)
}

// reorderObjectComprehensionSafety reorders an object comprehension's body,
// treating both the key and value terms as head vars.
func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) {
	tv := oc.Key.Vars()
	tv.Update(oc.Value.Vars())
	oc.Body = xform.reorderComprehensionSafety(tv, oc.Body)
}

// reorderSetComprehensionSafety reorders a set comprehension's body.
func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) {
	sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body)
}
+
// reorderBodyForClosures returns a copy of the body ordered such that
// expressions (such as array comprehensions) that close over variables are ordered
// after other expressions that contain the same variable in an output position.
func reorderBodyForClosures(arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) {

	reordered := Body{}
	unsafe := unsafeVars{}

	// Fixed-point loop: append expressions whose closed-over vars are all
	// bound by the already-reordered prefix, until no progress is made.
	for {
		n := len(reordered)

		for _, e := range body {
			if reordered.Contains(e) {
				continue
			}

			// Collect vars that are contained in closures within this
			// expression.
			vs := VarSet{}
			WalkClosures(e, func(x interface{}) bool {
				vis := &VarVisitor{vars: vs}
				vis.Walk(x)
				return true
			})

			// Compute vars that are closed over from the body but not yet
			// contained in the output position of an expression in the reordered
			// body. These vars are considered unsafe.
			cv := vs.Intersect(body.Vars(SafetyCheckVisitorParams)).Diff(globals)
			uv := cv.Diff(outputVarsForBody(reordered, arity, globals))

			if len(uv) == 0 {
				reordered = append(reordered, e)
				delete(unsafe, e)
			} else {
				unsafe.Set(e, uv)
			}
		}

		if len(reordered) == n {
			break
		}
	}

	return reordered, unsafe
}
+
// OutputVarsFromBody returns all variables which are the "output" for
// the given body. For safety checks this means that they would be
// made safe by the body.
func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet {
	return outputVarsForBody(body, c.GetArity, safe)
}

// outputVarsForBody folds outputVarsForExpr over the body left-to-right,
// so later expressions may rely on vars bound by earlier ones. The initial
// safe set is excluded from the result.
func outputVarsForBody(body Body, getArity func(Ref) int, safe VarSet) VarSet {
	o := safe.Copy()
	for _, e := range body {
		o.Update(outputVarsForExpr(e, getArity, o))
	}
	return o.Diff(safe)
}

// OutputVarsFromExpr returns all variables which are the "output" for
// the given expression. For safety checks this means that they would be
// made safe by the expr.
func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet {
	return outputVarsForExpr(expr, c.GetArity, safe)
}
+
// outputVarsForExpr computes the variables that evaluating expr would bind,
// given that the vars in safe are already bound. Negated expressions and
// expressions with unsafe `with` inputs bind nothing.
func outputVarsForExpr(expr *Expr, getArity func(Ref) int, safe VarSet) VarSet {

	// Negated expressions must be safe.
	if expr.Negated {
		return VarSet{}
	}

	// With modifier inputs must be safe.
	for _, with := range expr.With {
		unsafe := false
		WalkVars(with, func(v Var) bool {
			if !safe.Contains(v) {
				unsafe = true
				return true
			}
			return false
		})
		if unsafe {
			return VarSet{}
		}
	}

	switch terms := expr.Terms.(type) {
	case *Term:
		// Bare term expression: only refs inside it produce outputs.
		return outputVarsForTerms(expr, safe)
	case []*Term:
		if expr.IsEquality() {
			return outputVarsForExprEq(expr, safe)
		}

		operator, ok := terms[0].Value.(Ref)
		if !ok {
			return VarSet{}
		}

		// Unknown arity means the function is unknown; nothing is bound.
		arity := getArity(operator)
		if arity < 0 {
			return VarSet{}
		}

		return outputVarsForExprCall(expr, arity, safe, terms)
	default:
		panic("illegal expression")
	}
}
+
// outputVarsForExprEq computes the vars bound by an equality expression via
// unification of its two operands. Malformed equalities (wrong operand
// count) bind nothing beyond safe.
func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet {

	if !validEqAssignArgCount(expr) {
		return safe
	}

	output := outputVarsForTerms(expr, safe)
	output.Update(safe)
	output.Update(Unify(output, expr.Operand(0), expr.Operand(1)))

	return output.Diff(safe)
}

// outputVarsForExprCall computes the vars bound by a function/built-in call.
// All vars in input positions (the first arity operands) must already be
// safe; if so, vars in output positions become outputs.
func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet {

	output := outputVarsForTerms(expr, safe)

	// terms[0] is the operator, so inputs occupy terms[1..arity].
	numInputTerms := arity + 1
	if numInputTerms >= len(terms) {
		return output
	}

	vis := NewVarVisitor().WithParams(VarVisitorParams{
		SkipClosures:   true,
		SkipSets:       true,
		SkipObjectKeys: true,
		SkipRefHead:    true,
	})

	vis.Walk(Args(terms[:numInputTerms]))
	unsafe := vis.Vars().Diff(output).Diff(safe)

	if len(unsafe) > 0 {
		return VarSet{}
	}

	vis = NewVarVisitor().WithParams(VarVisitorParams{
		SkipRefHead:    true,
		SkipSets:       true,
		SkipObjectKeys: true,
		SkipClosures:   true,
	})

	vis.Walk(Args(terms[numInputTerms:]))
	output.Update(vis.vars)
	return output
}

// outputVarsForTerms collects the output vars of references within expr.
// A ref only produces outputs when its head is already safe; comprehensions
// are not descended into.
func outputVarsForTerms(expr *Expr, safe VarSet) VarSet {
	output := VarSet{}
	WalkTerms(expr, func(x *Term) bool {
		switch r := x.Value.(type) {
		case *SetComprehension, *ArrayComprehension, *ObjectComprehension:
			return true
		case Ref:
			// The ref head (a var or a term containing vars) must be safe
			// before the ref's output vars count.
			if v, ok := r[0].Value.(Var); ok {
				if !safe.Contains(v) {
					return true
				}
			} else {
				for k := range r[0].Vars() {
					if !safe.Contains(k) {
						return true
					}
				}
			}
			output.Update(r.OutputVars())
			return false
		}
		return false
	})
	return output
}
+
// equalityFactory generates fresh `<generated var> = <term>` expressions used
// by the rewriting stages to hoist terms out of complex positions.
type equalityFactory struct {
	gen *localVarGenerator
}

// newEqualityFactory returns an equalityFactory backed by gen.
func newEqualityFactory(gen *localVarGenerator) *equalityFactory {
	return &equalityFactory{gen}
}

// Generate returns a generated equality expression binding a fresh local var
// to other, carrying other's source location.
func (f *equalityFactory) Generate(other *Term) *Expr {
	term := NewTerm(f.gen.Generate()).SetLocation(other.Location)
	expr := Equality.Expr(term, other)
	expr.Generated = true
	expr.Location = other.Location
	return expr
}

// localVarGenerator produces fresh local variable names that do not collide
// with any variable already present in the walked AST.
type localVarGenerator struct {
	exclude VarSet // vars already in use; never emitted
	suffix  string // distinguishes generators (e.g. "q" for queries)
	next    int    // monotonically increasing counter
}

// newLocalVarGeneratorForModuleSet returns a generator whose exclude set
// covers every var in the given modules (walked in sorted order).
func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator {
	exclude := NewVarSet()
	vis := &VarVisitor{vars: exclude}
	for _, key := range sorted {
		vis.Walk(modules[key])
	}
	return &localVarGenerator{exclude: exclude, next: 0}
}

// newLocalVarGenerator returns a generator whose exclude set covers every
// var appearing under node.
func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator {
	exclude := NewVarSet()
	vis := &VarVisitor{vars: exclude}
	vis.Walk(node)
	return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0}
}

// Generate returns the next fresh var of the form __local<suffix><n>__,
// skipping any name present in the exclude set.
func (l *localVarGenerator) Generate() Var {
	for {
		result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__")
		l.next++
		if !l.exclude.Contains(result) {
			return result
		}
	}
}
+
// getGlobals builds the mapping of short names to fully-qualified references
// for a package: one entry per exported rule and one per import (using the
// alias when present, otherwise the last path component).
func getGlobals(pkg *Package, rules []Var, imports []*Import) map[Var]Ref {

	globals := map[Var]Ref{}

	// Populate globals with exports within the package.
	for _, v := range rules {
		global := append(Ref{}, pkg.Path...)
		global = append(global, &Term{Value: String(v)})
		globals[v] = global
	}

	// Populate globals with imports.
	for _, i := range imports {
		if len(i.Alias) > 0 {
			path := i.Path.Value.(Ref)
			globals[i.Alias] = path
		} else {
			path := i.Path.Value.(Ref)
			if len(path) == 1 {
				// Single-element path (e.g. `import input`): keyed by its var.
				globals[path[0].Value.(Var)] = path
			} else {
				// Multi-element path: keyed by the last component's name.
				v := path[len(path)-1].Value.(String)
				globals[Var(v)] = path
			}
		}
	}

	return globals
}
+
+func requiresEval(x *Term) bool {
+ if x == nil {
+ return false
+ }
+ return ContainsRefs(x) || ContainsComprehensions(x)
+}
+
// resolveRef returns a copy of ref with global names expanded to their full
// references. Vars on the ignore stack (locally declared) are left alone.
func resolveRef(globals map[Var]Ref, ignore *declaredVarStack, ref Ref) Ref {

	r := Ref{}
	for i, x := range ref {
		switch v := x.Value.(type) {
		case Var:
			if g, ok := globals[v]; ok && !ignore.Contains(v) {
				cpy := g.Copy()
				for i := range cpy {
					cpy[i].SetLocation(x.Location)
				}
				if i == 0 {
					// Head position: the global's ref becomes the new head.
					r = cpy
				} else {
					// Non-head position: embed the resolved ref as a term.
					r = append(r, NewTerm(cpy).SetLocation(x.Location))
				}
			} else {
				r = append(r, x)
			}
		case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
			// Composite values may themselves contain resolvable names.
			r = append(r, resolveRefsInTerm(globals, ignore, x))
		default:
			r = append(r, x)
		}
	}

	return r
}
+
+func resolveRefsInRule(globals map[Var]Ref, rule *Rule) error {
+ ignore := &declaredVarStack{}
+
+ vars := NewVarSet()
+ var vis *GenericVisitor
+ var err error
+
+ // Walk args to collect vars and transform body so that callers can shadow
+ // root documents.
+ vis = NewGenericVisitor(func(x interface{}) bool {
+ if err != nil {
+ return true
+ }
+ switch x := x.(type) {
+ case Var:
+ vars.Add(x)
+
+ // Object keys cannot be pattern matched so only walk values.
+ case *object:
+ x.Foreach(func(k, v *Term) {
+ vis.Walk(v)
+ })
+
+ // Skip terms that could contain vars that cannot be pattern matched.
+ case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
+ return true
+
+ case *Term:
+ if _, ok := x.Value.(Ref); ok {
+ if RootDocumentRefs.Contains(x) {
+ // We could support args named input, data, etc. however
+ // this would require rewriting terms in the head and body.
+ // Preventing root document shadowing is simpler, and
+ // arguably, will prevent confusing names from being used.
+ err = fmt.Errorf("args must not shadow %v (use a different variable name)", x)
+ return true
+ }
+ }
+ }
+ return false
+ })
+
+ vis.Walk(rule.Head.Args)
+
+ if err != nil {
+ return err
+ }
+
+ ignore.Push(vars)
+ ignore.Push(declaredVars(rule.Body))
+
+ if rule.Head.Key != nil {
+ rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key)
+ }
+
+ if rule.Head.Value != nil {
+ rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value)
+ }
+
+ rule.Body = resolveRefsInBody(globals, ignore, rule.Body)
+ return nil
+}
+
+func resolveRefsInBody(globals map[Var]Ref, ignore *declaredVarStack, body Body) Body {
+ r := Body{}
+ for _, expr := range body {
+ r = append(r, resolveRefsInExpr(globals, ignore, expr))
+ }
+ return r
+}
+
+func resolveRefsInExpr(globals map[Var]Ref, ignore *declaredVarStack, expr *Expr) *Expr {
+ cpy := *expr
+ switch ts := expr.Terms.(type) {
+ case *Term:
+ cpy.Terms = resolveRefsInTerm(globals, ignore, ts)
+ case []*Term:
+ buf := make([]*Term, len(ts))
+ for i := 0; i < len(ts); i++ {
+ buf[i] = resolveRefsInTerm(globals, ignore, ts[i])
+ }
+ cpy.Terms = buf
+ }
+ for _, w := range cpy.With {
+ w.Target = resolveRefsInTerm(globals, ignore, w.Target)
+ w.Value = resolveRefsInTerm(globals, ignore, w.Value)
+ }
+ return &cpy
+}
+
+func resolveRefsInTerm(globals map[Var]Ref, ignore *declaredVarStack, term *Term) *Term {
+ switch v := term.Value.(type) {
+ case Var:
+ if g, ok := globals[v]; ok && !ignore.Contains(v) {
+ cpy := g.Copy()
+ for i := range cpy {
+ cpy[i].SetLocation(term.Location)
+ }
+ return NewTerm(cpy).SetLocation(term.Location)
+ }
+ return term
+ case Ref:
+ fqn := resolveRef(globals, ignore, v)
+ cpy := *term
+ cpy.Value = fqn
+ return &cpy
+ case *object:
+ cpy := *term
+ cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) {
+ k = resolveRefsInTerm(globals, ignore, k)
+ v = resolveRefsInTerm(globals, ignore, v)
+ return k, v, nil
+ })
+ return &cpy
+ case *Array:
+ cpy := *term
+ cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...)
+ return &cpy
+ case Call:
+ cpy := *term
+ cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v))
+ return &cpy
+ case Set:
+ s, _ := v.Map(func(e *Term) (*Term, error) {
+ return resolveRefsInTerm(globals, ignore, e), nil
+ })
+ cpy := *term
+ cpy.Value = s
+ return &cpy
+ case *ArrayComprehension:
+ ac := &ArrayComprehension{}
+ ignore.Push(declaredVars(v.Body))
+ ac.Term = resolveRefsInTerm(globals, ignore, v.Term)
+ ac.Body = resolveRefsInBody(globals, ignore, v.Body)
+ cpy := *term
+ cpy.Value = ac
+ ignore.Pop()
+ return &cpy
+ case *ObjectComprehension:
+ oc := &ObjectComprehension{}
+ ignore.Push(declaredVars(v.Body))
+ oc.Key = resolveRefsInTerm(globals, ignore, v.Key)
+ oc.Value = resolveRefsInTerm(globals, ignore, v.Value)
+ oc.Body = resolveRefsInBody(globals, ignore, v.Body)
+ cpy := *term
+ cpy.Value = oc
+ ignore.Pop()
+ return &cpy
+ case *SetComprehension:
+ sc := &SetComprehension{}
+ ignore.Push(declaredVars(v.Body))
+ sc.Term = resolveRefsInTerm(globals, ignore, v.Term)
+ sc.Body = resolveRefsInBody(globals, ignore, v.Body)
+ cpy := *term
+ cpy.Value = sc
+ ignore.Pop()
+ return &cpy
+ default:
+ return term
+ }
+}
+
+func resolveRefsInTermArray(globals map[Var]Ref, ignore *declaredVarStack, terms *Array) []*Term {
+ cpy := make([]*Term, terms.Len())
+ for i := 0; i < terms.Len(); i++ {
+ cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i))
+ }
+ return cpy
+}
+
+func resolveRefsInTermSlice(globals map[Var]Ref, ignore *declaredVarStack, terms []*Term) []*Term {
+ cpy := make([]*Term, len(terms))
+ for i := 0; i < len(terms); i++ {
+ cpy[i] = resolveRefsInTerm(globals, ignore, terms[i])
+ }
+ return cpy
+}
+
+type declaredVarStack []VarSet
+
+func (s declaredVarStack) Contains(v Var) bool {
+ for i := len(s) - 1; i >= 0; i-- {
+ if _, ok := s[i][v]; ok {
+ return ok
+ }
+ }
+ return false
+}
+
+func (s declaredVarStack) Add(v Var) {
+ s[len(s)-1].Add(v)
+}
+
+func (s *declaredVarStack) Push(vs VarSet) {
+ *s = append(*s, vs)
+}
+
+func (s *declaredVarStack) Pop() {
+ curr := *s
+ *s = curr[:len(curr)-1]
+}
+
+func declaredVars(x interface{}) VarSet {
+ vars := NewVarSet()
+ vis := NewGenericVisitor(func(x interface{}) bool {
+ switch x := x.(type) {
+ case *Expr:
+ if x.IsAssignment() && validEqAssignArgCount(x) {
+ WalkVars(x.Operand(0), func(v Var) bool {
+ vars.Add(v)
+ return false
+ })
+ } else if decl, ok := x.Terms.(*SomeDecl); ok {
+ for i := range decl.Symbols {
+ vars.Add(decl.Symbols[i].Value.(Var))
+ }
+ }
+ case *ArrayComprehension, *SetComprehension, *ObjectComprehension:
+ return true
+ }
+ return false
+ })
+ vis.Walk(x)
+ return vars
+}
+
+// rewriteComprehensionTerms will rewrite comprehensions so that the term part
+// is bound to a variable in the body. This allows any type of term to be used
+// in the term part (even if the term requires evaluation.)
+//
+// For instance, given the following comprehension:
+//
+// [x[0] | x = y[_]; y = [1,2,3]]
+//
+// The comprehension would be rewritten as:
+//
+// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]]
+func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) {
+ return TransformComprehensions(node, func(x interface{}) (Value, error) {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ if requiresEval(x.Term) {
+ expr := f.Generate(x.Term)
+ x.Term = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ return x, nil
+ case *SetComprehension:
+ if requiresEval(x.Term) {
+ expr := f.Generate(x.Term)
+ x.Term = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ return x, nil
+ case *ObjectComprehension:
+ if requiresEval(x.Key) {
+ expr := f.Generate(x.Key)
+ x.Key = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ if requiresEval(x.Value) {
+ expr := f.Generate(x.Value)
+ x.Value = expr.Operand(0)
+ x.Body.Append(expr)
+ }
+ return x, nil
+ }
+ panic("illegal type")
+ })
+}
+
+// rewriteEquals will rewrite exprs under x as unification calls instead of ==
+// calls. For example:
+//
+// data.foo == data.bar is rewritten as data.foo = data.bar
+//
+// This stage should only run the safety check (since == is a built-in with no
+// outputs, so the inputs must not be marked as safe.)
+//
+// This stage is not executed by the query compiler by default because when
+// callers specify == instead of = they expect to receive a true/false/undefined
+// result back whereas with = the result is only ever true/undefined. For
+// partial evaluation cases we do want to rewrite == to = to simplify the
+// result.
+func rewriteEquals(x interface{}) {
+ doubleEq := Equal.Ref()
+ unifyOp := Equality.Ref()
+ WalkExprs(x, func(x *Expr) bool {
+ if x.IsCall() {
+ operator := x.Operator()
+ if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
+ x.SetOperator(NewTerm(unifyOp))
+ }
+ }
+ return false
+ })
+}
+
+// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and
+// comprehensions) are bound to vars earlier in the query. This translation
+// results in eager evaluation.
+//
+// For instance, given the following query:
+//
+// foo(data.bar) = 1
+//
+// The rewritten version will be:
+//
+// __local0__ = data.bar; foo(__local0__) = 1
+func rewriteDynamics(f *equalityFactory, body Body) Body {
+ result := make(Body, 0, len(body))
+ for _, expr := range body {
+ if expr.IsEquality() {
+ result = rewriteDynamicsEqExpr(f, expr, result)
+ } else if expr.IsCall() {
+ result = rewriteDynamicsCallExpr(f, expr, result)
+ } else {
+ result = rewriteDynamicsTermExpr(f, expr, result)
+ }
+ }
+ return result
+}
+
+func appendExpr(body Body, expr *Expr) Body {
+ body.Append(expr)
+ return body
+}
+
+func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ if !validEqAssignArgCount(expr) {
+ return appendExpr(result, expr)
+ }
+ terms := expr.Terms.([]*Term)
+ result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result)
+ result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result)
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ terms := expr.Terms.([]*Term)
+ for i := 1; i < len(terms); i++ {
+ result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result)
+ }
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body {
+ term := expr.Terms.(*Term)
+ result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result)
+ return appendExpr(result, expr)
+}
+
+func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
+ switch v := term.Value.(type) {
+ case Ref:
+ for i := 1; i < len(v); i++ {
+ result, v[i] = rewriteDynamicsOne(original, f, v[i], result)
+ }
+ case *ArrayComprehension:
+ v.Body = rewriteDynamics(f, v.Body)
+ case *SetComprehension:
+ v.Body = rewriteDynamics(f, v.Body)
+ case *ObjectComprehension:
+ v.Body = rewriteDynamics(f, v.Body)
+ default:
+ result, term = rewriteDynamicsOne(original, f, term, result)
+ }
+ return result, term
+}
+
+func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) {
+ switch v := term.Value.(type) {
+ case Ref:
+ for i := 1; i < len(v); i++ {
+ result, v[i] = rewriteDynamicsOne(original, f, v[i], result)
+ }
+ generated := f.Generate(term)
+ generated.With = original.With
+ result.Append(generated)
+ return result, result[len(result)-1].Operand(0)
+ case *Array:
+ for i := 0; i < v.Len(); i++ {
+ var t *Term
+ result, t = rewriteDynamicsOne(original, f, v.Elem(i), result)
+ v.set(i, t)
+ }
+ return result, term
+ case *object:
+ cpy := NewObject()
+ v.Foreach(func(key, value *Term) {
+ result, key = rewriteDynamicsOne(original, f, key, result)
+ result, value = rewriteDynamicsOne(original, f, value, result)
+ cpy.Insert(key, value)
+ })
+ return result, NewTerm(cpy).SetLocation(term.Location)
+ case Set:
+ cpy := NewSet()
+ for _, term := range v.Slice() {
+ var rw *Term
+ result, rw = rewriteDynamicsOne(original, f, term, result)
+ cpy.Add(rw)
+ }
+ return result, NewTerm(cpy).SetLocation(term.Location)
+ case *ArrayComprehension:
+ var extra *Expr
+ v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
+ result.Append(extra)
+ return result, result[len(result)-1].Operand(0)
+ case *SetComprehension:
+ var extra *Expr
+ v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
+ result.Append(extra)
+ return result, result[len(result)-1].Operand(0)
+ case *ObjectComprehension:
+ var extra *Expr
+ v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term)
+ result.Append(extra)
+ return result, result[len(result)-1].Operand(0)
+ }
+ return result, term
+}
+
+func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) {
+ body = rewriteDynamics(f, body)
+ generated := f.Generate(term)
+ generated.With = original.With
+ return body, generated
+}
+
+func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) {
+ for i := range rule.Head.Args {
+ support, output := expandExprTerm(gen, rule.Head.Args[i])
+ for j := range support {
+ rule.Body.Append(support[j])
+ }
+ rule.Head.Args[i] = output
+ }
+ if rule.Head.Key != nil {
+ support, output := expandExprTerm(gen, rule.Head.Key)
+ for i := range support {
+ rule.Body.Append(support[i])
+ }
+ rule.Head.Key = output
+ }
+ if rule.Head.Value != nil {
+ support, output := expandExprTerm(gen, rule.Head.Value)
+ for i := range support {
+ rule.Body.Append(support[i])
+ }
+ rule.Head.Value = output
+ }
+}
+
+func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body {
+ cpy := make(Body, 0, len(body))
+ for i := 0; i < len(body); i++ {
+ for _, expr := range expandExpr(gen, body[i]) {
+ cpy.Append(expr)
+ }
+ }
+ return cpy
+}
+
+func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) {
+ for i := range expr.With {
+ extras, value := expandExprTerm(gen, expr.With[i].Value)
+ expr.With[i].Value = value
+ result = append(result, extras...)
+ }
+ switch terms := expr.Terms.(type) {
+ case *Term:
+ extras, term := expandExprTerm(gen, terms)
+ if len(expr.With) > 0 {
+ for i := range extras {
+ extras[i].With = expr.With
+ }
+ }
+ result = append(result, extras...)
+ expr.Terms = term
+ result = append(result, expr)
+ case []*Term:
+ for i := 1; i < len(terms); i++ {
+ var extras []*Expr
+ extras, terms[i] = expandExprTerm(gen, terms[i])
+ if len(expr.With) > 0 {
+ for i := range extras {
+ extras[i].With = expr.With
+ }
+ }
+ result = append(result, extras...)
+ }
+ result = append(result, expr)
+ }
+ return
+}
+
+func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) {
+ output = term
+ switch v := term.Value.(type) {
+ case Call:
+ for i := 1; i < len(v); i++ {
+ var extras []*Expr
+ extras, v[i] = expandExprTerm(gen, v[i])
+ support = append(support, extras...)
+ }
+ output = NewTerm(gen.Generate()).SetLocation(term.Location)
+ expr := v.MakeExpr(output).SetLocation(term.Location)
+ expr.Generated = true
+ support = append(support, expr)
+ case Ref:
+ support = expandExprRef(gen, v)
+ case *Array:
+ support = expandExprTermArray(gen, v)
+ case *object:
+ cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) {
+ extras1, expandedKey := expandExprTerm(gen, k)
+ extras2, expandedValue := expandExprTerm(gen, v)
+ support = append(support, extras1...)
+ support = append(support, extras2...)
+ return expandedKey, expandedValue, nil
+ })
+ output = NewTerm(cpy).SetLocation(term.Location)
+ case Set:
+ cpy, _ := v.Map(func(x *Term) (*Term, error) {
+ extras, expanded := expandExprTerm(gen, x)
+ support = append(support, extras...)
+ return expanded, nil
+ })
+ output = NewTerm(cpy).SetLocation(term.Location)
+ case *ArrayComprehension:
+ support, term := expandExprTerm(gen, v.Term)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Term = term
+ v.Body = rewriteExprTermsInBody(gen, v.Body)
+ case *SetComprehension:
+ support, term := expandExprTerm(gen, v.Term)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Term = term
+ v.Body = rewriteExprTermsInBody(gen, v.Body)
+ case *ObjectComprehension:
+ support, key := expandExprTerm(gen, v.Key)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Key = key
+ support, value := expandExprTerm(gen, v.Value)
+ for i := range support {
+ v.Body.Append(support[i])
+ }
+ v.Value = value
+ v.Body = rewriteExprTermsInBody(gen, v.Body)
+ }
+ return
+}
+
+func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) {
+ // Start by calling a normal expandExprTerm on all terms.
+ support = expandExprTermSlice(gen, v)
+
+ // Rewrite references in order to support indirect references. We rewrite
+ // e.g.
+ //
+ // [1, 2, 3][i]
+ //
+ // to
+ //
+ // __local_var = [1, 2, 3]
+ // __local_var[i]
+ //
+ // to support these. This only impacts the reference subject, i.e. the
+ // first item in the slice.
+ var subject = v[0]
+ switch subject.Value.(type) {
+ case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call:
+ f := newEqualityFactory(gen)
+ assignToLocal := f.Generate(subject)
+ support = append(support, assignToLocal)
+ v[0] = assignToLocal.Operand(0)
+ }
+ return
+}
+
+func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) {
+ for i := 0; i < arr.Len(); i++ {
+ extras, v := expandExprTerm(gen, arr.Elem(i))
+ arr.set(i, v)
+ support = append(support, extras...)
+ }
+ return
+}
+
+func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) {
+ for i := 0; i < len(v); i++ {
+ var extras []*Expr
+ extras, v[i] = expandExprTerm(gen, v[i])
+ support = append(support, extras...)
+ }
+ return
+}
+
+type localDeclaredVars struct {
+ vars []*declaredVarSet
+
+ // rewritten contains a mapping of *all* user-defined variables
+ // that have been rewritten whereas vars contains the state
+ // from the current query (not not any nested queries, and all
+ // vars seen).
+ rewritten map[Var]Var
+}
+
+type varOccurrence int
+
+const (
+ newVar varOccurrence = iota
+ argVar
+ seenVar
+ assignedVar
+ declaredVar
+)
+
+type declaredVarSet struct {
+ vs map[Var]Var
+ reverse map[Var]Var
+ occurrence map[Var]varOccurrence
+}
+
+func newDeclaredVarSet() *declaredVarSet {
+ return &declaredVarSet{
+ vs: map[Var]Var{},
+ reverse: map[Var]Var{},
+ occurrence: map[Var]varOccurrence{},
+ }
+}
+
+func newLocalDeclaredVars() *localDeclaredVars {
+ return &localDeclaredVars{
+ vars: []*declaredVarSet{newDeclaredVarSet()},
+ rewritten: map[Var]Var{},
+ }
+}
+
+func (s *localDeclaredVars) Push() {
+ s.vars = append(s.vars, newDeclaredVarSet())
+}
+
+func (s *localDeclaredVars) Pop() *declaredVarSet {
+ sl := s.vars
+ curr := sl[len(sl)-1]
+ s.vars = sl[:len(sl)-1]
+ return curr
+}
+
+func (s localDeclaredVars) Peek() *declaredVarSet {
+ return s.vars[len(s.vars)-1]
+}
+
+func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) {
+ elem := s.vars[len(s.vars)-1]
+ elem.vs[x] = y
+ elem.reverse[y] = x
+ elem.occurrence[x] = occurrence
+
+ // If the variable has been rewritten (where x != y, with y being
+ // the generated value), store it in the map of rewritten vars.
+ // Assume that the generated values are unique for the compilation.
+ if !x.Equal(y) {
+ s.rewritten[y] = x
+ }
+}
+
+func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) {
+ for i := len(s.vars) - 1; i >= 0; i-- {
+ if y, ok = s.vars[i].vs[x]; ok {
+ return
+ }
+ }
+ return
+}
+
+// Occurrence returns a flag that indicates whether x has occurred in the
+// current scope.
+func (s localDeclaredVars) Occurrence(x Var) varOccurrence {
+ return s.vars[len(s.vars)-1].occurrence[x]
+}
+
+// GlobalOccurrence returns a flag that indicates whether x has occurred in the
+// global scope.
+func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) {
+ for i := len(s.vars) - 1; i >= 0; i-- {
+ if occ, ok := s.vars[i].occurrence[x]; ok {
+ return occ, true
+ }
+ }
+ return newVar, false
+}
+
+// rewriteLocalVars rewrites bodies to remove assignment/declaration
+// expressions. For example:
+//
+// a := 1; p[a]
+//
+// Is rewritten to:
+//
+// __local0__ = 1; p[__local0__]
+//
+// During rewriting, assignees are validated to prevent use before declaration.
+func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body) (Body, map[Var]Var, Errors) {
+ var errs Errors
+ body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs)
+ return body, stack.Pop().vs, errs
+}
+
+func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors) (Body, Errors) {
+
+ var cpy Body
+
+ for i := range body {
+ var expr *Expr
+ if body[i].IsAssignment() {
+ expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs)
+ } else if decl, ok := body[i].Terms.(*SomeDecl); ok {
+ errs = rewriteSomeDeclStatement(g, stack, decl, errs)
+ } else {
+ expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs)
+ }
+ if expr != nil {
+ cpy.Append(expr)
+ }
+ }
+
+ // If the body only contained a var statement it will be empty at this
+ // point. Append true to the body to ensure that it's non-empty (zero length
+ // bodies are not supported.)
+ if len(cpy) == 0 {
+ cpy.Append(NewExpr(BooleanTerm(true)))
+ }
+
+ return cpy, checkUnusedDeclaredVars(body[0].Loc(), stack, used, cpy, errs)
+}
+
+func checkUnusedDeclaredVars(loc *Location, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors {
+
+ // NOTE(tsandall): Do not generate more errors if there are existing
+ // declaration errors.
+ if len(errs) > 0 {
+ return errs
+ }
+
+ dvs := stack.Peek()
+ declared := NewVarSet()
+
+ for v, occ := range dvs.occurrence {
+ if occ == declaredVar {
+ declared.Add(dvs.vs[v])
+ }
+ }
+
+ bodyvars := cpy.Vars(VarVisitorParams{})
+
+ for v := range used {
+ if gv, ok := stack.Declared(v); ok {
+ bodyvars.Add(gv)
+ } else {
+ bodyvars.Add(v)
+ }
+ }
+
+ unused := declared.Diff(bodyvars).Diff(used)
+
+ for _, gv := range unused.Sorted() {
+ errs = append(errs, NewError(CompileErr, loc, "declared var %v unused", dvs.reverse[gv]))
+ }
+
+ return errs
+}
+
+func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, decl *SomeDecl, errs Errors) Errors {
+ for i := range decl.Symbols {
+ v := decl.Symbols[i].Value.(Var)
+ if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil {
+ errs = append(errs, NewError(CompileErr, decl.Loc(), err.Error()))
+ }
+ }
+ return errs
+}
+
+func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors) (*Expr, Errors) {
+ vis := NewGenericVisitor(func(x interface{}) bool {
+ var stop bool
+ switch x := x.(type) {
+ case *Term:
+ stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs)
+ case *With:
+ _, errs = rewriteDeclaredVarsInTerm(g, stack, x.Value, errs)
+ stop = true
+ }
+ return stop
+ })
+ vis.Walk(expr)
+ return expr, errs
+}
+
+func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors) (*Expr, Errors) {
+
+ if expr.Negated {
+ errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression"))
+ return expr, errs
+ }
+
+ numErrsBefore := len(errs)
+
+ if !validEqAssignArgCount(expr) {
+ return expr, errs
+ }
+
+ // Rewrite terms on right hand side capture seen vars and recursively
+ // process comprehensions before left hand side is processed. Also
+ // rewrite with modifier.
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs)
+
+ for _, w := range expr.With {
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs)
+ }
+
+ // Rewrite vars on left hand side with unique names. Catch redeclaration
+ // and invalid term types here.
+ var vis func(t *Term) bool
+
+ vis = func(t *Term) bool {
+ switch v := t.Value.(type) {
+ case Var:
+ if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil {
+ errs = append(errs, NewError(CompileErr, t.Location, err.Error()))
+ } else {
+ t.Value = gv
+ }
+ return true
+ case *Array:
+ return false
+ case *object:
+ v.Foreach(func(_, v *Term) {
+ WalkTerms(v, vis)
+ })
+ return true
+ case Ref:
+ if RootDocumentRefs.Contains(t) {
+ if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil {
+ errs = append(errs, NewError(CompileErr, t.Location, err.Error()))
+ } else {
+ t.Value = gv
+ }
+ return true
+ }
+ }
+ errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", TypeName(t.Value)))
+ return true
+ }
+
+ WalkTerms(expr.Operand(0), vis)
+
+ if len(errs) == numErrsBefore {
+ loc := expr.Operator()[0].Location
+ expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc))
+ }
+
+ return expr, errs
+}
+
+func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors) (bool, Errors) {
+ switch v := term.Value.(type) {
+ case Var:
+ if gv, ok := stack.Declared(v); ok {
+ term.Value = gv
+ } else if stack.Occurrence(v) == newVar {
+ stack.Insert(v, v, seenVar)
+ }
+ case Ref:
+ if RootDocumentRefs.Contains(term) {
+ x := v[0].Value.(Var)
+ if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar {
+ gv, _ := stack.Declared(x)
+ term.Value = gv
+ }
+
+ return true, errs
+ }
+ return false, errs
+ case *object:
+ cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) {
+ kcpy := k.Copy()
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs)
+ return kcpy, v, nil
+ })
+ term.Value = cpy
+ case Set:
+ cpy, _ := v.Map(func(elem *Term) (*Term, error) {
+ elemcpy := elem.Copy()
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs)
+ return elemcpy, nil
+ })
+ term.Value = cpy
+ case *ArrayComprehension:
+ errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs)
+ case *SetComprehension:
+ errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs)
+ case *ObjectComprehension:
+ errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs)
+ default:
+ return false, errs
+ }
+ return true, errs
+}
+
+func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors) Errors {
+ WalkNodes(term, func(n Node) bool {
+ var stop bool
+ switch n := n.(type) {
+ case *With:
+ _, errs = rewriteDeclaredVarsInTerm(g, stack, n.Value, errs)
+ stop = true
+ case *Term:
+ stop, errs = rewriteDeclaredVarsInTerm(g, stack, n, errs)
+ }
+ return stop
+ })
+ return errs
+}
+
+func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors) Errors {
+ stack.Push()
+ v.Body, errs = rewriteDeclaredVarsInBody(g, stack, nil, v.Body, errs)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs)
+ stack.Pop()
+ return errs
+}
+
+func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors) Errors {
+ stack.Push()
+ v.Body, errs = rewriteDeclaredVarsInBody(g, stack, nil, v.Body, errs)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs)
+ stack.Pop()
+ return errs
+}
+
+func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors) Errors {
+ stack.Push()
+ v.Body, errs = rewriteDeclaredVarsInBody(g, stack, nil, v.Body, errs)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs)
+ errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs)
+ stack.Pop()
+ return errs
+}
+
+func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) {
+ switch stack.Occurrence(v) {
+ case seenVar:
+ return gv, fmt.Errorf("var %v referenced above", v)
+ case assignedVar:
+ return gv, fmt.Errorf("var %v assigned above", v)
+ case declaredVar:
+ return gv, fmt.Errorf("var %v declared above", v)
+ case argVar:
+ return gv, fmt.Errorf("arg %v redeclared", v)
+ }
+ gv = g.Generate()
+ stack.Insert(v, gv, occ)
+ return
+}
+
+// rewriteWithModifiersInBody will rewrite the body so that with modifiers do
+// not contain terms that require evaluation as values. If this function
+// encounters an invalid with modifier target then it will raise an error.
+func rewriteWithModifiersInBody(c *Compiler, f *equalityFactory, body Body) (Body, *Error) {
+ var result Body
+ for i := range body {
+ exprs, err := rewriteWithModifier(c, f, body[i])
+ if err != nil {
+ return nil, err
+ }
+ if len(exprs) > 0 {
+ for _, expr := range exprs {
+ result.Append(expr)
+ }
+ } else {
+ result.Append(body[i])
+ }
+ }
+ return result, nil
+}
+
+func rewriteWithModifier(c *Compiler, f *equalityFactory, expr *Expr) ([]*Expr, *Error) {
+
+ var result []*Expr
+ for i := range expr.With {
+ err := validateTarget(c, expr.With[i].Target)
+ if err != nil {
+ return nil, err
+ }
+
+ if requiresEval(expr.With[i].Value) {
+ eq := f.Generate(expr.With[i].Value)
+ result = append(result, eq)
+ expr.With[i].Value = eq.Operand(0)
+ }
+ }
+
+ // If any of the with modifiers in this expression were rewritten then result
+ // will be non-empty. In this case, the expression will have been modified and
+ // it should also be added to the result.
+ if len(result) > 0 {
+ result = append(result, expr)
+ }
+ return result, nil
+}
+
+func validateTarget(c *Compiler, term *Term) *Error {
+ if !isInputRef(term) && !isDataRef(term) {
+ return NewError(TypeErr, term.Location, "with keyword target must start with %v or %v", InputRootDocument, DefaultRootDocument)
+ }
+
+ if isDataRef(term) {
+ ref := term.Value.(Ref)
+ node := c.RuleTree
+ for i := 0; i < len(ref)-1; i++ {
+ child := node.Child(ref[i].Value)
+ if child == nil {
+ break
+ } else if len(child.Values) > 0 {
+ return NewError(CompileErr, term.Loc(), "with keyword cannot partially replace virtual document(s)")
+ }
+ node = child
+ }
+
+ if node != nil {
+ if child := node.Child(ref[len(ref)-1].Value); child != nil {
+ for _, value := range child.Values {
+ if len(value.(*Rule).Head.Args) > 0 {
+ return NewError(CompileErr, term.Loc(), "with keyword cannot replace functions")
+ }
+ }
+ }
+ }
+
+ }
+ return nil
+}
+
+func isInputRef(term *Term) bool {
+ if ref, ok := term.Value.(Ref); ok {
+ if ref.HasPrefix(InputRootRef) {
+ return true
+ }
+ }
+ return false
+}
+
+func isDataRef(term *Term) bool {
+ if ref, ok := term.Value.(Ref); ok {
+ if ref.HasPrefix(DefaultRootRef) {
+ return true
+ }
+ }
+ return false
+}
+
+func isVirtual(node *TreeNode, ref Ref) bool {
+ for i := 0; i < len(ref); i++ {
+ child := node.Child(ref[i].Value)
+ if child == nil {
+ return false
+ } else if len(child.Values) > 0 {
+ return true
+ }
+ node = child
+ }
+ return true
+}
+
+func safetyErrorSlice(unsafe unsafeVars) (result Errors) {
+
+ if len(unsafe) == 0 {
+ return
+ }
+
+ for _, pair := range unsafe.Vars() {
+ if !pair.Var.IsGenerated() {
+ result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", pair.Var))
+ }
+ }
+
+ if len(result) > 0 {
+ return
+ }
+
+ // If the expression contains unsafe generated variables, report which
+ // expressions are unsafe instead of the variables that are unsafe (since
+ // the latter are not meaningful to the user.)
+ pairs := unsafe.Slice()
+
+ sort.Slice(pairs, func(i, j int) bool {
+ return pairs[i].Expr.Location.Compare(pairs[j].Expr.Location) < 0
+ })
+
+ // Report at most one error per generated variable.
+ seen := NewVarSet()
+
+ for _, expr := range pairs {
+ before := len(seen)
+ for v := range expr.Vars {
+ if v.IsGenerated() {
+ seen.Add(v)
+ }
+ }
+ if len(seen) > before {
+ result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe"))
+ }
+ }
+
+ return
+}
+
+func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors {
+ errs := make(Errors, 0)
+ WalkExprs(node, func(x *Expr) bool {
+ if x.IsCall() {
+ operator := x.Operator().String()
+ if _, ok := unsafeBuiltinsMap[operator]; ok {
+ errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator))
+ }
+ }
+ return false
+ })
+ return errs
+}
+
+func rewriteVarsInRef(vars ...map[Var]Var) func(Ref) Ref {
+ return func(node Ref) Ref {
+ i, _ := TransformVars(node, func(v Var) (Value, error) {
+ for _, m := range vars {
+ if u, ok := m[v]; ok {
+ return u, nil
+ }
+ }
+ return v, nil
+ })
+ return i.(Ref)
+ }
+}
+
+func rewriteVarsNop(node Ref) Ref {
+ return node
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
new file mode 100644
index 00000000..37a81ddc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go
@@ -0,0 +1,42 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+// CompileModules takes a set of Rego modules represented as strings and
+// compiles them for evaluation. The keys of the map are used as filenames.
+func CompileModules(modules map[string]string) (*Compiler, error) {
+
+ parsed := make(map[string]*Module, len(modules))
+
+ for f, module := range modules {
+ var pm *Module
+ var err error
+ if pm, err = ParseModule(f, module); err != nil {
+ return nil, err
+ }
+ parsed[f] = pm
+ }
+
+ compiler := NewCompiler()
+ compiler.Compile(parsed)
+
+ if compiler.Failed() {
+ return nil, compiler.Errors
+ }
+
+ return compiler, nil
+}
+
+// MustCompileModules compiles a set of Rego modules represented as strings. If
+// the compilation process fails, this function panics.
+func MustCompileModules(modules map[string]string) *Compiler {
+
+ compiler, err := CompileModules(modules)
+ if err != nil {
+ panic(err)
+ }
+
+ return compiler
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go b/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go
new file mode 100644
index 00000000..5d952258
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go
@@ -0,0 +1,9 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+const (
+ compileStageComprehensionIndexBuild = "compile_stage_comprehension_index_build"
+)
diff --git a/vendor/github.com/open-policy-agent/opa/ast/conflicts.go b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go
new file mode 100644
index 00000000..d1013cce
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/conflicts.go
@@ -0,0 +1,48 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "strings"
+)
+
+// CheckPathConflicts returns a set of errors indicating paths that
+// are in conflict with the result of the provided callable.
+func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors {
+ var errs Errors
+
+ root := c.RuleTree.Child(DefaultRootDocument.Value)
+ if root == nil {
+ return nil
+ }
+
+ for _, node := range root.Children {
+ errs = append(errs, checkDocumentConflicts(node, exists, nil)...)
+ }
+
+ return errs
+}
+
+func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors {
+
+ path = append(path, string(node.Key.(String)))
+
+ if len(node.Values) > 0 {
+ s := strings.Join(path, "/")
+ if ok, err := exists(path); err != nil {
+ return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())}
+ } else if ok {
+ return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)}
+ }
+ }
+
+ var errs Errors
+
+ for _, child := range node.Children {
+ errs = append(errs, checkDocumentConflicts(child, exists, path)...)
+ }
+
+ return errs
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/doc.go b/vendor/github.com/open-policy-agent/opa/ast/doc.go
new file mode 100644
index 00000000..363660cf
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/doc.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine.
+//
+// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc.
+//
+// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows:
+//
+// Module
+// |
+// +--- Package (Reference)
+// |
+// +--- Imports
+// | |
+// | +--- Import (Term)
+// |
+// +--- Rules
+// |
+// +--- Rule
+// |
+// +--- Head
+// | |
+// | +--- Name (Variable)
+// | |
+// | +--- Key (Term)
+// | |
+// | +--- Value (Term)
+// |
+// +--- Body
+// |
+// +--- Expression (Term | Terms | Variable Declaration)
+//
+// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports.
+package ast
diff --git a/vendor/github.com/open-policy-agent/opa/ast/env.go b/vendor/github.com/open-policy-agent/opa/ast/env.go
new file mode 100644
index 00000000..ab4f0e68
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/env.go
@@ -0,0 +1,327 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "github.com/open-policy-agent/opa/types"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// TypeEnv contains type info for static analysis such as type checking.
+type TypeEnv struct {
+ tree *typeTreeNode
+ next *TypeEnv
+}
+
+// NewTypeEnv returns an empty TypeEnv.
+func NewTypeEnv() *TypeEnv {
+ return &TypeEnv{
+ tree: newTypeTree(),
+ }
+}
+
+// Get returns the type of x.
+func (env *TypeEnv) Get(x interface{}) types.Type {
+
+ if term, ok := x.(*Term); ok {
+ x = term.Value
+ }
+
+ switch x := x.(type) {
+
+ // Scalars.
+ case Null:
+ return types.NewNull()
+ case Boolean:
+ return types.NewBoolean()
+ case Number:
+ return types.NewNumber()
+ case String:
+ return types.NewString()
+
+ // Composites.
+ case *Array:
+ static := make([]types.Type, x.Len())
+ for i := range static {
+ tpe := env.Get(x.Elem(i).Value)
+ static[i] = tpe
+ }
+
+ var dynamic types.Type
+ if len(static) == 0 {
+ dynamic = types.A
+ }
+
+ return types.NewArray(static, dynamic)
+
+ case *object:
+ static := []*types.StaticProperty{}
+ var dynamic *types.DynamicProperty
+
+ x.Foreach(func(k, v *Term) {
+ if IsConstant(k.Value) {
+ kjson, err := JSON(k.Value)
+ if err == nil {
+ tpe := env.Get(v)
+ static = append(static, types.NewStaticProperty(kjson, tpe))
+ return
+ }
+ }
+ // Can't handle it as a static property, fallback to dynamic
+ typeK := env.Get(k.Value)
+ typeV := env.Get(v.Value)
+ dynamic = types.NewDynamicProperty(typeK, typeV)
+ })
+
+ if len(static) == 0 && dynamic == nil {
+ dynamic = types.NewDynamicProperty(types.A, types.A)
+ }
+
+ return types.NewObject(static, dynamic)
+
+ case Set:
+ var tpe types.Type
+ x.Foreach(func(elem *Term) {
+ other := env.Get(elem.Value)
+ tpe = types.Or(tpe, other)
+ })
+ if tpe == nil {
+ tpe = types.A
+ }
+ return types.NewSet(tpe)
+
+ // Comprehensions.
+ case *ArrayComprehension:
+ checker := newTypeChecker()
+ cpy, errs := checker.CheckBody(env, x.Body)
+ if len(errs) == 0 {
+ return types.NewArray(nil, cpy.Get(x.Term))
+ }
+ return nil
+ case *ObjectComprehension:
+ checker := newTypeChecker()
+ cpy, errs := checker.CheckBody(env, x.Body)
+ if len(errs) == 0 {
+ return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value)))
+ }
+ return nil
+ case *SetComprehension:
+ checker := newTypeChecker()
+ cpy, errs := checker.CheckBody(env, x.Body)
+ if len(errs) == 0 {
+ return types.NewSet(cpy.Get(x.Term))
+ }
+ return nil
+
+ // Refs.
+ case Ref:
+ return env.getRef(x)
+
+ // Vars.
+ case Var:
+ if node := env.tree.Child(x); node != nil {
+ return node.Value()
+ }
+ if env.next != nil {
+ return env.next.Get(x)
+ }
+ return nil
+
+ // Calls.
+ case Call:
+ return nil
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (env *TypeEnv) getRef(ref Ref) types.Type {
+
+ node := env.tree.Child(ref[0].Value)
+ if node == nil {
+ return env.getRefFallback(ref)
+ }
+
+ return env.getRefRec(node, ref, ref[1:])
+}
+
+func (env *TypeEnv) getRefFallback(ref Ref) types.Type {
+
+ if env.next != nil {
+ return env.next.Get(ref)
+ }
+
+ if RootDocumentNames.Contains(ref[0]) {
+ return types.A
+ }
+
+ return nil
+}
+
+func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type {
+ if len(tail) == 0 {
+ return env.getRefRecExtent(node)
+ }
+
+ if node.Leaf() {
+ return selectRef(node.Value(), tail)
+ }
+
+ if !IsConstant(tail[0].Value) {
+ return selectRef(env.getRefRecExtent(node), tail)
+ }
+
+ child := node.Child(tail[0].Value)
+ if child == nil {
+ return env.getRefFallback(ref)
+ }
+
+ return env.getRefRec(child, ref, tail[1:])
+}
+
+func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type {
+
+ if node.Leaf() {
+ return node.Value()
+ }
+
+ children := []*types.StaticProperty{}
+
+ node.Children().Iter(func(k, v util.T) bool {
+ key := k.(Value)
+ child := v.(*typeTreeNode)
+
+ tpe := env.getRefRecExtent(child)
+ // TODO(tsandall): handle non-string keys?
+ if s, ok := key.(String); ok {
+ children = append(children, types.NewStaticProperty(string(s), tpe))
+ }
+ return false
+ })
+
+ // TODO(tsandall): for now, these objects can have any dynamic properties
+ // because we don't have schema for base docs. Once schemas are supported
+ // we can improve this.
+ return types.NewObject(children, types.NewDynamicProperty(types.S, types.A))
+}
+
+func (env *TypeEnv) wrap() *TypeEnv {
+ cpy := *env
+ cpy.next = env
+ cpy.tree = newTypeTree()
+ return &cpy
+}
+
+// typeTreeNode is used to store type information in a tree.
+type typeTreeNode struct {
+ key Value
+ value types.Type
+ children *util.HashMap
+}
+
+func newTypeTree() *typeTreeNode {
+ return &typeTreeNode{
+ key: nil,
+ value: nil,
+ children: util.NewHashMap(valueEq, valueHash),
+ }
+}
+
+func (n *typeTreeNode) Child(key Value) *typeTreeNode {
+ value, ok := n.children.Get(key)
+ if !ok {
+ return nil
+ }
+ return value.(*typeTreeNode)
+}
+
+func (n *typeTreeNode) Children() *util.HashMap {
+ return n.children
+}
+
+func (n *typeTreeNode) Get(path Ref) types.Type {
+ curr := n
+ for _, term := range path {
+ child, ok := curr.children.Get(term.Value)
+ if !ok {
+ return nil
+ }
+ curr = child.(*typeTreeNode)
+ }
+ return curr.Value()
+}
+
+func (n *typeTreeNode) Leaf() bool {
+ return n.value != nil
+}
+
+func (n *typeTreeNode) PutOne(key Value, tpe types.Type) {
+ c, ok := n.children.Get(key)
+
+ var child *typeTreeNode
+ if !ok {
+ child = newTypeTree()
+ child.key = key
+ n.children.Put(key, child)
+ } else {
+ child = c.(*typeTreeNode)
+ }
+
+ child.value = tpe
+}
+
+func (n *typeTreeNode) Put(path Ref, tpe types.Type) {
+ curr := n
+ for _, term := range path {
+ c, ok := curr.children.Get(term.Value)
+
+ var child *typeTreeNode
+ if !ok {
+ child = newTypeTree()
+ child.key = term.Value
+ curr.children.Put(child.key, child)
+ } else {
+ child = c.(*typeTreeNode)
+ }
+
+ curr = child
+ }
+ curr.value = tpe
+}
+
+func (n *typeTreeNode) Value() types.Type {
+ return n.value
+}
+
+// selectConstant returns the attribute of the type referred to by the term. If
+// the attribute type cannot be determined, nil is returned.
+func selectConstant(tpe types.Type, term *Term) types.Type {
+ x, err := JSON(term.Value)
+ if err == nil {
+ return types.Select(tpe, x)
+ }
+ return nil
+}
+
+// selectRef returns the type of the nested attribute referred to by ref. If
+// the attribute type cannot be determined, nil is returned. If the ref
+// contains vars or refs, then the returned type will be a union of the
+// possible types.
+func selectRef(tpe types.Type, ref Ref) types.Type {
+
+ if tpe == nil || len(ref) == 0 {
+ return tpe
+ }
+
+ head, tail := ref[0], ref[1:]
+
+ switch head.Value.(type) {
+ case Var, Ref, *Array, Object, Set:
+ return selectRef(types.Values(tpe), tail)
+ default:
+ return selectRef(selectConstant(tpe, head), tail)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/errors.go b/vendor/github.com/open-policy-agent/opa/ast/errors.go
new file mode 100644
index 00000000..76a08421
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/errors.go
@@ -0,0 +1,133 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Errors represents a series of errors encountered during parsing, compiling,
+// etc.
+type Errors []*Error
+
+func (e Errors) Error() string {
+
+ if len(e) == 0 {
+ return "no error(s)"
+ }
+
+ if len(e) == 1 {
+ return fmt.Sprintf("1 error occurred: %v", e[0].Error())
+ }
+
+ s := []string{}
+ for _, err := range e {
+ s = append(s, err.Error())
+ }
+
+ return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n"))
+}
+
+// Sort sorts the error slice by location. If the locations are equal then the
+// error message is compared.
+func (e Errors) Sort() {
+ sort.Slice(e, func(i, j int) bool {
+ a := e[i]
+ b := e[j]
+
+ if cmp := a.Location.Compare(b.Location); cmp != 0 {
+ return cmp < 0
+ }
+
+ return a.Error() < b.Error()
+ })
+}
+
+const (
+ // ParseErr indicates an unclassified parse error occurred.
+ ParseErr = "rego_parse_error"
+
+ // CompileErr indicates an unclassified compile error occurred.
+ CompileErr = "rego_compile_error"
+
+ // TypeErr indicates a type error was caught.
+ TypeErr = "rego_type_error"
+
+ // UnsafeVarErr indicates an unsafe variable was found during compilation.
+ UnsafeVarErr = "rego_unsafe_var_error"
+
+ // RecursionErr indicates recursion was found during compilation.
+ RecursionErr = "rego_recursion_error"
+)
+
+// IsError returns true if err is an AST error with code.
+func IsError(code string, err error) bool {
+ if err, ok := err.(*Error); ok {
+ return err.Code == code
+ }
+ return false
+}
+
+// ErrorDetails defines the interface for detailed error messages.
+type ErrorDetails interface {
+ Lines() []string
+}
+
+// Error represents a single error caught during parsing, compiling, etc.
+type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Location *Location `json:"location,omitempty"`
+ Details ErrorDetails `json:"details,omitempty"`
+}
+
+func (e *Error) Error() string {
+
+ var prefix string
+
+ if e.Location != nil {
+
+ if len(e.Location.File) > 0 {
+ prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row)
+ } else {
+ prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col)
+ }
+ }
+
+ msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
+
+ if len(prefix) > 0 {
+ msg = prefix + ": " + msg
+ }
+
+ if e.Details != nil {
+ for _, line := range e.Details.Lines() {
+ msg += "\n\t" + line
+ }
+ }
+
+ return msg
+}
+
+// NewError returns a new Error object.
+func NewError(code string, loc *Location, f string, a ...interface{}) *Error {
+ return &Error{
+ Code: code,
+ Location: loc,
+ Message: fmt.Sprintf(f, a...),
+ }
+}
+
+var (
+ errPartialRuleAssignOperator = fmt.Errorf("partial rules must use = operator (not := operator)")
+ errElseAssignOperator = fmt.Errorf("else keyword cannot be used on rule declared with := operator")
+ errFunctionAssignOperator = fmt.Errorf("functions must use = operator (not := operator)")
+)
+
+func errTermAssignOperator(x interface{}) error {
+ return fmt.Errorf("cannot assign to %v", TypeName(x))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/fuzz.go b/vendor/github.com/open-policy-agent/opa/ast/fuzz.go
new file mode 100644
index 00000000..6ff7e35a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/fuzz.go
@@ -0,0 +1,16 @@
+// +build gofuzz
+
+package ast
+
+func Fuzz(data []byte) int {
+
+ str := string(data)
+ _, _, err := ParseStatements("", str)
+
+ if err == nil {
+ CompileModules(map[string]string{"": str})
+ return 1
+ }
+
+ return 0
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/index.go b/vendor/github.com/open-policy-agent/opa/ast/index.go
new file mode 100644
index 00000000..3e9a9fc9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/index.go
@@ -0,0 +1,827 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+// RuleIndex defines the interface for rule indices.
+type RuleIndex interface {
+
+ // Build tries to construct an index for the given rules. If the index was
+ // constructed, ok is true, otherwise false.
+ Build(rules []*Rule) (ok bool)
+
+ // Lookup searches the index for rules that will match the provided
+ // resolver. If the resolver returns an error, it is returned via err.
+ Lookup(resolver ValueResolver) (result *IndexResult, err error)
+
+ // AllRules traverses the index and returns all rules that will match
+ // the provided resolver without any optimizations (effectively with
+ // indexing disabled). If the resolver returns an error, it is returned
+ // via err.
+ AllRules(resolver ValueResolver) (result *IndexResult, err error)
+}
+
+// IndexResult contains the result of an index lookup.
+type IndexResult struct {
+ Kind DocKind
+ Rules []*Rule
+ Else map[*Rule][]*Rule
+ Default *Rule
+}
+
+// NewIndexResult returns a new IndexResult object.
+func NewIndexResult(kind DocKind) *IndexResult {
+ return &IndexResult{
+ Kind: kind,
+ Else: map[*Rule][]*Rule{},
+ }
+}
+
+// Empty returns true if there are no rules to evaluate.
+func (ir *IndexResult) Empty() bool {
+ return len(ir.Rules) == 0 && ir.Default == nil
+}
+
+type baseDocEqIndex struct {
+ isVirtual func(Ref) bool
+ root *trieNode
+ defaultRule *Rule
+ kind DocKind
+}
+
+func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex {
+ return &baseDocEqIndex{
+ isVirtual: isVirtual,
+ root: newTrieNodeImpl(),
+ }
+}
+
+func (i *baseDocEqIndex) Build(rules []*Rule) bool {
+ if len(rules) == 0 {
+ return false
+ }
+
+ i.kind = rules[0].Head.DocKind()
+ indices := newrefindices(i.isVirtual)
+
+ // build indices for each rule.
+ for idx := range rules {
+ WalkRules(rules[idx], func(rule *Rule) bool {
+ if rule.Default {
+ i.defaultRule = rule
+ return false
+ }
+ for _, expr := range rule.Body {
+ indices.Update(rule, expr)
+ }
+ return false
+ })
+ }
+
+ // build trie out of indices.
+ for idx := range rules {
+ var prio int
+ WalkRules(rules[idx], func(rule *Rule) bool {
+ if rule.Default {
+ return false
+ }
+ node := i.root
+ if indices.Indexed(rule) {
+ for _, ref := range indices.Sorted() {
+ node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref))
+ }
+ }
+ // Insert rule into trie with (insertion order, priority order)
+ // tuple. Retaining the insertion order allows us to return rules
+ // in the order they were passed to this function.
+ node.rules = append(node.rules, &ruleNode{[...]int{idx, prio}, rule})
+ prio++
+ return false
+ })
+
+ }
+
+ return true
+}
+
+func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) {
+
+ tr := newTrieTraversalResult()
+
+ err := i.root.Traverse(resolver, tr)
+ if err != nil {
+ return nil, err
+ }
+
+ result := NewIndexResult(i.kind)
+ result.Default = i.defaultRule
+ result.Rules = make([]*Rule, 0, len(tr.ordering))
+
+ for _, pos := range tr.ordering {
+ sort.Slice(tr.unordered[pos], func(i, j int) bool {
+ return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
+ })
+ nodes := tr.unordered[pos]
+ root := nodes[0].rule
+ result.Rules = append(result.Rules, root)
+ if len(nodes) > 1 {
+ result.Else[root] = make([]*Rule, len(nodes)-1)
+ for i := 1; i < len(nodes); i++ {
+ result.Else[root][i-1] = nodes[i].rule
+ }
+ }
+ }
+
+ return result, nil
+}
+
+func (i *baseDocEqIndex) AllRules(resolver ValueResolver) (*IndexResult, error) {
+ tr := newTrieTraversalResult()
+
+ // Walk over the rule trie and accumulate _all_ rules
+ rw := &ruleWalker{result: tr}
+ i.root.Do(rw)
+
+ result := NewIndexResult(i.kind)
+ result.Default = i.defaultRule
+ result.Rules = make([]*Rule, 0, len(tr.ordering))
+
+ for _, pos := range tr.ordering {
+ sort.Slice(tr.unordered[pos], func(i, j int) bool {
+ return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1]
+ })
+ nodes := tr.unordered[pos]
+ root := nodes[0].rule
+ result.Rules = append(result.Rules, root)
+ if len(nodes) > 1 {
+ result.Else[root] = make([]*Rule, len(nodes)-1)
+ for i := 1; i < len(nodes); i++ {
+ result.Else[root][i-1] = nodes[i].rule
+ }
+ }
+ }
+
+ return result, nil
+}
+
+type ruleWalker struct {
+ result *trieTraversalResult
+}
+
+func (r *ruleWalker) Do(x interface{}) trieWalker {
+ tn := x.(*trieNode)
+ for _, rn := range tn.rules {
+ r.result.Add(rn)
+ }
+ return r
+}
+
+type valueMapper struct {
+ Key string
+ MapValue func(Value) Value
+}
+
+type refindex struct {
+ Ref Ref
+ Value Value
+ Mapper *valueMapper
+}
+
+type refindices struct {
+ isVirtual func(Ref) bool
+ rules map[*Rule][]*refindex
+ frequency *util.HashMap
+ sorted []Ref
+}
+
+func newrefindices(isVirtual func(Ref) bool) *refindices {
+ return &refindices{
+ isVirtual: isVirtual,
+ rules: map[*Rule][]*refindex{},
+ frequency: util.NewHashMap(func(a, b util.T) bool {
+ r1, r2 := a.(Ref), b.(Ref)
+ return r1.Equal(r2)
+ }, func(x util.T) int {
+ return x.(Ref).Hash()
+ }),
+ }
+}
+
+// Update attempts to update the refindices for the given expression in the
+// given rule. If the expression cannot be indexed the update does not affect
+// the indices.
+func (i *refindices) Update(rule *Rule, expr *Expr) {
+
+ if expr.Negated {
+ return
+ }
+
+ if len(expr.With) > 0 {
+ // NOTE(tsandall): In the future, we may need to consider expressions
+ // that have with statements applied to them.
+ return
+ }
+
+ op := expr.Operator()
+
+ if op.Equal(Equality.Ref()) || op.Equal(Equal.Ref()) {
+
+ i.updateEq(rule, expr)
+
+ } else if op.Equal(GlobMatch.Ref()) {
+
+ i.updateGlobMatch(rule, expr)
+ }
+}
+
+// Sorted returns a sorted list of references that the indices were built from.
+// References that appear more frequently in the indexed rules are ordered
+// before less frequently appearing references.
+func (i *refindices) Sorted() []Ref {
+
+ if i.sorted == nil {
+ counts := make([]int, 0, i.frequency.Len())
+ i.sorted = make([]Ref, 0, i.frequency.Len())
+
+ i.frequency.Iter(func(k, v util.T) bool {
+ counts = append(counts, v.(int))
+ i.sorted = append(i.sorted, k.(Ref))
+ return false
+ })
+
+ sort.Slice(i.sorted, func(a, b int) bool {
+ if counts[a] > counts[b] {
+ return true
+ } else if counts[b] > counts[a] {
+ return false
+ }
+ return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0
+ })
+ }
+
+ return i.sorted
+}
+
+func (i *refindices) Indexed(rule *Rule) bool {
+ return len(i.rules[rule]) > 0
+}
+
+func (i *refindices) Value(rule *Rule, ref Ref) Value {
+ if index := i.index(rule, ref); index != nil {
+ return index.Value
+ }
+ return nil
+}
+
+func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper {
+ if index := i.index(rule, ref); index != nil {
+ return index.Mapper
+ }
+ return nil
+}
+
+func (i *refindices) updateEq(rule *Rule, expr *Expr) {
+ a, b := expr.Operand(0), expr.Operand(1)
+ if ref, value, ok := eqOperandsToRefAndValue(i.isVirtual, a, b); ok {
+ i.insert(rule, &refindex{
+ Ref: ref,
+ Value: value,
+ })
+ } else if ref, value, ok := eqOperandsToRefAndValue(i.isVirtual, b, a); ok {
+ i.insert(rule, &refindex{
+ Ref: ref,
+ Value: value,
+ })
+ }
+}
+
+func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) {
+
+ delim, ok := globDelimiterToString(expr.Operand(1))
+ if !ok {
+ return
+ }
+
+ if arr := globPatternToArray(expr.Operand(0), delim); arr != nil {
+ // The 3rd operand of glob.match is the value to match. We assume the
+ // 3rd operand was a reference that has been rewritten and bound to a
+ // variable earlier in the query.
+ match := expr.Operand(2)
+ if _, ok := match.Value.(Var); ok {
+ for _, other := range i.rules[rule] {
+ if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 {
+ i.insert(rule, &refindex{
+ Ref: other.Ref,
+ Value: arr.Value,
+ Mapper: &valueMapper{
+ Key: delim,
+ MapValue: func(v Value) Value {
+ if s, ok := v.(String); ok {
+ return stringSliceToArray(splitStringEscaped(string(s), delim))
+ }
+ return v
+ },
+ },
+ })
+ }
+ }
+ }
+ }
+}
+
+func (i *refindices) insert(rule *Rule, index *refindex) {
+
+ count, ok := i.frequency.Get(index.Ref)
+ if !ok {
+ count = 0
+ }
+
+ i.frequency.Put(index.Ref, count.(int)+1)
+
+ for pos, other := range i.rules[rule] {
+ if other.Ref.Equal(index.Ref) {
+ i.rules[rule][pos] = index
+ return
+ }
+ }
+
+ i.rules[rule] = append(i.rules[rule], index)
+}
+
+func (i *refindices) index(rule *Rule, ref Ref) *refindex {
+ for _, index := range i.rules[rule] {
+ if index.Ref.Equal(ref) {
+ return index
+ }
+ }
+ return nil
+}
+
+type trieWalker interface {
+ Do(x interface{}) trieWalker
+}
+
+type trieTraversalResult struct {
+ unordered map[int][]*ruleNode
+ ordering []int
+}
+
+func newTrieTraversalResult() *trieTraversalResult {
+ return &trieTraversalResult{
+ unordered: map[int][]*ruleNode{},
+ }
+}
+
+func (tr *trieTraversalResult) Add(node *ruleNode) {
+ root := node.prio[0]
+ nodes, ok := tr.unordered[root]
+ if !ok {
+ tr.ordering = append(tr.ordering, root)
+ }
+ tr.unordered[root] = append(nodes, node)
+}
+
+type trieNode struct {
+ ref Ref
+ mappers []*valueMapper
+ next *trieNode
+ any *trieNode
+ undefined *trieNode
+ scalars map[Value]*trieNode
+ array *trieNode
+ rules []*ruleNode
+}
+
+func (node *trieNode) String() string {
+ var flags []string
+ flags = append(flags, fmt.Sprintf("self:%p", node))
+ if len(node.ref) > 0 {
+ flags = append(flags, node.ref.String())
+ }
+ if node.next != nil {
+ flags = append(flags, fmt.Sprintf("next:%p", node.next))
+ }
+ if node.any != nil {
+ flags = append(flags, fmt.Sprintf("any:%p", node.any))
+ }
+ if node.undefined != nil {
+ flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined))
+ }
+ if node.array != nil {
+ flags = append(flags, fmt.Sprintf("array:%p", node.array))
+ }
+ if len(node.scalars) > 0 {
+ buf := []string{}
+ for k, v := range node.scalars {
+ buf = append(buf, fmt.Sprintf("scalar(%v):%p", k, v))
+ }
+ sort.Strings(buf)
+ flags = append(flags, strings.Join(buf, " "))
+ }
+ if len(node.rules) > 0 {
+ flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules)))
+ }
+ if len(node.mappers) > 0 {
+ flags = append(flags, "mapper(s)")
+ }
+ return strings.Join(flags, " ")
+}
+
+type ruleNode struct {
+ prio [2]int
+ rule *Rule
+}
+
+func newTrieNodeImpl() *trieNode {
+ return &trieNode{
+ scalars: map[Value]*trieNode{},
+ }
+}
+
+func (node *trieNode) Do(walker trieWalker) {
+ next := walker.Do(node)
+ if next == nil {
+ return
+ }
+ if node.any != nil {
+ node.any.Do(next)
+ }
+ if node.undefined != nil {
+ node.undefined.Do(next)
+ }
+ for _, child := range node.scalars {
+ child.Do(next)
+ }
+ if node.array != nil {
+ node.array.Do(next)
+ }
+ if node.next != nil {
+ node.next.Do(next)
+ }
+}
+
+func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode {
+
+ if node.next == nil {
+ node.next = newTrieNodeImpl()
+ node.next.ref = ref
+ }
+
+ if mapper != nil {
+ node.next.addMapper(mapper)
+ }
+
+ return node.next.insertValue(value)
+}
+
+func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error {
+
+ if node == nil {
+ return nil
+ }
+
+ for i := range node.rules {
+ tr.Add(node.rules[i])
+ }
+
+ return node.next.traverse(resolver, tr)
+}
+
+func (node *trieNode) addMapper(mapper *valueMapper) {
+ for i := range node.mappers {
+ if node.mappers[i].Key == mapper.Key {
+ return
+ }
+ }
+ node.mappers = append(node.mappers, mapper)
+}
+
+func (node *trieNode) insertValue(value Value) *trieNode {
+
+ switch value := value.(type) {
+ case nil:
+ if node.undefined == nil {
+ node.undefined = newTrieNodeImpl()
+ }
+ return node.undefined
+ case Var:
+ if node.any == nil {
+ node.any = newTrieNodeImpl()
+ }
+ return node.any
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars[value]
+ if !ok {
+ child = newTrieNodeImpl()
+ node.scalars[value] = child
+ }
+ return child
+ case *Array:
+ if node.array == nil {
+ node.array = newTrieNodeImpl()
+ }
+ return node.array.insertArray(value)
+ }
+
+ panic("illegal value")
+}
+
+func (node *trieNode) insertArray(arr *Array) *trieNode {
+
+ if arr.Len() == 0 {
+ return node
+ }
+
+ switch head := arr.Elem(0).Value.(type) {
+ case Var:
+ if node.any == nil {
+ node.any = newTrieNodeImpl()
+ }
+ return node.any.insertArray(arr.Slice(1, -1))
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars[head]
+ if !ok {
+ child = newTrieNodeImpl()
+ node.scalars[head] = child
+ }
+ return child.insertArray(arr.Slice(1, -1))
+ }
+
+ panic("illegal value")
+}
+
+func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error {
+
+ if node == nil {
+ return nil
+ }
+
+ v, err := resolver.Resolve(node.ref)
+ if err != nil {
+ if IsUnknownValueErr(err) {
+ return node.traverseUnknown(resolver, tr)
+ }
+ return err
+ }
+
+ if node.undefined != nil {
+ node.undefined.Traverse(resolver, tr)
+ }
+
+ if v == nil {
+ return nil
+ }
+
+ if node.any != nil {
+ node.any.Traverse(resolver, tr)
+ }
+
+ if len(node.mappers) == 0 {
+ return node.traverseValue(resolver, tr, v)
+ }
+
+ for i := range node.mappers {
+ if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error {
+
+ switch value := value.(type) {
+ case *Array:
+ if node.array == nil {
+ return nil
+ }
+ return node.array.traverseArray(resolver, tr, value)
+
+ case Null, Boolean, Number, String:
+ child, ok := node.scalars[value]
+ if !ok {
+ return nil
+ }
+ return child.Traverse(resolver, tr)
+ }
+
+ return nil
+}
+
+func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error {
+
+ if arr.Len() == 0 {
+ return node.Traverse(resolver, tr)
+ }
+
+ head := arr.Elem(0).Value
+
+ if !IsScalar(head) {
+ return nil
+ }
+
+ if node.any != nil {
+ node.any.traverseArray(resolver, tr, arr.Slice(1, -1))
+ }
+
+ child, ok := node.scalars[head]
+ if !ok {
+ return nil
+ }
+
+ return child.traverseArray(resolver, tr, arr.Slice(1, -1))
+}
+
+func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error {
+
+ if node == nil {
+ return nil
+ }
+
+ if err := node.Traverse(resolver, tr); err != nil {
+ return err
+ }
+
+ if err := node.undefined.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+
+ if err := node.any.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+
+ if err := node.array.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+
+ for _, child := range node.scalars {
+ if err := child.traverseUnknown(resolver, tr); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type triePrinter struct {
+ depth int
+ w io.Writer
+}
+
+func (p triePrinter) Do(x interface{}) trieWalker {
+ padding := strings.Repeat(" ", p.depth)
+ fmt.Fprintf(p.w, "%v%v\n", padding, x)
+ p.depth++
+ return p
+}
+
+func eqOperandsToRefAndValue(isVirtual func(Ref) bool, a, b *Term) (Ref, Value, bool) {
+
+ ref, ok := a.Value.(Ref)
+ if !ok {
+ return nil, nil, false
+ }
+
+ if !RootDocumentNames.Contains(ref[0]) {
+ return nil, nil, false
+ }
+
+ if isVirtual(ref) {
+ return nil, nil, false
+ }
+
+ if ref.IsNested() || !ref.IsGround() {
+ return nil, nil, false
+ }
+
+ switch b := b.Value.(type) {
+ case Null, Boolean, Number, String, Var:
+ return ref, b, true
+ case *Array:
+ stop := false
+ first := true
+ vis := NewGenericVisitor(func(x interface{}) bool {
+ if first {
+ first = false
+ return false
+ }
+ switch x.(type) {
+ // No nested structures or values that require evaluation (other than var).
+ case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref:
+ stop = true
+ }
+ return stop
+ })
+ vis.Walk(b)
+ if !stop {
+ return ref, b, true
+ }
+ }
+
+ return nil, nil, false
+}
+
+func globDelimiterToString(delim *Term) (string, bool) {
+
+ arr, ok := delim.Value.(*Array)
+ if !ok {
+ return "", false
+ }
+
+ var result string
+
+ if arr.Len() == 0 {
+ result = "."
+ } else {
+ for i := 0; i < arr.Len(); i++ {
+ term := arr.Elem(i)
+ s, ok := term.Value.(String)
+ if !ok {
+ return "", false
+ }
+ result += string(s)
+ }
+ }
+
+ return result, true
+}
+
+func globPatternToArray(pattern *Term, delim string) *Term {
+
+ s, ok := pattern.Value.(String)
+ if !ok {
+ return nil
+ }
+
+ parts := splitStringEscaped(string(s), delim)
+ arr := make([]*Term, len(parts))
+
+ for i := range parts {
+ if parts[i] == "*" {
+ arr[i] = VarTerm("$globwildcard")
+ } else {
+ var escaped bool
+ for _, c := range parts[i] {
+ if c == '\\' {
+ escaped = !escaped
+ continue
+ }
+ if !escaped {
+ switch c {
+ case '[', '?', '{', '*':
+ // TODO(tsandall): super glob and character pattern
+ // matching not supported yet.
+ return nil
+ }
+ }
+ escaped = false
+ }
+ arr[i] = StringTerm(parts[i])
+ }
+ }
+
+ return NewTerm(NewArray(arr...))
+}
+
+// splits s on characters in delim except if delim characters have been escaped
+// with reverse solidus.
+func splitStringEscaped(s string, delim string) []string {
+
+ var last, curr int
+ var escaped bool
+ var result []string
+
+ for ; curr < len(s); curr++ {
+ if s[curr] == '\\' || escaped {
+ escaped = !escaped
+ continue
+ }
+ if strings.ContainsRune(delim, rune(s[curr])) {
+ result = append(result, s[last:curr])
+ last = curr + 1
+ }
+ }
+
+ result = append(result, s[last:])
+
+ return result
+}
+
+func stringSliceToArray(s []string) *Array {
+ arr := make([]*Term, len(s))
+ for i, v := range s {
+ arr[i] = StringTerm(v)
+ }
+ return NewArray(arr...)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go b/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go
new file mode 100644
index 00000000..8223e59e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go
@@ -0,0 +1,387 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/open-policy-agent/opa/ast/internal/tokens"
+)
+
+const bom = 0xFEFF
+
+// Scanner is used to tokenize an input stream of
+// Rego source code.
+type Scanner struct {
+	offset int // byte offset of the next rune to read
+	row int // current row (1-based)
+	col int // current column within the row
+	bs []byte // the full source buffer being scanned
+	curr rune // most recently read rune (-1 at EOF)
+	width int // width in bytes of curr
+	errors []Error // errors accumulated since the last Scan call
+	filename string
+}
+
+// Error represents a scanner error.
+type Error struct {
+	Pos Position
+	Message string
+}
+
+// Position represents a point in the scanned source code.
+type Position struct {
+	Offset int // start offset in bytes
+	End int // end offset in bytes
+	Row int // line number computed in bytes
+	Col int // column number computed in bytes
+}
+
+// New returns an initialized scanner that will scan
+// through the source code provided by the io.Reader.
+// The entire reader is consumed up front.
+func New(r io.Reader) (*Scanner, error) {
+
+	bs, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	s := &Scanner{
+		offset: 0,
+		row: 1,
+		col: 0,
+		bs: bs,
+		curr: -1,
+		width: 0,
+	}
+
+	// Prime the scanner with the first rune and skip a leading
+	// byte-order mark if one is present.
+	s.next()
+
+	if s.curr == bom {
+		s.next()
+	}
+
+	return s, nil
+}
+
+// Bytes returns the raw bytes for the full source
+// which the scanner has read in.
+func (s *Scanner) Bytes() []byte {
+	return s.bs
+}
+
+// String returns a human readable string of the current scanner state.
+func (s *Scanner) String() string {
+	// NOTE: the format string previously contained no verbs (""), so the
+	// arguments were never rendered (a go vet printf error); use explicit
+	// verbs for each field.
+	return fmt.Sprintf("<curr: %q, offset: %d, len(bs): %d>", s.curr, s.offset, len(s.bs))
+}
+
+// Scan will increment the scanners position in the source
+// code until the next token is found. The token, starting position
+// of the token, string literal, and any errors encountered are
+// returned. A token will always be returned, the caller must check
+// for any errors before using the other values.
+func (s *Scanner) Scan() (tokens.Token, Position, string, []Error) {
+
+	pos := Position{Offset: s.offset - s.width, Row: s.row, Col: s.col}
+	var tok tokens.Token
+	var lit string
+
+	if s.isWhitespace() {
+		lit = string(s.curr)
+		s.next()
+		tok = tokens.Whitespace
+	} else if isLetter(s.curr) {
+		// Identifiers that spell a Rego keyword are mapped to the
+		// corresponding keyword token by tokens.Keyword.
+		lit = s.scanIdentifier()
+		tok = tokens.Keyword(lit)
+	} else if isDecimal(s.curr) {
+		lit = s.scanNumber()
+		tok = tokens.Number
+	} else {
+		ch := s.curr
+		s.next()
+		switch ch {
+		case -1:
+			tok = tokens.EOF
+		case '#':
+			lit = s.scanComment()
+			tok = tokens.Comment
+		case '"':
+			lit = s.scanString()
+			tok = tokens.String
+		case '`':
+			lit = s.scanRawString()
+			tok = tokens.String
+		case '[':
+			tok = tokens.LBrack
+		case ']':
+			tok = tokens.RBrack
+		case '{':
+			tok = tokens.LBrace
+		case '}':
+			tok = tokens.RBrace
+		case '(':
+			tok = tokens.LParen
+		case ')':
+			tok = tokens.RParen
+		case ',':
+			tok = tokens.Comma
+		case ':':
+			// Two-character operators peek at the already-advanced curr
+			// rune to decide between e.g. ":=" and ":".
+			if s.curr == '=' {
+				s.next()
+				tok = tokens.Assign
+			} else {
+				tok = tokens.Colon
+			}
+		case '+':
+			tok = tokens.Add
+		case '-':
+			tok = tokens.Sub
+		case '*':
+			tok = tokens.Mul
+		case '/':
+			tok = tokens.Quo
+		case '%':
+			tok = tokens.Rem
+		case '&':
+			tok = tokens.And
+		case '|':
+			tok = tokens.Or
+		case '=':
+			if s.curr == '=' {
+				s.next()
+				tok = tokens.Equal
+			} else {
+				tok = tokens.Unify
+			}
+		case '>':
+			if s.curr == '=' {
+				s.next()
+				tok = tokens.Gte
+			} else {
+				tok = tokens.Gt
+			}
+		case '<':
+			if s.curr == '=' {
+				s.next()
+				tok = tokens.Lte
+			} else {
+				tok = tokens.Lt
+			}
+		case '!':
+			// "!" is only legal as part of "!=".
+			if s.curr == '=' {
+				s.next()
+				tok = tokens.Neq
+			} else {
+				s.error("illegal ! character")
+			}
+		case ';':
+			tok = tokens.Semicolon
+		case '.':
+			tok = tokens.Dot
+		}
+	}
+
+	// Errors are drained on every call so each Scan reports only the
+	// errors produced while scanning this token.
+	pos.End = s.offset - s.width
+	errs := s.errors
+	s.errors = nil
+
+	return tok, pos, lit, errs
+}
+
+// scanIdentifier consumes letters and digits starting at the current rune
+// and returns the identifier text.
+func (s *Scanner) scanIdentifier() string {
+	start := s.offset - 1
+	for isLetter(s.curr) || isDigit(s.curr) {
+		s.next()
+	}
+	return string(s.bs[start : s.offset-1])
+}
+
+// scanNumber consumes an integer part, optional fraction, and optional
+// exponent and returns the literal text. Malformed numbers (missing
+// fraction or exponent digits, trailing letters) record an error but
+// still return the consumed text.
+func (s *Scanner) scanNumber() string {
+
+	start := s.offset - 1
+
+	if s.curr != '.' {
+		for isDecimal(s.curr) {
+			s.next()
+		}
+	}
+
+	if s.curr == '.' {
+		s.next()
+		var found bool
+		for isDecimal(s.curr) {
+			s.next()
+			found = true
+		}
+		if !found {
+			s.error("expected fraction")
+		}
+	}
+
+	if lower(s.curr) == 'e' {
+		s.next()
+		if s.curr == '+' || s.curr == '-' {
+			s.next()
+		}
+		var found bool
+		for isDecimal(s.curr) {
+			s.next()
+			found = true
+		}
+		if !found {
+			s.error("expected exponent")
+		}
+	}
+
+	// Scan any digits following the decimals to get the
+	// entire invalid number/identifier.
+	// Example: 0a2b should be a single invalid number "0a2b"
+	// rather than a number "0", followed by identifier "a2b".
+	if isLetter(s.curr) {
+		s.error("illegal number format")
+		for isLetter(s.curr) || isDigit(s.curr) {
+			s.next()
+		}
+	}
+
+	return string(s.bs[start : s.offset-1])
+}
+
+// scanString consumes a double-quoted string (the opening quote has already
+// been consumed) and returns the raw literal text including both quotes.
+// Unterminated strings and illegal escape sequences record errors.
+func (s *Scanner) scanString() string {
+	start := s.literalStart()
+	for {
+		ch := s.curr
+
+		if ch == '\n' || ch < 0 {
+			s.error("non-terminated string")
+			break
+		}
+
+		s.next()
+
+		if ch == '"' {
+			break
+		}
+
+		if ch == '\\' {
+			switch s.curr {
+			case '\\', '"', '/', 'b', 'f', 'n', 'r', 't':
+				s.next()
+			case 'u':
+				// \uXXXX: consume the four hex digits without validating
+				// them here.
+				s.next()
+				s.next()
+				s.next()
+				s.next()
+			default:
+				s.error("illegal escape sequence")
+			}
+		}
+	}
+
+	return string(s.bs[start : s.offset-1])
+}
+
+// scanRawString consumes a backtick-delimited string (the opening backtick
+// has already been consumed); no escape processing is performed.
+func (s *Scanner) scanRawString() string {
+	start := s.literalStart()
+	for {
+		ch := s.curr
+		s.next()
+		if ch == '`' {
+			break
+		} else if ch < 0 {
+			s.error("non-terminated string")
+			break
+		}
+	}
+	return string(s.bs[start : s.offset-1])
+}
+
+// scanComment consumes the remainder of the line following a '#' and
+// returns the comment text without the trailing newline.
+func (s *Scanner) scanComment() string {
+	start := s.literalStart()
+	for s.curr != '\n' && s.curr != -1 {
+		s.next()
+	}
+	end := s.offset - 1
+	// Trim carriage returns that precede the newline
+	if s.offset > 1 && s.bs[s.offset-2] == '\r' {
+		end = end - 1
+	}
+	return string(s.bs[start:end])
+}
+
+// next advances the scanner by one rune, decoding UTF-8 as needed and
+// updating offset/row/col. At end of input curr is set to -1. Null bytes,
+// invalid UTF-8, and interior byte-order marks record errors.
+func (s *Scanner) next() {
+
+	if s.offset >= len(s.bs) {
+		s.curr = -1
+		s.offset = len(s.bs) + 1
+		return
+	}
+
+	s.curr = rune(s.bs[s.offset])
+	s.width = 1
+
+	if s.curr == 0 {
+		s.error("illegal null character")
+	} else if s.curr >= utf8.RuneSelf {
+		// Multi-byte rune: decode it and record its true width.
+		s.curr, s.width = utf8.DecodeRune(s.bs[s.offset:])
+		if s.curr == utf8.RuneError && s.width == 1 {
+			s.error("illegal utf-8 character")
+		} else if s.curr == bom && s.offset > 0 {
+			s.error("illegal byte-order mark")
+		}
+	}
+
+	s.offset += s.width
+
+	if s.curr == '\n' {
+		s.row++
+		s.col = 0
+	} else {
+		s.col++
+	}
+}
+
+// peek returns the byte i positions ahead of the current offset without
+// advancing, or 0 if that position is past the end of input.
+func (s *Scanner) peek(i int) rune {
+	if s.offset+i < len(s.bs) {
+		return rune(s.bs[s.offset+i])
+	}
+	return 0
+}
+
+func (s *Scanner) literalStart() int {
+	// The current offset is at the first character past the literal delimiter (#, ", `, etc.)
+	// Need to subtract width of first character (plus one for the delimiter).
+	return s.offset - (s.width + 1)
+}
+
+// From the Go scanner (src/go/scanner/scanner.go)
+
+// isLetter reports whether ch is an ASCII letter or underscore.
+func isLetter(ch rune) bool {
+	return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_'
+}
+
+// isDigit reports whether ch is an ASCII digit or a Unicode digit.
+func isDigit(ch rune) bool {
+	return isDecimal(ch) || ch >= utf8.RuneSelf && unicode.IsDigit(ch)
+}
+
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+
+func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+
+// isWhitespace reports whether the current rune is a space, tab, newline,
+// or carriage return.
+func (s *Scanner) isWhitespace() bool {
+	return s.curr == ' ' || s.curr == '\t' || s.curr == '\n' || s.curr == '\r'
+}
+
+// error records a scanner error at the current position; errors are
+// returned (and cleared) by the next call to Scan.
+func (s *Scanner) error(reason string) {
+	s.errors = append(s.errors, Error{Pos: Position{
+		Offset: s.offset,
+		Row: s.row,
+		Col: s.col,
+	}, Message: reason})
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go b/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go
new file mode 100644
index 00000000..29229a5a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go
@@ -0,0 +1,138 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package tokens
+
+// Token represents a single Rego source code token
+// for use by the Parser.
+type Token int
+
+// String returns the human-readable spelling of the token, or "unknown"
+// for out-of-range values.
+func (t Token) String() string {
+	if t < 0 || int(t) >= len(strings) {
+		return "unknown"
+	}
+	return strings[t]
+}
+
+// All tokens must be defined here
+const (
+	Illegal Token = iota
+	EOF
+	Whitespace
+	Ident
+	Comment
+
+	// Keywords.
+	Package
+	Import
+	As
+	Default
+	Else
+	Not
+	Some
+	With
+	Null
+	True
+	False
+
+	// Literals.
+	Number
+	String
+
+	// Delimiters.
+	LBrack
+	RBrack
+	LBrace
+	RBrace
+	LParen
+	RParen
+	Comma
+	Colon
+
+	// Operators.
+	Add
+	Sub
+	Mul
+	Quo
+	Rem
+	And
+	Or
+	Unify
+	Equal
+	Assign
+	Neq
+	Gt
+	Lt
+	Gte
+	Lte
+	Dot
+	Semicolon
+)
+
+// strings maps each token value to its display name; indexed by Token,
+// so it must stay in sync with the const block above.
+var strings = [...]string{
+	Illegal: "illegal",
+	EOF: "eof",
+	Whitespace: "whitespace",
+	Comment: "comment",
+	Ident: "ident",
+	Package: "package",
+	Import: "import",
+	As: "as",
+	Default: "default",
+	Else: "else",
+	Not: "not",
+	Some: "some",
+	With: "with",
+	Null: "null",
+	True: "true",
+	False: "false",
+	Number: "number",
+	String: "string",
+	LBrack: "[",
+	RBrack: "]",
+	LBrace: "{",
+	RBrace: "}",
+	LParen: "(",
+	RParen: ")",
+	Comma: ",",
+	Colon: ":",
+	Add: "plus",
+	Sub: "minus",
+	Mul: "mul",
+	Quo: "div",
+	Rem: "rem",
+	And: "and",
+	Or: "or",
+	Unify: "eq",
+	Equal: "equal",
+	Assign: "assign",
+	Neq: "neq",
+	Gt: "gt",
+	Lt: "lt",
+	Gte: "gte",
+	Lte: "lte",
+	Dot: ".",
+	Semicolon: ";",
+}
+
+// keywords maps reserved Rego words to their keyword tokens.
+var keywords = map[string]Token{
+	"package": Package,
+	"import": Import,
+	"as": As,
+	"default": Default,
+	"else": Else,
+	"not": Not,
+	"some": Some,
+	"with": With,
+	"null": Null,
+	"true": True,
+	"false": False,
+}
+
+// Keyword will return a token for the passed in
+// literal value. If the value is a Rego keyword
+// then the appropriate token is returned. Everything
+// else is an Ident.
+func Keyword(lit string) Token {
+	if tok, ok := keywords[lit]; ok {
+		return tok
+	}
+	return Ident
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/location/location.go b/vendor/github.com/open-policy-agent/opa/ast/location/location.go
new file mode 100644
index 00000000..13ae6e35
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/location/location.go
@@ -0,0 +1,90 @@
+// Package location defines locations in Rego source code.
+package location
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+// Location records a position in source code
+type Location struct {
+	Text []byte `json:"-"` // The original text fragment from the source.
+	File string `json:"file"` // The name of the source file (which may be empty).
+	Row int `json:"row"` // The line in the source.
+	Col int `json:"col"` // The column in the row.
+	Offset int `json:"-"` // The byte offset for the location in the source.
+}
+
+// NewLocation returns a new Location object.
+// Note: Offset is left zero; callers set it separately when needed.
+func NewLocation(text []byte, file string, row int, col int) *Location {
+	return &Location{Text: text, File: file, Row: row, Col: col}
+}
+
+// Equal checks if two locations are equal to each other.
+// Note: Offset is not part of the comparison.
+func (loc *Location) Equal(other *Location) bool {
+	return bytes.Equal(loc.Text, other.Text) &&
+		loc.File == other.File &&
+		loc.Row == other.Row &&
+		loc.Col == other.Col
+}
+
+// Errorf returns a new error value with a message formatted to include the location
+// info (e.g., line, column, filename, etc.)
+func (loc *Location) Errorf(f string, a ...interface{}) error {
+	return errors.New(loc.Format(f, a...))
+}
+
+// Wrapf returns a new error value that wraps an existing error with a message formatted
+// to include the location info (e.g., line, column, filename, etc.)
+func (loc *Location) Wrapf(err error, f string, a ...interface{}) error {
+	return errors.Wrap(err, loc.Format(f, a...))
+}
+
+// Format returns a formatted string prefixed with the location information.
+// With a filename the prefix is "file:row:"; otherwise it is "row:col:".
+func (loc *Location) Format(f string, a ...interface{}) string {
+	if len(loc.File) > 0 {
+		f = fmt.Sprintf("%v:%v: %v", loc.File, loc.Row, f)
+	} else {
+		f = fmt.Sprintf("%v:%v: %v", loc.Row, loc.Col, f)
+	}
+	return fmt.Sprintf(f, a...)
+}
+
+// String renders the location as "file:row" when a filename is set, the
+// raw source text when available, or "row:col" as a last resort.
+func (loc *Location) String() string {
+	if len(loc.File) > 0 {
+		return fmt.Sprintf("%v:%v", loc.File, loc.Row)
+	}
+	if len(loc.Text) > 0 {
+		return string(loc.Text)
+	}
+	return fmt.Sprintf("%v:%v", loc.Row, loc.Col)
+}
+
+// Compare returns -1, 0, or 1 to indicate if this loc is less than, equal to,
+// or greater than the other. Comparison is performed on the file, row, and
+// column of the Location (but not on the text.) Nil locations are greater than
+// non-nil locations.
+func (loc *Location) Compare(other *Location) int {
+	if loc == nil && other == nil {
+		return 0
+	} else if loc == nil {
+		return 1
+	} else if other == nil {
+		return -1
+	} else if loc.File < other.File {
+		return -1
+	} else if loc.File > other.File {
+		return 1
+	} else if loc.Row < other.Row {
+		return -1
+	} else if loc.Row > other.Row {
+		return 1
+	} else if loc.Col < other.Col {
+		return -1
+	} else if loc.Col > other.Col {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/map.go b/vendor/github.com/open-policy-agent/opa/ast/map.go
new file mode 100644
index 00000000..b0cc9eb6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/map.go
@@ -0,0 +1,133 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "encoding/json"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+// ValueMap represents a key/value map between AST term values. Any type of term
+// can be used as a key in the map.
+type ValueMap struct {
+	hashMap *util.HashMap
+}
+
+// NewValueMap returns a new ValueMap.
+func NewValueMap() *ValueMap {
+	vs := &ValueMap{
+		hashMap: util.NewHashMap(valueEq, valueHash),
+	}
+	return vs
+}
+
+// MarshalJSON provides a custom marshaller for the ValueMap which
+// will include the key, value, and value type.
+func (vs *ValueMap) MarshalJSON() ([]byte, error) {
+	var tmp []map[string]interface{}
+	vs.Iter(func(k Value, v Value) bool {
+		tmp = append(tmp, map[string]interface{}{
+			"name": k.String(),
+			"type": TypeName(v),
+			"value": v,
+		})
+		// Returning false continues the iteration over all entries.
+		return false
+	})
+	return json.Marshal(tmp)
+}
+
+// Copy returns a shallow copy of the ValueMap.
+// A nil receiver yields nil.
+func (vs *ValueMap) Copy() *ValueMap {
+	if vs == nil {
+		return nil
+	}
+	cpy := NewValueMap()
+	cpy.hashMap = vs.hashMap.Copy()
+	return cpy
+}
+
+// Equal returns true if this ValueMap equals the other.
+// A nil map is considered equal to an empty (or nil) map.
+func (vs *ValueMap) Equal(other *ValueMap) bool {
+	if vs == nil {
+		return other == nil || other.Len() == 0
+	}
+	if other == nil {
+		return vs == nil || vs.Len() == 0
+	}
+	return vs.hashMap.Equal(other.hashMap)
+}
+
+// Len returns the number of elements in the map.
+func (vs *ValueMap) Len() int {
+	if vs == nil {
+		return 0
+	}
+	return vs.hashMap.Len()
+}
+
+// Get returns the value in the map for k.
+// It returns nil if k is not present or the receiver is nil.
+func (vs *ValueMap) Get(k Value) Value {
+	if vs != nil {
+		if v, ok := vs.hashMap.Get(k); ok {
+			return v.(Value)
+		}
+	}
+	return nil
+}
+
+// Hash returns a hash code for this ValueMap.
+func (vs *ValueMap) Hash() int {
+	if vs == nil {
+		return 0
+	}
+	return vs.hashMap.Hash()
+}
+
+// Iter calls the iter function for each key/value pair in the map. If the iter
+// function returns true, iteration stops.
+func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool {
+	if vs == nil {
+		return false
+	}
+	return vs.hashMap.Iter(func(kt, vt util.T) bool {
+		k := kt.(Value)
+		v := vt.(Value)
+		return iter(k, v)
+	})
+}
+
+// Put inserts a key k into the map with value v.
+// Unlike the read-only methods, Put panics on a nil receiver.
+func (vs *ValueMap) Put(k, v Value) {
+	if vs == nil {
+		panic("put on nil value map")
+	}
+	vs.hashMap.Put(k, v)
+}
+
+// Delete removes a key k from the map.
+// Deleting from a nil map is a no-op.
+func (vs *ValueMap) Delete(k Value) {
+	if vs == nil {
+		return
+	}
+	vs.hashMap.Delete(k)
+}
+
+// String returns a string representation of the map ("{}" when nil).
+func (vs *ValueMap) String() string {
+	if vs == nil {
+		return "{}"
+	}
+	return vs.hashMap.String()
+}
+
+// valueHash adapts Value.Hash to the util.HashMap hash function signature.
+func valueHash(v util.T) int {
+	return v.(Value).Hash()
+}
+
+// valueEq adapts Value.Compare to the util.HashMap equality signature.
+func valueEq(a, b util.T) bool {
+	av := a.(Value)
+	bv := b.(Value)
+	return av.Compare(bv) == 0
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser.go b/vendor/github.com/open-policy-agent/opa/ast/parser.go
new file mode 100644
index 00000000..ab3ddf59
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/parser.go
@@ -0,0 +1,1498 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/open-policy-agent/opa/ast/internal/scanner"
+ "github.com/open-policy-agent/opa/ast/internal/tokens"
+ "github.com/open-policy-agent/opa/ast/location"
+)
+
+// Note: This state is kept isolated from the parser so that we
+// can do efficient shallow copies of these values when doing a
+// save() and restore().
+type state struct {
+	s *scanner.Scanner // underlying token source
+	lastEnd int // end offset of the last non-whitespace token
+	skippedNL bool // true if a newline was skipped before the current token
+	tok tokens.Token // current token
+	tokEnd int // end offset of the current token
+	lit string // current token's literal text
+	loc Location // location of the current token
+	errors Errors // accumulated parse errors
+	comments []*Comment // accumulated comments
+	wildcard int // counter used to generate unique wildcard variable names
+}
+
+// String returns a human readable representation of the parser state for
+// debugging.
+func (s *state) String() string {
+	// NOTE: the format string previously contained no verbs (""), so the
+	// arguments were never rendered (a go vet printf error); use explicit
+	// verbs for each field.
+	return fmt.Sprintf("<s: %v, tok: %v, lit: %q, loc: %v, errors: %d, comments: %d>", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments))
+}
+
+// Loc returns a copy of the current location so that callers can retain it
+// without aliasing the parser's mutable state.
+func (s *state) Loc() *location.Location {
+	cpy := s.loc
+	return &cpy
+}
+
+// Text returns the source bytes in [offset, end), or nil if the range is
+// out of bounds.
+func (s *state) Text(offset, end int) []byte {
+	bs := s.s.Bytes()
+	if offset >= 0 && offset < len(bs) {
+		if end >= offset && end <= len(bs) {
+			return bs[offset:end]
+		}
+	}
+	return nil
+}
+
+// Parser is used to parse Rego statements.
+type Parser struct {
+	r io.Reader // source to parse; set via WithReader
+	s *state // mutable parse state (scanner, current token, errors)
+}
+
+// NewParser creates and initializes a Parser.
+func NewParser() *Parser {
+	p := &Parser{s: &state{}}
+	return p
+}
+
+// WithFilename provides the filename for Location details
+// on parsed statements.
+func (p *Parser) WithFilename(filename string) *Parser {
+	p.s.loc.File = filename
+	return p
+}
+
+// WithReader provides the io.Reader that the parser will
+// use as its source.
+func (p *Parser) WithReader(r io.Reader) *Parser {
+	p.r = r
+	return p
+}
+
+// Parse will read the Rego source and parse statements and
+// comments as they are found. Any errors encountered while
+// parsing will be accumulated and returned as a list of Errors.
+func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
+
+	var err error
+	p.s.s, err = scanner.New(p.r)
+	if err != nil {
+		return nil, nil, Errors{
+			&Error{
+				Code: ParseErr,
+				Message: err.Error(),
+				Location: nil,
+			},
+		}
+	}
+
+	// read the first token to initialize the parser
+	p.scan()
+
+	var stmts []Statement
+
+	// Read from the scanner until the last token is reached or no statements
+	// can be parsed. Attempt to parse package statements, import statements,
+	// rule statements, and then body/query statements (in that order). If a
+	// statement cannot be parsed, restore the parser state before trying the
+	// next type of statement. If a statement can be parsed, continue from that
+	// point trying to parse packages, imports, etc. in the same order.
+	for p.s.tok != tokens.EOF {
+
+		s := p.save()
+
+		if pkg := p.parsePackage(); pkg != nil {
+			stmts = append(stmts, pkg)
+			continue
+		} else if len(p.s.errors) > 0 {
+			break
+		}
+
+		p.restore(s)
+		s = p.save()
+
+		if imp := p.parseImport(); imp != nil {
+			stmts = append(stmts, imp)
+			continue
+		} else if len(p.s.errors) > 0 {
+			break
+		}
+
+		p.restore(s)
+		s = p.save()
+
+		if rules := p.parseRules(); rules != nil {
+			for i := range rules {
+				stmts = append(stmts, rules[i])
+			}
+			continue
+		} else if len(p.s.errors) > 0 {
+			break
+		}
+
+		p.restore(s)
+		s = p.save()
+
+		// A body/query statement is the last alternative; there is no
+		// state to restore if it fails, so stop parsing on failure.
+		if body := p.parseQuery(true, tokens.EOF); body != nil {
+			stmts = append(stmts, body)
+			continue
+		}
+
+		break
+	}
+
+	return stmts, p.s.comments, p.s.errors
+}
+
+// parsePackage parses a "package" declaration and returns it, or nil if the
+// current token is not "package" or the path is malformed (errors are
+// recorded in that case). The path is rooted at DefaultRootDocument.
+func (p *Parser) parsePackage() *Package {
+
+	var pkg Package
+	pkg.SetLoc(p.s.Loc())
+
+	if p.s.tok != tokens.Package {
+		return nil
+	}
+
+	p.scan()
+	if p.s.tok != tokens.Ident {
+		p.illegalToken()
+		return nil
+	}
+
+	term := p.parseTerm()
+
+	if term != nil {
+		switch v := term.Value.(type) {
+		case Var:
+			// Single-segment package name, e.g. "package foo".
+			pkg.Path = Ref{
+				DefaultRootDocument.Copy().SetLocation(term.Location),
+				StringTerm(string(v)).SetLocation(term.Location),
+			}
+		case Ref:
+			// Multi-segment package path: the head must be a var and all
+			// remaining segments must be strings.
+			pkg.Path = make(Ref, len(v)+1)
+			pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location)
+			first, ok := v[0].Value.(Var)
+			if !ok {
+				p.errorf(v[0].Location, "unexpected %v token: expecting var", TypeName(v[0].Value))
+				return nil
+			}
+			pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location)
+			for i := 2; i < len(pkg.Path); i++ {
+				switch v[i-1].Value.(type) {
+				case String:
+					pkg.Path[i] = v[i-1]
+				default:
+					p.errorf(v[i-1].Location, "unexpected %v token: expecting string", TypeName(v[i-1].Value))
+					return nil
+				}
+			}
+		default:
+			p.illegalToken()
+			return nil
+		}
+	}
+
+	if pkg.Path == nil {
+		if len(p.s.errors) == 0 {
+			p.error(p.s.Loc(), "expected path")
+		}
+		return nil
+	}
+
+	return &pkg
+}
+
+// parseImport parses an "import" declaration, including an optional
+// "as <alias>" clause, and returns it. It returns nil (recording errors)
+// if the current token is not "import" or the path is invalid. Import
+// paths must be rooted at one of the RootDocumentNames.
+func (p *Parser) parseImport() *Import {
+
+	var imp Import
+	imp.SetLoc(p.s.Loc())
+
+	if p.s.tok != tokens.Import {
+		return nil
+	}
+
+	p.scan()
+	if p.s.tok != tokens.Ident {
+		p.error(p.s.Loc(), "expected ident")
+		return nil
+	}
+
+	term := p.parseTerm()
+	if term != nil {
+		switch v := term.Value.(type) {
+		case Var:
+			imp.Path = RefTerm(term).SetLocation(term.Location)
+		case Ref:
+			// All segments after the head must be strings.
+			for i := 1; i < len(v); i++ {
+				if _, ok := v[i].Value.(String); !ok {
+					p.errorf(v[i].Location, "unexpected %v token: expecting string", TypeName(v[i].Value))
+					return nil
+				}
+			}
+			imp.Path = term
+		}
+	}
+
+	if imp.Path == nil {
+		p.error(p.s.Loc(), "expected path")
+		return nil
+	}
+
+	path := imp.Path.Value.(Ref)
+
+	if !RootDocumentNames.Contains(path[0]) {
+		p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", RootDocumentNames, path[0])
+		return nil
+	}
+
+	if p.s.tok == tokens.As {
+		p.scan()
+
+		if p.s.tok != tokens.Ident {
+			p.illegal("expected var")
+			return nil
+		}
+
+		alias := p.parseTerm()
+
+		v, ok := alias.Value.(Var)
+		if !ok {
+			p.illegal("expected var")
+			return nil
+		}
+		imp.Alias = v
+	}
+
+	return &imp
+}
+
+// parseRules parses a rule definition (optionally prefixed with "default")
+// plus any chained rule bodies that follow it, returning one Rule per body.
+// It returns nil if the statement is not a rule or is malformed.
+func (p *Parser) parseRules() []*Rule {
+
+	var rule Rule
+	rule.SetLoc(p.s.Loc())
+
+	if p.s.tok == tokens.Default {
+		p.scan()
+		rule.Default = true
+	}
+
+	if p.s.tok != tokens.Ident {
+		return nil
+	}
+
+	if rule.Head = p.parseHead(rule.Default); rule.Head == nil {
+		return nil
+	}
+
+	if rule.Default {
+		if !p.validateDefaultRuleValue(&rule) {
+			return nil
+		}
+
+		// Default rules have no body; synthesize "true".
+		rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location))
+		return []*Rule{&rule}
+	}
+
+	if p.s.tok == tokens.LBrace {
+		p.scan()
+		if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
+			return nil
+		}
+		p.scan()
+	} else {
+		return nil
+	}
+
+	if p.s.tok == tokens.Else {
+
+		if rule.Head.Assign {
+			p.error(p.s.Loc(), "else keyword cannot be used on rule declared with := operator")
+			return nil
+		}
+
+		if rule.Head.Key != nil {
+			p.error(p.s.Loc(), "else keyword cannot be used on partial rules")
+			return nil
+		}
+
+		if rule.Else = p.parseElse(rule.Head); rule.Else == nil {
+			return nil
+		}
+	}
+
+	rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd)
+
+	var rules []*Rule
+
+	rules = append(rules, &rule)
+
+	// Additional "{ ... }" blocks after the first rule body are chained
+	// rules sharing the same head.
+	for p.s.tok == tokens.LBrace {
+
+		if rule.Else != nil {
+			p.error(p.s.Loc(), "expected else keyword")
+			return nil
+		}
+
+		loc := p.s.Loc()
+
+		p.scan()
+		var next Rule
+
+		if next.Body = p.parseBody(tokens.RBrace); next.Body == nil {
+			return nil
+		}
+		p.scan()
+
+		loc.Text = p.s.Text(loc.Offset, p.s.lastEnd)
+		next.SetLoc(loc)
+
+		// Chained rule heads keep the original rule's head AST but have
+		// their location set to the rule body.
+		next.Head = rule.Head.Copy()
+		setLocRecursive(next.Head, loc)
+
+		rules = append(rules, &next)
+	}
+
+	return rules
+}
+
+// parseElse parses an "else" clause for a rule with the given head; the
+// clause may carry a value ("else = <term>") and/or a body, and may itself
+// be followed by further else clauses (parsed recursively).
+func (p *Parser) parseElse(head *Head) *Rule {
+
+	var rule Rule
+	rule.SetLoc(p.s.Loc())
+
+	rule.Head = head.Copy()
+	rule.Head.SetLoc(p.s.Loc())
+
+	defer func() {
+		rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd)
+	}()
+
+	p.scan()
+
+	switch p.s.tok {
+	case tokens.LBrace:
+		// "else { ... }" without a value defaults the value to true.
+		rule.Head.Value = BooleanTerm(true)
+	case tokens.Unify:
+		p.scan()
+		rule.Head.Value = p.parseTermRelation()
+		if rule.Head.Value == nil {
+			return nil
+		}
+		rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd)
+	default:
+		p.illegal("expected else value term or rule body")
+		return nil
+	}
+
+	if p.s.tok != tokens.LBrace {
+		// Value-only else clause: synthesize a "true" body.
+		rule.Body = NewBody(NewExpr(BooleanTerm(true)))
+		setLocRecursive(rule.Body, rule.Location)
+		return &rule
+	}
+
+	p.scan()
+
+	if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil {
+		return nil
+	}
+
+	p.scan()
+
+	if p.s.tok == tokens.Else {
+		if rule.Else = p.parseElse(head); rule.Else == nil {
+			return nil
+		}
+	}
+	return &rule
+}
+
+// parseHead parses a rule head: name, optional argument list "(...)",
+// optional key "[...]", and optional value introduced by "=" or ":=".
+// defaultRule indicates the rule was prefixed with "default", which
+// restricts which operators are allowed. Returns nil on error.
+func (p *Parser) parseHead(defaultRule bool) *Head {
+
+	var head Head
+	head.SetLoc(p.s.Loc())
+
+	defer func() {
+		head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd)
+	}()
+
+	if term := p.parseVar(); term != nil {
+		if v, ok := term.Value.(Var); ok {
+			head.Name = v
+		}
+	}
+	if head.Name == "" {
+		p.illegal("expected rule head name")
+	}
+
+	p.scan()
+
+	if p.s.tok == tokens.LParen {
+		p.scan()
+		if p.s.tok != tokens.RParen {
+			head.Args = p.parseTermList(tokens.RParen, nil)
+			if head.Args == nil {
+				return nil
+			}
+		}
+		p.scan()
+
+		// A function head cannot also declare a partial-rule key.
+		if p.s.tok == tokens.LBrack {
+			return nil
+		}
+	}
+
+	if p.s.tok == tokens.LBrack {
+		p.scan()
+		head.Key = p.parseTermRelation()
+		if head.Key == nil {
+			p.illegal("expected rule key term (e.g., %s[] { ... })", head.Name)
+		}
+		if p.s.tok != tokens.RBrack {
+			p.illegal("non-terminated rule key")
+		}
+		p.scan()
+	}
+
+	if p.s.tok == tokens.Unify {
+		p.scan()
+		head.Value = p.parseTermRelation()
+		if head.Value == nil {
+			p.illegal("expected rule value term (e.g., %s[] { ... })", head.Name)
+		}
+	} else if p.s.tok == tokens.Assign {
+
+		// ":=" is only valid for complete, non-default, non-function rules.
+		if defaultRule {
+			p.error(p.s.Loc(), "default rules must use = operator (not := operator)")
+			return nil
+		} else if head.Key != nil {
+			p.error(p.s.Loc(), "partial rules must use = operator (not := operator)")
+			return nil
+		} else if len(head.Args) > 0 {
+			p.error(p.s.Loc(), "functions must use = operator (not := operator)")
+			return nil
+		}
+
+		p.scan()
+		head.Assign = true
+		head.Value = p.parseTermRelation()
+		if head.Value == nil {
+			p.illegal("expected rule value term (e.g., %s := { ... })", head.Name)
+		}
+	}
+
+	// Neither key nor value given: the rule value defaults to true.
+	if head.Value == nil && head.Key == nil {
+		head.Value = BooleanTerm(true).SetLocation(head.Location)
+	}
+
+	return &head
+}
+
+// parseBody parses a rule body terminated by end (newlines allowed
+// between expressions).
+func (p *Parser) parseBody(end tokens.Token) Body {
+	return p.parseQuery(false, end)
+}
+
+// parseQuery parses a sequence of literals terminated by end. When
+// requireSemi is true each literal must be explicitly separated by ";"
+// (newline separation is not accepted). Returns nil on error or if the
+// body is empty.
+func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body {
+	body := Body{}
+
+	if p.s.tok == end {
+		p.error(p.s.Loc(), "found empty body")
+		return nil
+	}
+
+	for {
+
+		expr := p.parseLiteral()
+		if expr == nil {
+			return nil
+		}
+
+		body.Append(expr)
+
+		if p.s.tok == tokens.Semicolon {
+			p.scan()
+			continue
+		}
+
+		if p.s.tok == end || requireSemi {
+			return body
+		}
+
+		if !p.s.skippedNL {
+			// If there was already an error then don't pile this one on
+			if len(p.s.errors) == 0 {
+				p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end)
+			}
+			return nil
+		}
+	}
+}
+
+// parseLiteral parses a single body literal: a "some" declaration, or an
+// (optionally "not"-negated) expression with optional "with" modifiers.
+// The returned expression has its location text set to the consumed span.
+func (p *Parser) parseLiteral() (expr *Expr) {
+
+	offset := p.s.loc.Offset
+	loc := p.s.Loc()
+
+	defer func() {
+		if expr != nil {
+			loc.Text = p.s.Text(offset, p.s.lastEnd)
+			expr.SetLoc(loc)
+		}
+	}()
+
+	var negated bool
+	switch p.s.tok {
+	case tokens.Some:
+		return p.parseSome()
+	case tokens.Not:
+		p.scan()
+		negated = true
+		fallthrough
+	default:
+		expr := p.parseExpr()
+		if expr != nil {
+			expr.Negated = negated
+			if p.s.tok == tokens.With {
+				if expr.With = p.parseWith(); expr.With == nil {
+					return nil
+				}
+			}
+			return expr
+		}
+		return nil
+	}
+}
+
+// parseWith parses one or more "with <target> as <value>" modifiers and
+// returns them. Targets must be refs or vars. Returns nil on error.
+func (p *Parser) parseWith() []*With {
+
+	withs := []*With{}
+
+	for {
+
+		with := With{
+			Location: p.s.Loc(),
+		}
+		p.scan()
+
+		if p.s.tok != tokens.Ident {
+			p.illegal("expected ident")
+			return nil
+		}
+
+		if with.Target = p.parseTerm(); with.Target == nil {
+			return nil
+		}
+
+		switch with.Target.Value.(type) {
+		case Ref, Var:
+			break
+		default:
+			p.illegal("expected with target path")
+		}
+
+		if p.s.tok != tokens.As {
+			p.illegal("expected as keyword")
+			return nil
+		}
+
+		p.scan()
+
+		if with.Value = p.parseTermRelation(); with.Value == nil {
+			return nil
+		}
+
+		with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd)
+
+		withs = append(withs, &with)
+
+		// Additional "with" keywords chain further modifiers.
+		if p.s.tok != tokens.With {
+			break
+		}
+	}
+
+	return withs
+}
+
+// parseSome parses a "some x, y, ..." declaration into a SomeDecl
+// expression. Each symbol must be an identifier. Returns nil on error.
+func (p *Parser) parseSome() *Expr {
+
+	decl := &SomeDecl{}
+	decl.SetLoc(p.s.Loc())
+
+	for {
+
+		p.scan()
+
+		// NOTE(review): this switch has a single empty case and is a
+		// no-op; the Ident check below is what enforces the token type.
+		switch p.s.tok {
+		case tokens.Ident:
+		}
+
+		if p.s.tok != tokens.Ident {
+			p.illegal("expected var")
+			return nil
+		}
+
+		decl.Symbols = append(decl.Symbols, p.parseVar())
+
+		p.scan()
+
+		if p.s.tok != tokens.Comma {
+			break
+		}
+	}
+
+	return NewExpr(decl).SetLocation(decl.Location)
+}
+
+// parseExpr parses a top-level expression: either a relation term, an
+// assignment/unification ("lhs := rhs" / "lhs = rhs"), or a bare call.
+// Returns nil on error.
+func (p *Parser) parseExpr() *Expr {
+
+	lhs := p.parseTermRelation()
+
+	if lhs == nil {
+		return nil
+	}
+
+	if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil {
+		if rhs := p.parseTermRelation(); rhs != nil {
+			return NewExpr([]*Term{op, lhs, rhs})
+		}
+		return nil
+	}
+
+	// NOTE(tsandall): the top-level call term is converted to an expr because
+	// the evaluator does not support the call term type (nested calls are
+	// rewritten by the compiler.)
+	if call, ok := lhs.Value.(Call); ok {
+		return NewExpr([]*Term(call))
+	}
+
+	return NewExpr(lhs)
+}
+
+// parseTermRelation consumes the next term from the input and returns it. If a
+// term cannot be parsed the return value is nil and error will be recorded. The
+// scanner will be advanced to the next token before returning.
+func (p *Parser) parseTermRelation() *Term {
+	return p.parseTermRelationRec(nil, p.s.loc.Offset)
+}
+
+// parseTermRelationRec handles comparison operators (==, !=, <, >, <=, >=),
+// the lowest-precedence level of the term grammar, left-associatively.
+func (p *Parser) parseTermRelationRec(lhs *Term, offset int) *Term {
+	if lhs == nil {
+		lhs = p.parseTermOr(nil, offset)
+	}
+	if lhs != nil {
+		if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil {
+			if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil {
+				call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+				switch p.s.tok {
+				case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte:
+					return p.parseTermRelationRec(call, offset)
+				default:
+					return call
+				}
+			}
+		}
+	}
+	return lhs
+}
+
+// parseTermOr handles the "|" operator level of the term grammar,
+// left-associatively, delegating tighter binding to parseTermAnd.
+func (p *Parser) parseTermOr(lhs *Term, offset int) *Term {
+	if lhs == nil {
+		lhs = p.parseTermAnd(nil, offset)
+	}
+	if lhs != nil {
+		if op := p.parseTermOp(tokens.Or); op != nil {
+			if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil {
+				call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+				switch p.s.tok {
+				case tokens.Or:
+					return p.parseTermOr(call, offset)
+				default:
+					return call
+				}
+			}
+		}
+		return lhs
+	}
+	return nil
+}
+
+// parseTermAnd handles the "&" operator level, left-associatively,
+// delegating tighter binding to parseTermArith.
+func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term {
+	if lhs == nil {
+		lhs = p.parseTermArith(nil, offset)
+	}
+	if lhs != nil {
+		if op := p.parseTermOp(tokens.And); op != nil {
+			if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil {
+				call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+				switch p.s.tok {
+				case tokens.And:
+					return p.parseTermAnd(call, offset)
+				default:
+					return call
+				}
+			}
+		}
+		return lhs
+	}
+	return nil
+}
+
+// parseTermArith handles "+" and "-", left-associatively, delegating
+// tighter binding to parseTermFactor.
+func (p *Parser) parseTermArith(lhs *Term, offset int) *Term {
+	if lhs == nil {
+		lhs = p.parseTermFactor(nil, offset)
+	}
+	if lhs != nil {
+		if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil {
+			if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil {
+				call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+				switch p.s.tok {
+				case tokens.Add, tokens.Sub:
+					return p.parseTermArith(call, offset)
+				default:
+					return call
+				}
+			}
+		}
+	}
+	return lhs
+}
+
+// parseTermFactor handles "*", "/", and "%", the tightest-binding binary
+// operators, left-associatively, delegating to parseTerm for operands.
+func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term {
+	if lhs == nil {
+		lhs = p.parseTerm()
+	}
+	if lhs != nil {
+		if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil {
+			if rhs := p.parseTerm(); rhs != nil {
+				call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd)
+				switch p.s.tok {
+				case tokens.Mul, tokens.Quo, tokens.Rem:
+					return p.parseTermFactor(call, offset)
+				default:
+					return call
+				}
+			}
+		}
+	}
+	return lhs
+}
+
+func (p *Parser) parseTerm() *Term {
+ var term *Term
+ switch p.s.tok {
+ case tokens.Null:
+ term = NullTerm().SetLocation(p.s.Loc())
+ case tokens.True:
+ term = BooleanTerm(true).SetLocation(p.s.Loc())
+ case tokens.False:
+ term = BooleanTerm(false).SetLocation(p.s.Loc())
+ case tokens.Sub, tokens.Dot, tokens.Number:
+ term = p.parseNumber()
+ case tokens.String:
+ term = p.parseString()
+ case tokens.Ident:
+ term = p.parseVar()
+ case tokens.LBrack:
+ term = p.parseArray()
+ case tokens.LBrace:
+ term = p.parseSetOrObject()
+ case tokens.LParen:
+ offset := p.s.loc.Offset
+ p.scan()
+ if r := p.parseTermRelation(); r != nil {
+ if p.s.tok == tokens.RParen {
+ r.Location.Text = p.s.Text(offset, p.s.tokEnd)
+ term = r
+ } else {
+ p.error(p.s.Loc(), "non-terminated expression")
+ }
+ }
+ default:
+ p.illegalToken()
+ return nil
+ }
+
+ return p.parseTermFinish(term)
+}
+
+// parseTermFinish handles whatever immediately follows a primary term. A
+// "(", "." or "[" (with no intervening whitespace) continues the term as a
+// reference/call via parseRef; otherwise the term is returned as-is, except
+// that bare root document names (e.g. data, input) are promoted to
+// single-element refs.
+func (p *Parser) parseTermFinish(head *Term) *Term {
+	if head == nil {
+		return nil
+	}
+	offset := p.s.loc.Offset
+	// Scan without skipping whitespace: "a[0]" is a ref, "a [0]" is not.
+	p.scanWS()
+	switch p.s.tok {
+	case tokens.LParen, tokens.Dot, tokens.LBrack:
+		return p.parseRef(head, offset)
+	case tokens.Whitespace:
+		p.scan()
+		fallthrough
+	default:
+		if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) {
+			return RefTerm(head).SetLocation(head.Location)
+		}
+		return head
+	}
+}
+
+// parseNumber parses a numeric literal, handling an optional leading "-" and
+// an optional leading "." (e.g. -0.5, .5, 42). The literal is validated with
+// math/big; numbers with redundant leading zeros or extreme exponents are
+// rejected. On success a Number term backed by the original literal string
+// is returned (no round-trip through big.Float, to preserve precision).
+func (p *Parser) parseNumber() *Term {
+	var prefix string
+	loc := p.s.Loc()
+	if p.s.tok == tokens.Sub {
+		prefix = "-"
+		p.scan()
+		switch p.s.tok {
+		case tokens.Number, tokens.Dot:
+			break
+		default:
+			p.illegal("expected number")
+			return nil
+		}
+	}
+	if p.s.tok == tokens.Dot {
+		prefix += "."
+		p.scan()
+		if p.s.tok != tokens.Number {
+			p.illegal("expected number")
+			return nil
+		}
+	}
+
+	// Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0:
+	// https://golang.org/pkg/math/big/#Float.Parse
+	if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) &&
+		len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' {
+		p.illegal("expected number")
+		return nil
+	}
+
+	// Ensure that the number is valid
+	s := prefix + p.s.lit
+	f, ok := new(big.Float).SetString(s)
+	if !ok {
+		p.illegal("expected number")
+		return nil
+	}
+
+	// Put limit on size of exponent to prevent non-linear cost of String()
+	// function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068
+	//
+	// n == sign * mantissa * 2^exp
+	// 0.5 <= mantissa < 1.0
+	//
+	// The limit is arbitrary.
+	exp := f.MantExp(nil)
+	if exp > 1e5 || exp < -1e5 {
+		p.error(p.s.Loc(), "number too big")
+		return nil
+	}
+
+	// Note: Use the original string, do *not* round trip from
+	// the big.Float as it can cause precision loss.
+	r := NumberTerm(json.Number(s)).SetLocation(loc)
+	return r
+}
+
+func (p *Parser) parseString() *Term {
+ if p.s.lit[0] == '"' {
+ var s string
+ err := json.Unmarshal([]byte(p.s.lit), &s)
+ if err != nil {
+ p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit)
+ return nil
+ }
+ term := StringTerm(s).SetLocation(p.s.Loc())
+ return term
+ }
+ return p.parseRawString()
+}
+
+func (p *Parser) parseRawString() *Term {
+ if len(p.s.lit) < 2 {
+ return nil
+ }
+ term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc())
+ return term
+}
+
+// this is the name to use for instantiating an empty set, e.g., `set()`.
+var setConstructor = RefTerm(VarTerm("set"))
+
+func (p *Parser) parseCall(operator *Term, offset int) (term *Term) {
+
+ loc := operator.Location
+ var end int
+
+ defer func() {
+ p.setLoc(term, loc, offset, end)
+ }()
+
+ p.scan()
+
+ if p.s.tok == tokens.RParen {
+ end = p.s.tokEnd
+ p.scanWS()
+ if operator.Equal(setConstructor) {
+ return SetTerm()
+ }
+ return CallTerm(operator)
+ }
+
+ if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil {
+ end = p.s.tokEnd
+ p.scanWS()
+ return CallTerm(r...)
+ }
+
+ return nil
+}
+
+// parseRef parses the remainder of a reference whose head term has already
+// been consumed (e.g. everything after "a" in "a.b[c](d)"). offset is the
+// byte offset of the head and is used to compute the location text of the
+// resulting term. Returns nil if a parse error was reported.
+func (p *Parser) parseRef(head *Term, offset int) (term *Term) {
+
+	loc := head.Location
+	var end int
+
+	// Stamp the location on whatever term is returned (setLoc is nil-safe).
+	defer func() {
+		p.setLoc(term, loc, offset, end)
+	}()
+
+	switch h := head.Value.(type) {
+	case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
+		// ok
+	default:
+		p.errorf(loc, "illegal ref (head cannot be %v)", TypeName(h))
+	}
+
+	ref := []*Term{head}
+
+	for {
+		switch p.s.tok {
+		case tokens.Dot:
+			p.scanWS()
+			if p.s.tok != tokens.Ident {
+				p.illegal("expected %v", tokens.Ident)
+				return nil
+			}
+			// Dotted access is sugar for string keys: a.b == a["b"].
+			ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc()))
+			p.scanWS()
+		case tokens.LParen:
+			term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset)
+			if term != nil {
+				switch p.s.tok {
+				case tokens.Whitespace:
+					p.scan()
+					end = p.s.lastEnd
+					return term
+				case tokens.Dot, tokens.LBrack:
+					// The call result is itself dereferenced: f(x).y or f(x)[y].
+					term = p.parseRef(term, offset)
+				}
+			}
+			end = p.s.tokEnd
+			return term
+		case tokens.LBrack:
+			p.scan()
+			if term := p.parseTermRelation(); term != nil {
+				if p.s.tok != tokens.RBrack {
+					// BUG FIX: the missing token is the closing bracket, so the
+					// diagnostic must name RBrack ("]"), not LBrack ("[").
+					p.illegal("expected %v", tokens.RBrack)
+					return nil
+				}
+				ref = append(ref, term)
+				p.scanWS()
+			} else {
+				return nil
+			}
+		case tokens.Whitespace:
+			end = p.s.lastEnd
+			p.scan()
+			return RefTerm(ref...)
+		default:
+			end = p.s.lastEnd
+			return RefTerm(ref...)
+		}
+	}
+}
+
+func (p *Parser) parseArray() (term *Term) {
+
+ loc := p.s.Loc()
+ offset := p.s.loc.Offset
+
+ defer func() {
+ p.setLoc(term, loc, offset, p.s.tokEnd)
+ }()
+
+ p.scan()
+
+ if p.s.tok == tokens.RBrack {
+ return ArrayTerm()
+ }
+
+ potentialComprehension := true
+
+ // Skip leading commas, eg [, x, y]
+ // Supported for backwards compatibility. In the future
+ // we should make this a parse error.
+ if p.s.tok == tokens.Comma {
+ potentialComprehension = false
+ p.scan()
+ }
+
+ s := p.save()
+
+ // NOTE(tsandall): The parser cannot attempt a relational term here because
+ // of ambiguity around comprehensions. For example, given:
+ //
+ // {1 | 1}
+ //
+ // Does this represent a set comprehension or a set containing binary OR
+ // call? We resolve the ambiguity by prioritizing comprehensions.
+ head := p.parseTerm()
+
+ if head == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.RBrack:
+ return ArrayTerm(head)
+ case tokens.Comma:
+ p.scan()
+ if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil {
+ return NewTerm(NewArray(terms...))
+ }
+ return nil
+ case tokens.Or:
+ if potentialComprehension {
+ // Try to parse as if it is an array comprehension
+ p.scan()
+ if body := p.parseBody(tokens.RBrack); body != nil {
+ return ArrayComprehensionTerm(head, body)
+ }
+ if p.s.tok != tokens.Comma {
+ return nil
+ }
+ }
+ // fall back to parsing as a normal array definition
+ fallthrough
+ default:
+ p.restore(s)
+ if terms := p.parseTermList(tokens.RBrack, nil); terms != nil {
+ return NewTerm(NewArray(terms...))
+ }
+ return nil
+ }
+}
+
+func (p *Parser) parseSetOrObject() (term *Term) {
+
+ loc := p.s.Loc()
+ offset := p.s.loc.Offset
+
+ defer func() {
+ p.setLoc(term, loc, offset, p.s.tokEnd)
+ }()
+
+ p.scan()
+
+ if p.s.tok == tokens.RBrace {
+ return ObjectTerm()
+ }
+
+ potentialComprehension := true
+
+ // Skip leading commas, eg {, x, y}
+ // Supported for backwards compatibility. In the future
+ // we should make this a parse error.
+ if p.s.tok == tokens.Comma {
+ potentialComprehension = false
+ p.scan()
+ }
+
+ s := p.save()
+
+ // Try parsing just a single term first to give comprehensions higher
+ // priority to "or" calls in ambiguous situations. Eg: { a | b }
+ // will be a set comprehension.
+ //
+ // Note: We don't know yet if it is a set or object being defined.
+ head := p.parseTerm()
+
+ if head == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.Or:
+ if potentialComprehension {
+ return p.parseSet(s, head, potentialComprehension)
+ }
+ case tokens.RBrace, tokens.Comma:
+ return p.parseSet(s, head, potentialComprehension)
+ case tokens.Colon:
+ return p.parseObject(head, potentialComprehension)
+ }
+
+ p.restore(s)
+
+ if head = p.parseTermRelation(); head == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.RBrace, tokens.Comma:
+ return p.parseSet(s, head, false)
+ case tokens.Colon:
+ // It still might be an object comprehension, eg { a+1: b | ... }
+ return p.parseObject(head, potentialComprehension)
+ default:
+ p.illegal("non-terminated set")
+ }
+
+ return nil
+}
+
+func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term {
+ switch p.s.tok {
+ case tokens.RBrace:
+ return SetTerm(head)
+ case tokens.Comma:
+ p.scan()
+ if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil {
+ return SetTerm(terms...)
+ }
+ return nil
+ case tokens.Or:
+ if potentialComprehension {
+ // Try to parse as if it is a set comprehension
+ p.scan()
+ if body := p.parseBody(tokens.RBrace); body != nil {
+ return SetComprehensionTerm(head, body)
+ }
+ if p.s.tok != tokens.Comma {
+ return nil
+ }
+ }
+ // Fall back to parsing as normal set definition
+ p.restore(s)
+ if terms := p.parseTermList(tokens.RBrace, nil); terms != nil {
+ return SetTerm(terms...)
+ }
+ return nil
+ }
+ return nil
+}
+
+func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term {
+ // NOTE(tsandall): Assumption: this function is called after parsing the key
+ // of the head element and then receiving a colon token from the scanner.
+ // Advance beyond the colon and attempt to parse an object.
+ p.scan()
+
+ s := p.save()
+ v := p.parseTerm()
+
+ if v == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.RBrace, tokens.Comma, tokens.Or:
+ if potentialComprehension {
+ if term := p.parseObjectFinish(k, v, true); term != nil {
+ return term
+ }
+ }
+ }
+
+ p.restore(s)
+
+ if v = p.parseTermRelation(); v == nil {
+ return nil
+ }
+
+ switch p.s.tok {
+ case tokens.Comma, tokens.RBrace:
+ return p.parseObjectFinish(k, v, false)
+ default:
+ p.illegal("non-terminated object")
+ }
+
+ return nil
+}
+
+func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term {
+ switch p.s.tok {
+ case tokens.RBrace:
+ return ObjectTerm([2]*Term{key, val})
+ case tokens.Or:
+ if potentialComprehension {
+ p.scan()
+ if body := p.parseBody(tokens.RBrace); body != nil {
+ return ObjectComprehensionTerm(key, val, body)
+ }
+ } else {
+ p.illegal("non-terminated object")
+ }
+ case tokens.Comma:
+ p.scan()
+ if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil {
+ return ObjectTerm(r...)
+ }
+ }
+ return nil
+}
+
+// parseTermList parses a comma-separated list of terms terminated by end
+// (the elements of an array or set literal, or call arguments). Parsed terms
+// are appended to r. A trailing comma before the terminator is allowed.
+// Returns nil if a parse error was reported.
+func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term {
+	if p.s.tok == end {
+		return r
+	}
+	for {
+		term := p.parseTermRelation()
+		if term == nil {
+			return nil
+		}
+		r = append(r, term)
+		switch p.s.tok {
+		case end:
+			return r
+		case tokens.Comma:
+			p.scan()
+			// Allow a trailing comma before the terminator.
+			if p.s.tok == end {
+				return r
+			}
+		default:
+			// Pass the format and args straight through; illegal formats them
+			// itself. Pre-formatting with Sprintf would make the result be
+			// interpreted as a format string a second time.
+			p.illegal("expected %q or %q", tokens.Comma, end)
+			return nil
+		}
+	}
+}
+
+// parseTermPairList parses a comma-separated list of key:value pairs
+// terminated by end (i.e. the contents of an object literal). Parsed pairs
+// are appended to r. A trailing comma before the terminator is allowed.
+// Returns nil if a parse error was reported.
+func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term {
+	if p.s.tok == end {
+		return r
+	}
+	for {
+		key := p.parseTermRelation()
+		if key == nil {
+			return nil
+		}
+		if p.s.tok != tokens.Colon {
+			// Pass format+args directly to illegal rather than pre-formatting
+			// with Sprintf (which would be format-expanded a second time).
+			p.illegal("expected %q", tokens.Colon)
+			return nil
+		}
+		p.scan()
+		val := p.parseTermRelation()
+		if val == nil {
+			return nil
+		}
+		r = append(r, [2]*Term{key, val})
+		switch p.s.tok {
+		case end:
+			return r
+		case tokens.Comma:
+			p.scan()
+			// Allow a trailing comma before the terminator.
+			if p.s.tok == end {
+				return r
+			}
+		default:
+			p.illegal("expected %q or %q", tokens.Comma, end)
+			return nil
+		}
+	}
+}
+
+func (p *Parser) parseTermOp(values ...tokens.Token) *Term {
+ for i := range values {
+ if p.s.tok == values[i] {
+ r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc())
+ p.scan()
+ return r
+ }
+ }
+ return nil
+}
+
+func (p *Parser) parseVar() *Term {
+
+ s := p.s.lit
+
+ term := VarTerm(s).SetLocation(p.s.Loc())
+
+ // Update wildcard values with unique identifiers
+ if term.Equal(Wildcard) {
+ term.Value = Var(p.genwildcard())
+ }
+
+ return term
+}
+
+// genwildcard returns a fresh, unique wildcard variable name by combining
+// the wildcard prefix with a monotonically increasing per-parse counter.
+func (p *Parser) genwildcard() string {
+	next := p.s.wildcard
+	p.s.wildcard++
+	return fmt.Sprintf("%v%d", WildcardPrefix, next)
+}
+
+// error reports a parse error at loc with the given literal reason. The
+// reason is passed as a value rather than as a format string so that '%'
+// characters originating from scanner messages or source text are not
+// misinterpreted as formatting verbs (go vet printf).
+func (p *Parser) error(loc *location.Location, reason string) {
+	p.errorf(loc, "%s", reason)
+}
+
+func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) {
+ p.s.errors = append(p.s.errors, &Error{
+ Code: ParseErr,
+ Message: fmt.Sprintf(f, a...),
+ Location: loc,
+ Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset),
+ })
+}
+
+func (p *Parser) illegal(note string, a ...interface{}) {
+
+ tok := p.s.tok.String()
+
+ if p.s.tok == tokens.Illegal {
+ p.errorf(p.s.Loc(), "illegal token")
+ return
+ }
+
+ tokType := "token"
+ if p.s.tok >= tokens.Package && p.s.tok <= tokens.False {
+ tokType = "keyword"
+ }
+
+ note = fmt.Sprintf(note, a...)
+ if len(note) > 0 {
+ p.errorf(p.s.Loc(), "unexpected %s %s: %v", tok, tokType, note)
+ } else {
+ p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType)
+ }
+}
+
+func (p *Parser) illegalToken() {
+ p.illegal("")
+}
+
+func (p *Parser) scan() {
+ p.doScan(true)
+}
+
+func (p *Parser) scanWS() {
+ p.doScan(false)
+}
+
+// doScan advances the scanner to the next token, updating the parser's
+// location state. When skipws is true, whitespace tokens are consumed
+// silently (tracking whether a newline was skipped); comments are always
+// consumed and collected onto p.s.comments. Scanner errors are reported and
+// force the current token to Illegal.
+func (p *Parser) doScan(skipws bool) {
+
+	// NOTE(tsandall): the last position is used to compute the "text" field for
+	// complex AST nodes. Whitespace never affects the last position of an AST
+	// node so do not update it when scanning.
+	if p.s.tok != tokens.Whitespace {
+		p.s.lastEnd = p.s.tokEnd
+		p.s.skippedNL = false
+	}
+
+	var errs []scanner.Error
+	for {
+		var pos scanner.Position
+		p.s.tok, pos, p.s.lit, errs = p.s.s.Scan()
+
+		p.s.tokEnd = pos.End
+		p.s.loc.Row = pos.Row
+		p.s.loc.Col = pos.Col
+		p.s.loc.Offset = pos.Offset
+		p.s.loc.Text = p.s.Text(pos.Offset, pos.End)
+
+		for _, err := range errs {
+			p.error(p.s.Loc(), err.Message)
+		}
+
+		// Any scanner error invalidates the token.
+		if len(errs) > 0 {
+			p.s.tok = tokens.Illegal
+		}
+
+		if p.s.tok == tokens.Whitespace {
+			if p.s.lit == "\n" {
+				// Remember that a newline was crossed; callers use this to
+				// detect statement boundaries.
+				p.s.skippedNL = true
+			}
+			if skipws {
+				continue
+			}
+		}
+
+		if p.s.tok != tokens.Comment {
+			break
+		}
+
+		// For backwards compatibility leave a nil
+		// Text value if there is no text rather than
+		// an empty string.
+		var commentText []byte
+		if len(p.s.lit) > 1 {
+			commentText = []byte(p.s.lit[1:])
+		}
+		comment := NewComment(commentText)
+		comment.SetLoc(p.s.Loc())
+		p.s.comments = append(p.s.comments, comment)
+	}
+}
+
+func (p *Parser) save() *state {
+ cpy := *p.s
+ s := *cpy.s
+ cpy.s = &s
+ return &cpy
+}
+
+func (p *Parser) restore(s *state) {
+ p.s = s
+}
+
+func setLocRecursive(x interface{}, loc *location.Location) {
+ NewGenericVisitor(func(x interface{}) bool {
+ if node, ok := x.(Node); ok {
+ node.SetLoc(loc)
+ }
+ return false
+ }).Walk(x)
+}
+
+func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term {
+ if term != nil {
+ cpy := *loc
+ term.Location = &cpy
+ term.Location.Text = p.s.Text(offset, end)
+ }
+ return term
+}
+
+// validateDefaultRuleValue checks that a default rule has a value and that
+// the value is constant: it may not contain variables, references, or calls.
+// Comprehension bodies are closures and are skipped. Errors are reported via
+// the parser; the return value indicates whether the rule is valid.
+func (p *Parser) validateDefaultRuleValue(rule *Rule) bool {
+	if rule.Head.Value == nil {
+		// No formatting needed here; fmt.Sprintf with no args was redundant
+		// (staticcheck S1039).
+		p.error(rule.Loc(), "illegal default rule (must have a value)")
+		return false
+	}
+
+	valid := true
+	vis := NewGenericVisitor(func(x interface{}) bool {
+		switch x.(type) {
+		case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures
+			return true
+		case Ref, Var, Call:
+			// Use errorf directly instead of error(Sprintf(...)).
+			p.errorf(rule.Loc(), "illegal default rule (value cannot contain %v)", TypeName(x))
+			valid = false
+			return true
+		}
+		return false
+	})
+
+	vis.Walk(rule.Head.Value.Value)
+	return valid
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
new file mode 100644
index 00000000..644169a4
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go
@@ -0,0 +1,696 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// This file contains extra functions for parsing Rego.
+// Most of the parsing is handled by the code in parser.go,
+// however, there are additional utilities that are
+// helpful for dealing with Rego source inputs (e.g., REPL
+// statements, source files, etc.)
+
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "unicode"
+
+ "github.com/pkg/errors"
+)
+
+// MustParseBody returns a parsed body.
+// If an error occurs during parsing, panic.
+func MustParseBody(input string) Body {
+ parsed, err := ParseBody(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseExpr returns a parsed expression.
+// If an error occurs during parsing, panic.
+func MustParseExpr(input string) *Expr {
+ parsed, err := ParseExpr(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseImports returns a slice of imports.
+// If an error occurs during parsing, panic.
+func MustParseImports(input string) []*Import {
+ parsed, err := ParseImports(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseModule returns a parsed module.
+// If an error occurs during parsing, panic.
+func MustParseModule(input string) *Module {
+ parsed, err := ParseModule("", input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParsePackage returns a Package.
+// If an error occurs during parsing, panic.
+func MustParsePackage(input string) *Package {
+ parsed, err := ParsePackage(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseStatements returns a slice of parsed statements.
+// If an error occurs during parsing, panic.
+func MustParseStatements(input string) []Statement {
+ parsed, _, err := ParseStatements("", input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseStatement returns exactly one statement.
+// If an error occurs during parsing, panic.
+func MustParseStatement(input string) Statement {
+ parsed, err := ParseStatement(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseRef returns a parsed reference.
+// If an error occurs during parsing, panic.
+func MustParseRef(input string) Ref {
+ parsed, err := ParseRef(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseRule returns a parsed rule.
+// If an error occurs during parsing, panic.
+func MustParseRule(input string) *Rule {
+ parsed, err := ParseRule(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// MustParseTerm returns a parsed term.
+// If an error occurs during parsing, panic.
+func MustParseTerm(input string) *Term {
+ parsed, err := ParseTerm(input)
+ if err != nil {
+ panic(err)
+ }
+ return parsed
+}
+
+// ParseRuleFromBody returns a rule if the body can be interpreted as a rule
+// definition. Otherwise, an error is returned.
+func ParseRuleFromBody(module *Module, body Body) (*Rule, error) {
+
+ if len(body) != 1 {
+ return nil, fmt.Errorf("multiple expressions cannot be used for rule head")
+ }
+
+ return ParseRuleFromExpr(module, body[0])
+}
+
+// ParseRuleFromExpr returns a rule if the expression can be interpreted as a
+// rule definition.
+func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) {
+
+ if len(expr.With) > 0 {
+ return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head")
+ }
+
+ if expr.Negated {
+ return nil, fmt.Errorf("negated expressions cannot be used for rule head")
+ }
+
+ if _, ok := expr.Terms.(*SomeDecl); ok {
+ return nil, errors.New("some declarations cannot be used for rule head")
+ }
+
+ if term, ok := expr.Terms.(*Term); ok {
+ switch v := term.Value.(type) {
+ case Ref:
+ return ParsePartialSetDocRuleFromTerm(module, term)
+ default:
+ return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v))
+ }
+ }
+
+ if _, ok := expr.Terms.([]*Term); !ok {
+ // This is a defensive check in case other kinds of expression terms are
+ // introduced in the future.
+ return nil, errors.New("expression cannot be used for rule head")
+ }
+
+ if expr.IsAssignment() {
+
+ lhs, rhs := expr.Operand(0), expr.Operand(1)
+ if lhs == nil || rhs == nil {
+ return nil, errors.New("assignment requires two operands")
+ }
+
+ rule, err := ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs)
+
+ if err == nil {
+ rule.Location = expr.Location
+ rule.Head.Location = expr.Location
+ return rule, nil
+ } else if _, ok := lhs.Value.(Call); ok {
+ return nil, errFunctionAssignOperator
+ } else if _, ok := lhs.Value.(Ref); ok {
+ return nil, errPartialRuleAssignOperator
+ }
+
+ return nil, errTermAssignOperator(lhs.Value)
+ }
+
+ if expr.IsEquality() {
+ return parseCompleteRuleFromEq(module, expr)
+ }
+
+ if _, ok := BuiltinMap[expr.Operator().String()]; ok {
+ return nil, fmt.Errorf("rule name conflicts with built-in function")
+ }
+
+ return ParseRuleFromCallExpr(module, expr.Terms.([]*Term))
+}
+
+func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) {
+
+ // ensure the rule location is set to the expr location
+ // the helper functions called below try to set the location based
+ // on the terms they've been provided but that is not as accurate.
+ defer func() {
+ if rule != nil {
+ rule.Location = expr.Location
+ rule.Head.Location = expr.Location
+ }
+ }()
+
+ lhs, rhs := expr.Operand(0), expr.Operand(1)
+ if lhs == nil || rhs == nil {
+ return nil, errors.New("assignment requires two operands")
+ }
+
+ rule, err = ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
+
+ if err == nil {
+ return rule, nil
+ }
+
+ rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs)
+ if err == nil {
+ return rule, nil
+ }
+
+ return ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs)
+}
+
+// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can
+// be interpreted as a complete document definition declared with the assignment
+// operator.
+func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+
+ rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs)
+ if err != nil {
+ return nil, err
+ }
+
+ rule.Head.Assign = true
+
+ return rule, nil
+}
+
+// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be
+// interpreted as a complete document definition.
+func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+
+ var name Var
+
+ if RootDocumentRefs.Contains(lhs) {
+ name = lhs.Value.(Ref)[0].Value.(Var)
+ } else if v, ok := lhs.Value.(Var); ok {
+ name = v
+ } else {
+ return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value))
+ }
+
+ rule := &Rule{
+ Location: lhs.Location,
+ Head: &Head{
+ Location: lhs.Location,
+ Name: name,
+ Value: rhs,
+ },
+ Body: NewBody(
+ NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location),
+ ),
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be
+// interpreted as a partial object document definition.
+func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+
+ ref, ok := lhs.Value.(Ref)
+ if !ok || len(ref) != 2 {
+ return nil, fmt.Errorf("%v cannot be used as rule name", TypeName(lhs.Value))
+ }
+
+ if _, ok := ref[0].Value.(Var); !ok {
+ return nil, fmt.Errorf("%vs cannot be used as rule name", TypeName(ref[0].Value))
+ }
+
+ name := ref[0].Value.(Var)
+ key := ref[1]
+
+ rule := &Rule{
+ Location: rhs.Location,
+ Head: &Head{
+ Location: rhs.Location,
+ Name: name,
+ Key: key,
+ Value: rhs,
+ },
+ Body: NewBody(
+ NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location),
+ ),
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted
+// as a partial set document definition.
+func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) {
+
+ ref, ok := term.Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value))
+ }
+
+ if len(ref) != 2 {
+ return nil, fmt.Errorf("refs cannot be used for rule")
+ }
+
+ name, ok := ref[0].Value.(Var)
+ if !ok {
+ return nil, fmt.Errorf("%vs cannot be used as rule name", TypeName(ref[0].Value))
+ }
+
+ rule := &Rule{
+ Location: term.Location,
+ Head: &Head{
+ Location: term.Location,
+ Name: name,
+ Key: ref[1],
+ },
+ Body: NewBody(
+ NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location),
+ ),
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a
+// function definition (e.g., f(x) = y => f(x) = y { true }).
+func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) {
+
+ call, ok := lhs.Value.(Call)
+ if !ok {
+ return nil, fmt.Errorf("must be call")
+ }
+
+ ref, ok := call[0].Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(call[0].Value))
+ }
+
+ name, ok := ref[0].Value.(Var)
+ if !ok {
+ return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(ref[0].Value))
+ }
+
+ rule := &Rule{
+ Location: lhs.Location,
+ Head: &Head{
+ Location: lhs.Location,
+ Name: name,
+ Args: Args(call[1:]),
+ Value: rhs,
+ },
+ Body: NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)),
+ Module: module,
+ }
+
+ return rule, nil
+}
+
+// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a
+// function returning true or some value (e.g., f(x) => f(x) = true { true }).
+func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) {
+
+ if len(terms) <= 1 {
+ return nil, fmt.Errorf("rule argument list must take at least one argument")
+ }
+
+ loc := terms[0].Location
+ args := terms[1:]
+ value := BooleanTerm(true).SetLocation(loc)
+
+ rule := &Rule{
+ Location: loc,
+ Head: &Head{
+ Location: loc,
+ Name: Var(terms[0].String()),
+ Args: args,
+ Value: value,
+ },
+ Module: module,
+ Body: NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)),
+ }
+ return rule, nil
+}
+
+// ParseImports returns a slice of Import objects.
+func ParseImports(input string) ([]*Import, error) {
+ stmts, _, err := ParseStatements("", input)
+ if err != nil {
+ return nil, err
+ }
+ result := []*Import{}
+ for _, stmt := range stmts {
+ if imp, ok := stmt.(*Import); ok {
+ result = append(result, imp)
+ } else {
+ return nil, fmt.Errorf("expected import but got %T", stmt)
+ }
+ }
+ return result, nil
+}
+
+// ParseModule returns a parsed Module object.
+// For details on Module objects and their fields, see policy.go.
+// Empty input will return nil, nil.
+func ParseModule(filename, input string) (*Module, error) {
+ stmts, comments, err := ParseStatements(filename, input)
+ if err != nil {
+ return nil, err
+ }
+ return parseModule(filename, stmts, comments)
+}
+
+// ParseBody returns exactly one body.
+// If multiple bodies are parsed, an error is returned.
+func ParseBody(input string) (Body, error) {
+ stmts, _, err := ParseStatements("", input)
+ if err != nil {
+ return nil, err
+ }
+
+ result := Body{}
+
+ for _, stmt := range stmts {
+ switch stmt := stmt.(type) {
+ case Body:
+ for i := range stmt {
+ result.Append(stmt[i])
+ }
+ case *Comment:
+ // skip
+ default:
+ return nil, fmt.Errorf("expected body but got %T", stmt)
+ }
+ }
+
+ return result, nil
+}
+
+// ParseExpr returns exactly one expression.
+// If multiple expressions are parsed, an error is returned.
+func ParseExpr(input string) (*Expr, error) {
+ body, err := ParseBody(input)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse expression")
+ }
+ if len(body) != 1 {
+ return nil, fmt.Errorf("expected exactly one expression but got: %v", body)
+ }
+ return body[0], nil
+}
+
+// ParsePackage returns exactly one Package.
+// If multiple statements are parsed, an error is returned.
+func ParsePackage(input string) (*Package, error) {
+ stmt, err := ParseStatement(input)
+ if err != nil {
+ return nil, err
+ }
+ pkg, ok := stmt.(*Package)
+ if !ok {
+ return nil, fmt.Errorf("expected package but got %T", stmt)
+ }
+ return pkg, nil
+}
+
+// ParseTerm returns exactly one term.
+// If multiple terms are parsed, an error is returned.
+func ParseTerm(input string) (*Term, error) {
+ body, err := ParseBody(input)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse term")
+ }
+ if len(body) != 1 {
+ return nil, fmt.Errorf("expected exactly one term but got: %v", body)
+ }
+ term, ok := body[0].Terms.(*Term)
+ if !ok {
+ return nil, fmt.Errorf("expected term but got %v", body[0].Terms)
+ }
+ return term, nil
+}
+
+// ParseRef returns exactly one reference.
+func ParseRef(input string) (Ref, error) {
+ term, err := ParseTerm(input)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse ref")
+ }
+ ref, ok := term.Value.(Ref)
+ if !ok {
+ return nil, fmt.Errorf("expected ref but got %v", term)
+ }
+ return ref, nil
+}
+
+// ParseRule returns exactly one rule.
+// If multiple rules are parsed, an error is returned.
+func ParseRule(input string) (*Rule, error) {
+ stmts, _, err := ParseStatements("", input)
+ if err != nil {
+ return nil, err
+ }
+ if len(stmts) != 1 {
+ return nil, fmt.Errorf("expected exactly one statement (rule)")
+ }
+ rule, ok := stmts[0].(*Rule)
+ if !ok {
+ return nil, fmt.Errorf("expected rule but got %T", stmts[0])
+ }
+ return rule, nil
+}
+
+// ParseStatement returns exactly one statement.
+// A statement might be a term, expression, rule, etc. Regardless,
+// this function expects *exactly* one statement. If multiple
+// statements are parsed, an error is returned.
+func ParseStatement(input string) (Statement, error) {
+ stmts, _, err := ParseStatements("", input)
+ if err != nil {
+ return nil, err
+ }
+ if len(stmts) != 1 {
+ return nil, fmt.Errorf("expected exactly one statement")
+ }
+ return stmts[0], nil
+}
+
+type commentKey struct {
+ File string
+ Row int
+ Col int
+}
+
+func (a commentKey) Compare(other commentKey) int {
+ if a.File < other.File {
+ return -1
+ } else if a.File > other.File {
+ return 1
+ } else if a.Row < other.Row {
+ return -1
+ } else if a.Row > other.Row {
+ return 1
+ } else if a.Col < other.Col {
+ return -1
+ } else if a.Col > other.Col {
+ return 1
+ }
+ return 0
+}
+
+// ParseStatements returns a slice of parsed statements along with the
+// comments collected while parsing. This is the default return value from
+// the parser.
+func ParseStatements(filename, input string) ([]Statement, []*Comment, error) {
+
+	parser := NewParser().WithFilename(filename).WithReader(bytes.NewBufferString(input))
+
+	stmts, comments, errs := parser.Parse()
+	if len(errs) > 0 {
+		return nil, nil, errs
+	}
+
+	return stmts, comments, nil
+}
+
+// parseModule assembles parsed statements into a Module. The first statement
+// must be a package declaration; subsequent statements may be imports,
+// rules, or bodies that can be reinterpreted as rules. Comments that were
+// not their own statements are attached to the module. All errors are
+// accumulated and returned together.
+func parseModule(filename string, stmts []Statement, comments []*Comment) (*Module, error) {
+
+	if len(stmts) == 0 {
+		return nil, NewError(ParseErr, &Location{File: filename}, "empty module")
+	}
+
+	var errs Errors
+
+	_package, ok := stmts[0].(*Package)
+	if !ok {
+		// stmts[0] already has static type Statement; the previous
+		// stmts[0].(Statement) assertion was redundant.
+		errs = append(errs, NewError(ParseErr, stmts[0].Loc(), "package expected"))
+	}
+
+	mod := &Module{
+		Package: _package,
+	}
+
+	// The comments slice only holds comments that were not their own statements.
+	mod.Comments = append(mod.Comments, comments...)
+
+	for _, stmt := range stmts[1:] {
+		switch stmt := stmt.(type) {
+		case *Import:
+			mod.Imports = append(mod.Imports, stmt)
+		case *Rule:
+			setRuleModule(stmt, mod)
+			mod.Rules = append(mod.Rules, stmt)
+		case Body:
+			// A bare body at module scope may be shorthand for a rule, e.g.
+			// `p = 1` or `p { ... }` written without the rule keyword forms.
+			rule, err := ParseRuleFromBody(mod, stmt)
+			if err != nil {
+				errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error()))
+			} else {
+				mod.Rules = append(mod.Rules, rule)
+			}
+		case *Package:
+			errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package"))
+		case *Comment: // Ignore comments, they're handled above.
+		default:
+			panic("illegal value") // Indicates grammar is out-of-sync with code.
+		}
+	}
+
+	if len(errs) == 0 {
+		return mod, nil
+	}
+
+	return nil, errs
+}
+
+func setRuleModule(rule *Rule, module *Module) {
+ rule.Module = module
+ if rule.Else != nil {
+ setRuleModule(rule.Else, module)
+ }
+}
+
+// ParserErrorDetail holds additional details for parser errors.
+type ParserErrorDetail struct {
+ Line string `json:"line"`
+ Idx int `json:"idx"`
+}
+
+// newParserErrorDetail extracts the source line containing the byte at
+// offset (after backing up over trailing whitespace and clamping offset into
+// range) and records the index of the offending byte within that line, for
+// use in pretty-printed error output.
+func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail {
+
+	// Find first non-space character at or before offset position.
+	if offset >= len(bs) {
+		offset = len(bs) - 1
+	} else if offset < 0 {
+		offset = 0
+	}
+
+	for offset > 0 && unicode.IsSpace(rune(bs[offset])) {
+		offset--
+	}
+
+	// Find beginning of line containing offset.
+	begin := offset
+
+	for begin > 0 && !isNewLineChar(bs[begin]) {
+		begin--
+	}
+
+	if isNewLineChar(bs[begin]) {
+		begin++
+	}
+
+	// Find end of line containing offset.
+	end := offset
+
+	for end < len(bs) && !isNewLineChar(bs[end]) {
+		end++
+	}
+
+	// Guard against begin overshooting end (possible when the clamped offset
+	// sits on a newline at the start of input).
+	if begin > end {
+		begin = end
+	}
+
+	// Extract line and compute index of offset byte in line.
+	line := bs[begin:end]
+	index := offset - begin
+
+	return &ParserErrorDetail{
+		Line:  string(line),
+		Idx:   index,
+	}
+}
+
+// Lines returns the pretty formatted line output for the error details: the
+// offending line with leading tabs stripped, followed by a caret line
+// pointing at the error position.
+func (d ParserErrorDetail) Lines() []string {
+	line := strings.TrimLeft(d.Line, "\t") // remove leading tabs
+	tabCount := len(d.Line) - len(line)
+	indent := d.Idx - tabCount
+	if indent < 0 {
+		// Idx can precede the stripped tabs (e.g. when the error offset was
+		// clamped to the start of the input); clamp to zero to avoid a panic
+		// from strings.Repeat with a negative count.
+		indent = 0
+	}
+	return []string{line, strings.Repeat(" ", indent) + "^"}
+}
+
+func isNewLineChar(b byte) bool {
+ return b == '\r' || b == '\n'
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/policy.go b/vendor/github.com/open-policy-agent/opa/ast/policy.go
new file mode 100644
index 00000000..563411d4
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/policy.go
@@ -0,0 +1,1407 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+// Initialize seed for term hashing. This is intentionally placed before the
+// root document sets are constructed to ensure they use the same hash seed as
+// subsequent lookups. If the hash seeds are out of sync, lookups will fail.
+var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano()))
+var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32())
+var hashSeed1 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32())
+
+// DefaultRootDocument is the default root document.
+//
+// All package directives inside source files are implicitly prefixed with the
+// DefaultRootDocument value.
+var DefaultRootDocument = VarTerm("data")
+
+// InputRootDocument names the document containing query arguments.
+var InputRootDocument = VarTerm("input")
+
+// RootDocumentNames contains the names of top-level documents that can be
+// referred to in modules and queries.
+var RootDocumentNames = NewSet(
+ DefaultRootDocument,
+ InputRootDocument,
+)
+
+// DefaultRootRef is a reference to the root of the default document.
+//
+// All refs to data in the policy engine's storage layer are prefixed with this ref.
+var DefaultRootRef = Ref{DefaultRootDocument}
+
+// InputRootRef is a reference to the root of the input document.
+//
+// All refs to query arguments are prefixed with this ref.
+var InputRootRef = Ref{InputRootDocument}
+
+// RootDocumentRefs contains the prefixes of top-level documents that all
+// non-local references start with.
+var RootDocumentRefs = NewSet(
+ NewTerm(DefaultRootRef),
+ NewTerm(InputRootRef),
+)
+
+// SystemDocumentKey is the name of the top-level key that identifies the system
+// document.
+var SystemDocumentKey = String("system")
+
+// ReservedVars is the set of names that refer to implicitly ground vars.
+var ReservedVars = NewVarSet(
+ DefaultRootDocument.Value.(Var),
+ InputRootDocument.Value.(Var),
+)
+
+// Wildcard represents the wildcard variable as defined in the language.
+var Wildcard = &Term{Value: Var("_")}
+
+// WildcardPrefix is the special character that all wildcard variables are
+// prefixed with when the statement they are contained in is parsed.
+var WildcardPrefix = "$"
+
+// Keywords contains strings that map to language keywords.
+var Keywords = [...]string{
+ "not",
+ "package",
+ "import",
+ "as",
+ "default",
+ "else",
+ "with",
+ "null",
+ "true",
+ "false",
+ "some",
+}
+
+// IsKeyword returns true if s is a language keyword.
+func IsKeyword(s string) bool {
+	for i := range Keywords {
+		if s == Keywords[i] {
+			return true
+		}
+	}
+	return false
+}
+
+type (
+ // Node represents a node in an AST. Nodes may be statements in a policy module
+ // or elements of an ad-hoc query, expression, etc.
+ Node interface {
+ fmt.Stringer
+ Loc() *Location
+ SetLoc(*Location)
+ }
+
+ // Statement represents a single statement in a policy module.
+ Statement interface {
+ Node
+ }
+)
+
+type (
+
+ // Module represents a collection of policies (defined by rules)
+ // within a namespace (defined by the package) and optional
+ // dependencies on external documents (defined by imports).
+ Module struct {
+ Package *Package `json:"package"`
+ Imports []*Import `json:"imports,omitempty"`
+ Rules []*Rule `json:"rules,omitempty"`
+ Comments []*Comment `json:"comments,omitempty"`
+ }
+
+ // Comment contains the raw text from the comment in the definition.
+ Comment struct {
+ Text []byte
+ Location *Location
+ }
+
+ // Package represents the namespace of the documents produced
+ // by rules inside the module.
+ Package struct {
+ Location *Location `json:"-"`
+ Path Ref `json:"path"`
+ }
+
+ // Import represents a dependency on a document outside of the policy
+ // namespace. Imports are optional.
+ Import struct {
+ Location *Location `json:"-"`
+ Path *Term `json:"path"`
+ Alias Var `json:"alias,omitempty"`
+ }
+
+ // Rule represents a rule as defined in the language. Rules define the
+ // content of documents that represent policy decisions.
+ Rule struct {
+ Location *Location `json:"-"`
+ Default bool `json:"default,omitempty"`
+ Head *Head `json:"head"`
+ Body Body `json:"body"`
+ Else *Rule `json:"else,omitempty"`
+
+ // Module is a pointer to the module containing this rule. If the rule
+ // was NOT created while parsing/constructing a module, this should be
+ // left unset. The pointer is not included in any standard operations
+ // on the rule (e.g., printing, comparison, visiting, etc.)
+ Module *Module `json:"-"`
+ }
+
+ // Head represents the head of a rule.
+ Head struct {
+ Location *Location `json:"-"`
+ Name Var `json:"name"`
+ Args Args `json:"args,omitempty"`
+ Key *Term `json:"key,omitempty"`
+ Value *Term `json:"value,omitempty"`
+ Assign bool `json:"assign,omitempty"`
+ }
+
+ // Args represents zero or more arguments to a rule.
+ Args []*Term
+
+ // Body represents one or more expressions contained inside a rule or user
+ // function.
+ Body []*Expr
+
+ // Expr represents a single expression contained inside the body of a rule.
+ Expr struct {
+ Location *Location `json:"-"`
+ Generated bool `json:"generated,omitempty"`
+ Index int `json:"index"`
+ Negated bool `json:"negated,omitempty"`
+ Terms interface{} `json:"terms"`
+ With []*With `json:"with,omitempty"`
+ }
+
+ // SomeDecl represents a variable declaration statement. The symbols are variables.
+ SomeDecl struct {
+ Location *Location `json:"-"`
+ Symbols []*Term `json:"symbols"`
+ }
+
+ // With represents a modifier on an expression.
+ With struct {
+ Location *Location `json:"-"`
+ Target *Term `json:"target"`
+ Value *Term `json:"value"`
+ }
+)
+
+// Compare returns an integer indicating whether mod is less than, equal to,
+// or greater than other. A nil module is less than any non-nil module.
+// Modules are ordered by package, then by imports, then by rules.
+func (mod *Module) Compare(other *Module) int {
+	if mod == nil {
+		if other == nil {
+			return 0
+		}
+		return -1
+	} else if other == nil {
+		return 1
+	}
+	if cmp := mod.Package.Compare(other.Package); cmp != 0 {
+		return cmp
+	}
+	if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 {
+		return cmp
+	}
+	return rulesCompare(mod.Rules, other.Rules)
+}
+
+// Copy returns a deep copy of mod. The Comments slice is shared, not copied.
+func (mod *Module) Copy() *Module {
+	cpy := *mod
+	cpy.Package = mod.Package.Copy()
+
+	rules := make([]*Rule, len(mod.Rules))
+	for i, r := range mod.Rules {
+		rules[i] = r.Copy()
+	}
+	cpy.Rules = rules
+
+	imports := make([]*Import, len(mod.Imports))
+	for i, imp := range mod.Imports {
+		imports[i] = imp.Copy()
+	}
+	cpy.Imports = imports
+
+	return &cpy
+}
+
+// Equal returns true if mod equals other.
+func (mod *Module) Equal(other *Module) bool {
+ return mod.Compare(other) == 0
+}
+
+// String returns the module rendered as source text: the package clause,
+// then (if any) a blank line and the imports, then a blank line and the rules.
+func (mod *Module) String() string {
+	lines := []string{mod.Package.String()}
+	if len(mod.Imports) > 0 {
+		lines = append(lines, "")
+		for _, imp := range mod.Imports {
+			lines = append(lines, imp.String())
+		}
+	}
+	if len(mod.Rules) > 0 {
+		lines = append(lines, "")
+		for _, r := range mod.Rules {
+			lines = append(lines, r.String())
+		}
+	}
+	return strings.Join(lines, "\n")
+}
+
+// RuleSet returns a RuleSet containing named rules in the mod.
+func (mod *Module) RuleSet(name Var) RuleSet {
+	matched := NewRuleSet()
+	for _, r := range mod.Rules {
+		if r.Head.Name.Equal(name) {
+			matched.Add(r)
+		}
+	}
+	return matched
+}
+
+// UnmarshalJSON parses bs and stores the result in mod. The rules in the module
+// will have their module pointer set to mod.
+func (mod *Module) UnmarshalJSON(bs []byte) error {
+
+ // Declare a new type and use a type conversion to avoid recursively calling
+ // Module#UnmarshalJSON.
+ type module Module
+
+ if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil {
+ return err
+ }
+
+ WalkRules(mod, func(rule *Rule) bool {
+ rule.Module = mod
+ return false
+ })
+
+ return nil
+}
+
+// NewComment returns a new Comment object.
+func NewComment(text []byte) *Comment {
+ return &Comment{
+ Text: text,
+ }
+}
+
+// Loc returns the location of the comment in the definition.
+func (c *Comment) Loc() *Location {
+ if c == nil {
+ return nil
+ }
+ return c.Location
+}
+
+// SetLoc sets the location on c.
+func (c *Comment) SetLoc(loc *Location) {
+ c.Location = loc
+}
+
+func (c *Comment) String() string {
+ return "#" + string(c.Text)
+}
+
+// Copy returns a deep copy of c, including its own copy of the text bytes.
+func (c *Comment) Copy() *Comment {
+	text := make([]byte, len(c.Text))
+	copy(text, c.Text)
+	cpy := *c
+	cpy.Text = text
+	return &cpy
+}
+
+// Equal returns true if this comment equals the other comment.
+// Unlike other equality checks on AST nodes, comment equality
+// depends on location.
+func (c *Comment) Equal(other *Comment) bool {
+ return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text)
+}
+
+// Compare returns an integer indicating whether pkg is less than, equal to,
+// or greater than other.
+func (pkg *Package) Compare(other *Package) int {
+ return Compare(pkg.Path, other.Path)
+}
+
+// Copy returns a deep copy of pkg.
+func (pkg *Package) Copy() *Package {
+ cpy := *pkg
+ cpy.Path = pkg.Path.Copy()
+ return &cpy
+}
+
+// Equal returns true if pkg is equal to other.
+func (pkg *Package) Equal(other *Package) bool {
+ return pkg.Compare(other) == 0
+}
+
+// Loc returns the location of the Package in the definition.
+func (pkg *Package) Loc() *Location {
+ if pkg == nil {
+ return nil
+ }
+ return pkg.Location
+}
+
+// SetLoc sets the location on pkg.
+func (pkg *Package) SetLoc(loc *Location) {
+ pkg.Location = loc
+}
+
+// String returns the package rendered as source text ("package a.b.c"). A nil
+// package renders as the empty string; a malformed path (fewer than two
+// elements) is printed verbatim for debugging.
+func (pkg *Package) String() string {
+	if pkg == nil {
+		return ""
+	} else if len(pkg.Path) <= 1 {
+		// BUG FIX: the format string was "package " with no verb, so the
+		// path argument was never printed (go vet: arguments but no
+		// formatting directives).
+		return fmt.Sprintf("package %v", pkg.Path)
+	}
+	// Omit head as all packages have the DefaultRootDocument prepended at parse time.
+	path := make(Ref, len(pkg.Path)-1)
+	path[0] = VarTerm(string(pkg.Path[1].Value.(String)))
+	copy(path[1:], pkg.Path[2:])
+	return fmt.Sprintf("package %v", path)
+}
+
+// IsValidImportPath returns an error indicating if the import path is
+// invalid. If the import path is valid, err is nil. A valid path is either
+// the var "input"/"data", or a ref that starts with one of those vars and
+// whose remaining elements are all strings.
+func IsValidImportPath(v Value) (err error) {
+	switch v := v.(type) {
+	case Var:
+		if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) {
+			return fmt.Errorf("invalid path %v: path must begin with input or data", v)
+		}
+	case Ref:
+		if err := IsValidImportPath(v[0].Value); err != nil {
+			return fmt.Errorf("invalid path %v: path must begin with input or data", v)
+		}
+		for _, e := range v[1:] {
+			if _, ok := e.Value.(String); !ok {
+				return fmt.Errorf("invalid path %v: path elements must be strings", v)
+			}
+		}
+	default:
+		return fmt.Errorf("invalid path %v: path must be ref or var", v)
+	}
+	return nil
+}
+
+// Compare returns an integer indicating whether imp is less than, equal to,
+// or greater than other.
+func (imp *Import) Compare(other *Import) int {
+ if imp == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := Compare(imp.Path, other.Path); cmp != 0 {
+ return cmp
+ }
+ return Compare(imp.Alias, other.Alias)
+}
+
+// Copy returns a deep copy of imp.
+func (imp *Import) Copy() *Import {
+ cpy := *imp
+ cpy.Path = imp.Path.Copy()
+ return &cpy
+}
+
+// Equal returns true if imp is equal to other.
+func (imp *Import) Equal(other *Import) bool {
+ return imp.Compare(other) == 0
+}
+
+// Loc returns the location of the Import in the definition.
+func (imp *Import) Loc() *Location {
+ if imp == nil {
+ return nil
+ }
+ return imp.Location
+}
+
+// SetLoc sets the location on imp.
+func (imp *Import) SetLoc(loc *Location) {
+ imp.Location = loc
+}
+
+// Name returns the variable that is used to refer to the imported virtual
+// document. This is the alias if defined otherwise the last element in the
+// path. Panics if the path is neither a Var nor a Ref (which cannot occur for
+// imports produced by the parser).
+func (imp *Import) Name() Var {
+	if len(imp.Alias) != 0 {
+		return imp.Alias
+	}
+	switch v := imp.Path.Value.(type) {
+	case Var:
+		return v
+	case Ref:
+		// Single-element refs wrap a Var; longer refs end in a String key.
+		if len(v) == 1 {
+			return v[0].Value.(Var)
+		}
+		return Var(v[len(v)-1].Value.(String))
+	}
+	panic("illegal import")
+}
+
+// String returns the import rendered as source text, e.g.
+// "import data.foo as bar".
+func (imp *Import) String() string {
+	s := "import " + imp.Path.String()
+	if len(imp.Alias) > 0 {
+		s += " as " + imp.Alias.String()
+	}
+	return s
+}
+
+// Compare returns an integer indicating whether rule is less than, equal to,
+// or greater than other.
+func (rule *Rule) Compare(other *Rule) int {
+ if rule == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := rule.Head.Compare(other.Head); cmp != 0 {
+ return cmp
+ }
+ if cmp := util.Compare(rule.Default, other.Default); cmp != 0 {
+ return cmp
+ }
+ if cmp := rule.Body.Compare(other.Body); cmp != 0 {
+ return cmp
+ }
+ return rule.Else.Compare(other.Else)
+}
+
+// Copy returns a deep copy of rule.
+func (rule *Rule) Copy() *Rule {
+ cpy := *rule
+ cpy.Head = rule.Head.Copy()
+ cpy.Body = rule.Body.Copy()
+ if cpy.Else != nil {
+ cpy.Else = rule.Else.Copy()
+ }
+ return &cpy
+}
+
+// Equal returns true if rule is equal to other.
+func (rule *Rule) Equal(other *Rule) bool {
+ return rule.Compare(other) == 0
+}
+
+// Loc returns the location of the Rule in the definition.
+func (rule *Rule) Loc() *Location {
+ if rule == nil {
+ return nil
+ }
+ return rule.Location
+}
+
+// SetLoc sets the location on rule.
+func (rule *Rule) SetLoc(loc *Location) {
+ rule.Location = loc
+}
+
+// Path returns a ref referring to the document produced by this rule. If rule
+// is not contained in a module, this function panics.
+func (rule *Rule) Path() Ref {
+ if rule.Module == nil {
+ panic("assertion failed")
+ }
+ return rule.Module.Package.Path.Append(StringTerm(string(rule.Head.Name)))
+}
+
+func (rule *Rule) String() string {
+ buf := []string{}
+ if rule.Default {
+ buf = append(buf, "default")
+ }
+ buf = append(buf, rule.Head.String())
+ if !rule.Default {
+ buf = append(buf, "{")
+ buf = append(buf, rule.Body.String())
+ buf = append(buf, "}")
+ }
+ if rule.Else != nil {
+ buf = append(buf, rule.Else.elseString())
+ }
+ return strings.Join(buf, " ")
+}
+
+func (rule *Rule) elseString() string {
+ var buf []string
+
+ buf = append(buf, "else")
+
+ value := rule.Head.Value
+ if value != nil {
+ buf = append(buf, "=")
+ buf = append(buf, value.String())
+ }
+
+ buf = append(buf, "{")
+ buf = append(buf, rule.Body.String())
+ buf = append(buf, "}")
+
+ if rule.Else != nil {
+ buf = append(buf, rule.Else.elseString())
+ }
+
+ return strings.Join(buf, " ")
+}
+
+// NewHead returns a new Head object. If args are provided, the first will be
+// used for the key and the second will be used for the value; any further
+// args are ignored.
+func NewHead(name Var, args ...*Term) *Head {
+	head := &Head{Name: name}
+	switch {
+	case len(args) >= 2:
+		head.Key, head.Value = args[0], args[1]
+	case len(args) == 1:
+		head.Key = args[0]
+	}
+	return head
+}
+
+// DocKind represents the collection of document types that can be produced by rules.
+type DocKind int
+
+const (
+ // CompleteDoc represents a document that is completely defined by the rule.
+ CompleteDoc = iota
+
+ // PartialSetDoc represents a set document that is partially defined by the rule.
+ PartialSetDoc = iota
+
+ // PartialObjectDoc represents an object document that is partially defined by the rule.
+ PartialObjectDoc = iota
+)
+
+// DocKind returns the type of document produced by this rule: a partial
+// object if both key and value are set, a partial set if only the key is set,
+// and a complete document otherwise.
+func (head *Head) DocKind() DocKind {
+	switch {
+	case head.Key != nil && head.Value != nil:
+		return PartialObjectDoc
+	case head.Key != nil:
+		return PartialSetDoc
+	default:
+		return CompleteDoc
+	}
+}
+
+// Compare returns an integer indicating whether head is less than, equal to,
+// or greater than other.
+func (head *Head) Compare(other *Head) int {
+ if head == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if head.Assign && !other.Assign {
+ return -1
+ } else if !head.Assign && other.Assign {
+ return 1
+ }
+ if cmp := Compare(head.Args, other.Args); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(head.Name, other.Name); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(head.Key, other.Key); cmp != 0 {
+ return cmp
+ }
+ return Compare(head.Value, other.Value)
+}
+
+// Copy returns a deep copy of head.
+func (head *Head) Copy() *Head {
+ cpy := *head
+ cpy.Args = head.Args.Copy()
+ cpy.Key = head.Key.Copy()
+ cpy.Value = head.Value.Copy()
+ return &cpy
+}
+
+// Equal returns true if this head equals other.
+func (head *Head) Equal(other *Head) bool {
+ return head.Compare(other) == 0
+}
+
+// String returns the head rendered as source text: either "name(args)",
+// "name[key]", or "name", optionally followed by "= value" or ":= value".
+func (head *Head) String() string {
+	var parts []string
+	switch {
+	case len(head.Args) != 0:
+		parts = append(parts, head.Name.String()+head.Args.String())
+	case head.Key != nil:
+		parts = append(parts, head.Name.String()+"["+head.Key.String()+"]")
+	default:
+		parts = append(parts, head.Name.String())
+	}
+	if head.Value != nil {
+		op := "="
+		if head.Assign {
+			op = ":="
+		}
+		parts = append(parts, op, head.Value.String())
+	}
+	return strings.Join(parts, " ")
+}
+
+// Vars returns a set of vars found in the head.
+func (head *Head) Vars() VarSet {
+ vis := &VarVisitor{vars: VarSet{}}
+ // TODO: improve test coverage for this.
+ if head.Args != nil {
+ vis.Walk(head.Args)
+ }
+ if head.Key != nil {
+ vis.Walk(head.Key)
+ }
+ if head.Value != nil {
+ vis.Walk(head.Value)
+ }
+ return vis.vars
+}
+
+// Loc returns the Location of head.
+func (head *Head) Loc() *Location {
+ if head == nil {
+ return nil
+ }
+ return head.Location
+}
+
+// SetLoc sets the location on head.
+func (head *Head) SetLoc(loc *Location) {
+ head.Location = loc
+}
+
+// Copy returns a deep copy of a.
+func (a Args) Copy() Args {
+ cpy := Args{}
+ for _, t := range a {
+ cpy = append(cpy, t.Copy())
+ }
+ return cpy
+}
+
+func (a Args) String() string {
+ var buf []string
+ for _, t := range a {
+ buf = append(buf, t.String())
+ }
+ return "(" + strings.Join(buf, ", ") + ")"
+}
+
+// Loc returns the Location of a.
+func (a Args) Loc() *Location {
+ if len(a) == 0 {
+ return nil
+ }
+ return a[0].Location
+}
+
+// SetLoc sets the location on a.
+func (a Args) SetLoc(loc *Location) {
+ if len(a) != 0 {
+ a[0].SetLocation(loc)
+ }
+}
+
+// Vars returns a set of vars that appear in a.
+func (a Args) Vars() VarSet {
+ vis := &VarVisitor{vars: VarSet{}}
+ vis.Walk(a)
+ return vis.vars
+}
+
+// NewBody returns a new Body containing the given expressions. The indices of
+// the immediate expressions will be reset.
+func NewBody(exprs ...*Expr) Body {
+ for i, expr := range exprs {
+ expr.Index = i
+ }
+ return Body(exprs)
+}
+
+// MarshalJSON returns JSON encoded bytes representing body.
+func (body Body) MarshalJSON() ([]byte, error) {
+ // Serialize empty Body to empty array. This handles both the empty case and the
+ // nil case (whereas by default the result would be null if body was nil.)
+ if len(body) == 0 {
+ return []byte(`[]`), nil
+ }
+ return json.Marshal([]*Expr(body))
+}
+
+// Append adds the expr to the body and updates the expr's index accordingly.
+func (body *Body) Append(expr *Expr) {
+ n := len(*body)
+ expr.Index = n
+ *body = append(*body, expr)
+}
+
+// Set sets the expr in the body at the specified position and updates the
+// expr's index accordingly.
+func (body Body) Set(expr *Expr, pos int) {
+ body[pos] = expr
+ expr.Index = pos
+}
+
+// Compare returns an integer indicating whether body is less than, equal to,
+// or greater than other.
+//
+// If body is a subset of other, it is considered less than (and vice versa).
+func (body Body) Compare(other Body) int {
+	n := len(body)
+	if len(other) < n {
+		n = len(other)
+	}
+	for i := 0; i < n; i++ {
+		if cmp := body[i].Compare(other[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	switch {
+	case len(body) < len(other):
+		return -1
+	case len(body) > len(other):
+		return 1
+	}
+	return 0
+}
+
+// Copy returns a deep copy of body.
+func (body Body) Copy() Body {
+ cpy := make(Body, len(body))
+ for i := range body {
+ cpy[i] = body[i].Copy()
+ }
+ return cpy
+}
+
+// Contains returns true if this body contains the given expression.
+func (body Body) Contains(x *Expr) bool {
+ for _, e := range body {
+ if e.Equal(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// Equal returns true if this Body is equal to the other Body.
+func (body Body) Equal(other Body) bool {
+ return body.Compare(other) == 0
+}
+
+// Hash returns the hash code for the Body.
+func (body Body) Hash() int {
+ s := 0
+ for _, e := range body {
+ s += e.Hash()
+ }
+ return s
+}
+
+// IsGround returns true if all of the expressions in the Body are ground.
+func (body Body) IsGround() bool {
+ for _, e := range body {
+ if !e.IsGround() {
+ return false
+ }
+ }
+ return true
+}
+
+// Loc returns the location of the Body in the definition.
+func (body Body) Loc() *Location {
+ if len(body) == 0 {
+ return nil
+ }
+ return body[0].Location
+}
+
+// SetLoc sets the location on body.
+func (body Body) SetLoc(loc *Location) {
+ if len(body) != 0 {
+ body[0].SetLocation(loc)
+ }
+}
+
+func (body Body) String() string {
+ var buf []string
+ for _, v := range body {
+ buf = append(buf, v.String())
+ }
+ return strings.Join(buf, "; ")
+}
+
+// Vars returns a VarSet containing variables in body. The params can be set to
+// control which vars are included.
+func (body Body) Vars(params VarVisitorParams) VarSet {
+ vis := NewVarVisitor().WithParams(params)
+ vis.Walk(body)
+ return vis.Vars()
+}
+
+// NewExpr returns a new Expr object.
+func NewExpr(terms interface{}) *Expr {
+ return &Expr{
+ Negated: false,
+ Terms: terms,
+ Index: 0,
+ With: nil,
+ }
+}
+
+// Complement returns a copy of this expression with the negation flag flipped.
+func (expr *Expr) Complement() *Expr {
+ cpy := *expr
+ cpy.Negated = !cpy.Negated
+ return &cpy
+}
+
+// Equal returns true if this Expr equals the other Expr.
+func (expr *Expr) Equal(other *Expr) bool {
+ return expr.Compare(other) == 0
+}
+
+// Compare returns an integer indicating whether expr is less than, equal to,
+// or greater than other.
+//
+// Expressions are compared as follows:
+//
+// 1. Declarations are always less than other expressions.
+// 2. Preceding expression (by Index) is always less than the other expression.
+// 3. Non-negated expressions are always less than than negated expressions.
+// 4. Single term expressions are always less than built-in expressions.
+//
+// Otherwise, the expression terms are compared normally. If both expressions
+// have the same terms, the modifiers are compared.
+func (expr *Expr) Compare(other *Expr) int {
+
+ if expr == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+
+ o1 := expr.sortOrder()
+ o2 := other.sortOrder()
+ if o1 < o2 {
+ return -1
+ } else if o2 < o1 {
+ return 1
+ }
+
+ switch {
+ case expr.Index < other.Index:
+ return -1
+ case expr.Index > other.Index:
+ return 1
+ }
+
+ switch {
+ case expr.Negated && !other.Negated:
+ return 1
+ case !expr.Negated && other.Negated:
+ return -1
+ }
+
+ switch t := expr.Terms.(type) {
+ case *Term:
+ if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 {
+ return cmp
+ }
+ case []*Term:
+ if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 {
+ return cmp
+ }
+ case *SomeDecl:
+ if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 {
+ return cmp
+ }
+ }
+
+ return withSliceCompare(expr.With, other.With)
+}
+
+func (expr *Expr) sortOrder() int {
+ switch expr.Terms.(type) {
+ case *SomeDecl:
+ return 0
+ case *Term:
+ return 1
+ case []*Term:
+ return 2
+ }
+ return -1
+}
+
+// Copy returns a deep copy of expr.
+func (expr *Expr) Copy() *Expr {
+
+ cpy := *expr
+
+ switch ts := expr.Terms.(type) {
+ case *SomeDecl:
+ cpy.Terms = ts.Copy()
+ case []*Term:
+ cpyTs := make([]*Term, len(ts))
+ for i := range ts {
+ cpyTs[i] = ts[i].Copy()
+ }
+ cpy.Terms = cpyTs
+ case *Term:
+ cpy.Terms = ts.Copy()
+ }
+
+ cpy.With = make([]*With, len(expr.With))
+ for i := range expr.With {
+ cpy.With[i] = expr.With[i].Copy()
+ }
+
+ return &cpy
+}
+
+// Hash returns the hash code of the Expr.
+func (expr *Expr) Hash() int {
+ s := expr.Index
+ switch ts := expr.Terms.(type) {
+ case *SomeDecl:
+ s += ts.Hash()
+ case []*Term:
+ for _, t := range ts {
+ s += t.Value.Hash()
+ }
+ case *Term:
+ s += ts.Value.Hash()
+ }
+ if expr.Negated {
+ s++
+ }
+ for _, w := range expr.With {
+ s += w.Hash()
+ }
+ return s
+}
+
+// IncludeWith returns a copy of expr with the with modifier appended.
+func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr {
+ cpy := *expr
+ cpy.With = append(cpy.With, &With{Target: target, Value: value})
+ return &cpy
+}
+
+// NoWith returns a copy of expr where the with modifier has been removed.
+func (expr *Expr) NoWith() *Expr {
+ cpy := *expr
+ cpy.With = nil
+ return &cpy
+}
+
+// IsEquality returns true if this is an equality expression.
+func (expr *Expr) IsEquality() bool {
+ return isglobalbuiltin(expr, Var(Equality.Name))
+}
+
+// IsAssignment returns true if this an assignment expression.
+func (expr *Expr) IsAssignment() bool {
+ return isglobalbuiltin(expr, Var(Assign.Name))
+}
+
+// IsCall returns true if this expression calls a function.
+func (expr *Expr) IsCall() bool {
+ _, ok := expr.Terms.([]*Term)
+ return ok
+}
+
+// Operator returns the name of the function or built-in this expression refers
+// to. If this expression is not a function call, returns nil.
+//
+// NOTE(review): the first term's value is asserted to be a Ref without a
+// check; presumably parsed call expressions always store the operator as a
+// Ref — confirm before calling on hand-constructed exprs.
+func (expr *Expr) Operator() Ref {
+	terms, ok := expr.Terms.([]*Term)
+	if !ok || len(terms) == 0 {
+		return nil
+	}
+	return terms[0].Value.(Ref)
+}
+
+// Operand returns the term at the zero-based pos. If the expr does not include
+// at least pos+1 terms, this function returns nil. (The operator occupies
+// slot 0, so operand pos maps to terms[pos+1].)
+func (expr *Expr) Operand(pos int) *Term {
+	if terms, ok := expr.Terms.([]*Term); ok && pos+1 < len(terms) {
+		return terms[pos+1]
+	}
+	return nil
+}
+
+// Operands returns the built-in function operands.
+func (expr *Expr) Operands() []*Term {
+ terms, ok := expr.Terms.([]*Term)
+ if !ok {
+ return nil
+ }
+ return terms[1:]
+}
+
+// IsGround returns true if all of the expression terms are ground.
+func (expr *Expr) IsGround() bool {
+ switch ts := expr.Terms.(type) {
+ case []*Term:
+ for _, t := range ts[1:] {
+ if !t.IsGround() {
+ return false
+ }
+ }
+ case *Term:
+ return ts.IsGround()
+ }
+ return true
+}
+
+// SetOperator sets the expr's operator and returns the expr itself. If expr is
+// not a call expr, this function will panic.
+func (expr *Expr) SetOperator(term *Term) *Expr {
+ expr.Terms.([]*Term)[0] = term
+ return expr
+}
+
+// SetLocation sets the expr's location and returns the expr itself.
+func (expr *Expr) SetLocation(loc *Location) *Expr {
+ expr.Location = loc
+ return expr
+}
+
+// Loc returns the Location of expr.
+func (expr *Expr) Loc() *Location {
+ if expr == nil {
+ return nil
+ }
+ return expr.Location
+}
+
+// SetLoc sets the location on expr.
+func (expr *Expr) SetLoc(loc *Location) {
+ expr.SetLocation(loc)
+}
+
+func (expr *Expr) String() string {
+ var buf []string
+ if expr.Negated {
+ buf = append(buf, "not")
+ }
+ switch t := expr.Terms.(type) {
+ case []*Term:
+ if expr.IsEquality() && validEqAssignArgCount(expr) {
+ buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2]))
+ } else {
+ buf = append(buf, Call(t).String())
+ }
+ case *Term:
+ buf = append(buf, t.String())
+ case *SomeDecl:
+ buf = append(buf, t.String())
+ }
+
+ for i := range expr.With {
+ buf = append(buf, expr.With[i].String())
+ }
+
+ return strings.Join(buf, " ")
+}
+
+// UnmarshalJSON parses the byte array and stores the result in expr.
+func (expr *Expr) UnmarshalJSON(bs []byte) error {
+ v := map[string]interface{}{}
+ if err := util.UnmarshalJSON(bs, &v); err != nil {
+ return err
+ }
+ return unmarshalExpr(expr, v)
+}
+
+// Vars returns a VarSet containing variables in expr. The params can be set to
+// control which vars are included.
+func (expr *Expr) Vars(params VarVisitorParams) VarSet {
+ vis := NewVarVisitor().WithParams(params)
+ vis.Walk(expr)
+ return vis.Vars()
+}
+
+// NewBuiltinExpr creates a new Expr object with the supplied terms.
+// The builtin operator must be the first term.
+func NewBuiltinExpr(terms ...*Term) *Expr {
+ return &Expr{Terms: terms}
+}
+
+func (d *SomeDecl) String() string {
+ buf := make([]string, len(d.Symbols))
+ for i := range buf {
+ buf[i] = d.Symbols[i].String()
+ }
+ return "some " + strings.Join(buf, ", ")
+}
+
+// SetLoc sets the Location on d.
+func (d *SomeDecl) SetLoc(loc *Location) {
+ d.Location = loc
+}
+
+// Loc returns the Location of d.
+func (d *SomeDecl) Loc() *Location {
+ return d.Location
+}
+
+// Copy returns a deep copy of d.
+func (d *SomeDecl) Copy() *SomeDecl {
+ cpy := *d
+ cpy.Symbols = termSliceCopy(d.Symbols)
+ return &cpy
+}
+
+// Compare returns an integer indicating whether d is less than, equal to, or
+// greater than other.
+func (d *SomeDecl) Compare(other *SomeDecl) int {
+ return termSliceCompare(d.Symbols, other.Symbols)
+}
+
+// Hash returns a hash code of d.
+func (d *SomeDecl) Hash() int {
+ return termSliceHash(d.Symbols)
+}
+
+func (w *With) String() string {
+ return "with " + w.Target.String() + " as " + w.Value.String()
+}
+
+// Equal returns true if this With is equals the other With.
+func (w *With) Equal(other *With) bool {
+ return Compare(w, other) == 0
+}
+
+// Compare returns an integer indicating whether w is less than, equal to, or
+// greater than other.
+func (w *With) Compare(other *With) int {
+ if w == nil {
+ if other == nil {
+ return 0
+ }
+ return -1
+ } else if other == nil {
+ return 1
+ }
+ if cmp := Compare(w.Target, other.Target); cmp != 0 {
+ return cmp
+ }
+ return Compare(w.Value, other.Value)
+}
+
+// Copy returns a deep copy of w.
+func (w *With) Copy() *With {
+ cpy := *w
+ cpy.Value = w.Value.Copy()
+ cpy.Target = w.Target.Copy()
+ return &cpy
+}
+
+// Hash returns the hash code of the With.
+func (w With) Hash() int {
+ return w.Target.Hash() + w.Value.Hash()
+}
+
+// SetLocation sets the location on w.
+func (w *With) SetLocation(loc *Location) *With {
+ w.Location = loc
+ return w
+}
+
+// Loc returns the Location of w.
+func (w *With) Loc() *Location {
+ if w == nil {
+ return nil
+ }
+ return w.Location
+}
+
+// SetLoc sets the location on w.
+func (w *With) SetLoc(loc *Location) {
+ w.Location = loc
+}
+
+// Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified.
+func Copy(x interface{}) interface{} {
+ switch x := x.(type) {
+ case *Module:
+ return x.Copy()
+ case *Package:
+ return x.Copy()
+ case *Import:
+ return x.Copy()
+ case *Rule:
+ return x.Copy()
+ case *Head:
+ return x.Copy()
+ case Args:
+ return x.Copy()
+ case Body:
+ return x.Copy()
+ case *Expr:
+ return x.Copy()
+ case *With:
+ return x.Copy()
+ case *SomeDecl:
+ return x.Copy()
+ case *Term:
+ return x.Copy()
+ case *ArrayComprehension:
+ return x.Copy()
+ case *SetComprehension:
+ return x.Copy()
+ case *ObjectComprehension:
+ return x.Copy()
+ case Set:
+ return x.Copy()
+ case *object:
+ return x.Copy()
+ case *Array:
+ return x.Copy()
+ case Ref:
+ return x.Copy()
+ case Call:
+ return x.Copy()
+ case *Comment:
+ return x.Copy()
+ }
+ return x
+}
+
+// RuleSet represents a collection of rules that produce a virtual document.
+type RuleSet []*Rule
+
+// NewRuleSet returns a new RuleSet containing the given rules.
+func NewRuleSet(rules ...*Rule) RuleSet {
+ rs := make(RuleSet, 0, len(rules))
+ for _, rule := range rules {
+ rs.Add(rule)
+ }
+ return rs
+}
+
+// Add inserts the rule into rs.
+func (rs *RuleSet) Add(rule *Rule) {
+ for _, exist := range *rs {
+ if exist.Equal(rule) {
+ return
+ }
+ }
+ *rs = append(*rs, rule)
+}
+
+// Contains returns true if rs contains rule.
+func (rs RuleSet) Contains(rule *Rule) bool {
+	for _, r := range rs {
+		if r.Equal(rule) {
+			return true
+		}
+	}
+	return false
+}
+
+// Diff returns a new RuleSet containing rules in rs that are not in other.
+func (rs RuleSet) Diff(other RuleSet) RuleSet {
+ result := NewRuleSet()
+ for i := range rs {
+ if !other.Contains(rs[i]) {
+ result.Add(rs[i])
+ }
+ }
+ return result
+}
+
+// Equal returns true if rs equals other.
+func (rs RuleSet) Equal(other RuleSet) bool {
+ return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0
+}
+
+// Merge returns a ruleset containing the union of rules from rs an other.
+func (rs RuleSet) Merge(other RuleSet) RuleSet {
+ result := NewRuleSet()
+ for i := range rs {
+ result.Add(rs[i])
+ }
+ for i := range other {
+ result.Add(other[i])
+ }
+ return result
+}
+
+func (rs RuleSet) String() string {
+ buf := make([]string, 0, len(rs))
+ for _, rule := range rs {
+ buf = append(buf, rule.String())
+ }
+ return "{" + strings.Join(buf, ", ") + "}"
+}
+
+type ruleSlice []*Rule
+
+func (s ruleSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
+func (s ruleSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
+func (s ruleSlice) Len() int { return len(s) }
+
+// Returns true if the equality or assignment expression referred to by expr
+// has a valid number of arguments.
+func validEqAssignArgCount(expr *Expr) bool {
+ return len(expr.Operands()) == 2
+}
+
+// isglobalbuiltin checks if the expr refers to a non-namespaced (global)
+// built-in function like eq, gt, plus, etc.
+func isglobalbuiltin(expr *Expr, name Var) bool {
+	terms, ok := expr.Terms.([]*Term)
+	if !ok {
+		return false
+	}
+
+	// NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid
+	// allocation here.
+	ref, ok := terms[0].Value.(Ref)
+	if !ok || len(ref) != 1 {
+		return false
+	}
+	head, ok := ref[0].Value.(Var)
+	if !ok {
+		return false
+	}
+	return head.Equal(name)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/pretty.go b/vendor/github.com/open-policy-agent/opa/ast/pretty.go
new file mode 100644
index 00000000..b4f05ad5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/pretty.go
@@ -0,0 +1,82 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Pretty writes a pretty representation of the AST rooted at x to w.
+//
+// This function is intended for debug purposes when inspecting ASTs.
+func Pretty(w io.Writer, x interface{}) {
+ pp := &prettyPrinter{
+ depth: -1,
+ w: w,
+ }
+ NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x)
+}
+
+type prettyPrinter struct {
+ depth int
+ w io.Writer
+}
+
+func (pp *prettyPrinter) Before(x interface{}) bool {
+ switch x.(type) {
+ case *Term:
+ default:
+ pp.depth++
+ }
+
+ switch x := x.(type) {
+ case *Term:
+ return false
+ case Args:
+ if len(x) == 0 {
+ return false
+ }
+ pp.writeType(x)
+ case *Expr:
+ extras := []string{}
+ if x.Negated {
+ extras = append(extras, "negated")
+ }
+ extras = append(extras, fmt.Sprintf("index=%d", x.Index))
+ pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " "))
+ case Null, Boolean, Number, String, Var:
+ pp.writeValue(x)
+ default:
+ pp.writeType(x)
+ }
+ return false
+}
+
+func (pp *prettyPrinter) After(x interface{}) {
+ switch x.(type) {
+ case *Term:
+ default:
+ pp.depth--
+ }
+}
+
+func (pp *prettyPrinter) writeValue(x interface{}) {
+ pp.writeIndent(fmt.Sprint(x))
+}
+
+func (pp *prettyPrinter) writeType(x interface{}) {
+ pp.writeIndent(TypeName(x))
+}
+
+func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
+ pad := strings.Repeat(" ", pp.depth)
+ pp.write(pad+f, a...)
+}
+
+func (pp *prettyPrinter) write(f string, a ...interface{}) {
+ fmt.Fprintf(pp.w, f+"\n", a...)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/strings.go b/vendor/github.com/open-policy-agent/opa/ast/strings.go
new file mode 100644
index 00000000..8f992801
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/strings.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "reflect"
+ "strings"
+)
+
+// TypeName returns a human readable name for the AST element type.
+func TypeName(x interface{}) string {
+ return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name())
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/term.go b/vendor/github.com/open-policy-agent/opa/ast/term.go
new file mode 100644
index 00000000..370e6a7f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/term.go
@@ -0,0 +1,2683 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/OneOfOne/xxhash"
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/ast/location"
+ "github.com/open-policy-agent/opa/util"
+)
+
+var errFindNotFound = fmt.Errorf("find: not found")
+
+// Location records a position in source code.
+type Location = location.Location
+
+// NewLocation returns a new Location object.
+func NewLocation(text []byte, file string, row int, col int) *Location {
+ return location.NewLocation(text, file, row, col)
+}
+
+// Value declares the common interface for all Term values. Every kind of Term value
+// in the language is represented as a type that implements this interface:
+//
+// - Null, Boolean, Number, String
+// - Object, Array, Set
+// - Variables, References
+// - Array, Set, and Object Comprehensions
+// - Calls
+type Value interface {
+ Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively.
+ Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found.
+ Hash() int // Returns hash code of the value.
+ IsGround() bool // IsGround returns true if this value is not a variable or contains no variables.
+ String() string // String returns a human readable string representation of the value.
+}
+
+// InterfaceToValue converts a native Go value x to a Value.
+func InterfaceToValue(x interface{}) (Value, error) {
+ switch x := x.(type) {
+ case nil:
+ return Null{}, nil
+ case bool:
+ return Boolean(x), nil
+ case json.Number:
+ return Number(x), nil
+ case int64:
+ return int64Number(x), nil
+ case uint64:
+ return uint64Number(x), nil
+ case float64:
+ return floatNumber(x), nil
+ case int:
+ return intNumber(x), nil
+ case string:
+ return String(x), nil
+ case []interface{}:
+ r := make([]*Term, len(x))
+ for i, e := range x {
+ e, err := InterfaceToValue(e)
+ if err != nil {
+ return nil, err
+ }
+ r[i] = &Term{Value: e}
+ }
+ return NewArray(r...), nil
+ case map[string]interface{}:
+ r := newobject(len(x))
+ for k, v := range x {
+ k, err := InterfaceToValue(k)
+ if err != nil {
+ return nil, err
+ }
+ v, err := InterfaceToValue(v)
+ if err != nil {
+ return nil, err
+ }
+ r.Insert(NewTerm(k), NewTerm(v))
+ }
+ return r, nil
+ case map[string]string:
+ r := newobject(len(x))
+ for k, v := range x {
+ k, err := InterfaceToValue(k)
+ if err != nil {
+ return nil, err
+ }
+ v, err := InterfaceToValue(v)
+ if err != nil {
+ return nil, err
+ }
+ r.Insert(NewTerm(k), NewTerm(v))
+ }
+ return r, nil
+ default:
+ return nil, fmt.Errorf("ast: illegal value: %T", x)
+ }
+}
+
+// ValueFromReader returns an AST value from a JSON serialized value in the reader.
+func ValueFromReader(r io.Reader) (Value, error) {
+ var x interface{}
+ if err := util.NewJSONDecoder(r).Decode(&x); err != nil {
+ return nil, err
+ }
+ return InterfaceToValue(x)
+}
+
+// As converts v into a Go native type referred to by x.
+func As(v Value, x interface{}) error {
+ return util.NewJSONDecoder(bytes.NewBufferString(v.String())).Decode(x)
+}
+
+// Resolver defines the interface for resolving references to native Go values.
+type Resolver interface {
+ Resolve(ref Ref) (value interface{}, err error)
+}
+
+// ValueResolver defines the interface for resolving references to AST values.
+type ValueResolver interface {
+ Resolve(ref Ref) (value Value, err error)
+}
+
+// UnknownValueErr indicates a ValueResolver was unable to resolve a reference
+// because the reference refers to an unknown value.
+type UnknownValueErr struct{}
+
+func (UnknownValueErr) Error() string {
+ return "unknown value"
+}
+
+// IsUnknownValueErr returns true if the err is an UnknownValueErr.
+func IsUnknownValueErr(err error) bool {
+ _, ok := err.(UnknownValueErr)
+ return ok
+}
+
+type illegalResolver struct{}
+
+func (illegalResolver) Resolve(ref Ref) (interface{}, error) {
+ return nil, fmt.Errorf("illegal value: %v", ref)
+}
+
+// ValueToInterface returns the Go representation of an AST value. The AST
+// value should not contain any values that require evaluation (e.g., vars,
+// comprehensions, etc.)
+func ValueToInterface(v Value, resolver Resolver) (interface{}, error) {
+ switch v := v.(type) {
+ case Null:
+ return nil, nil
+ case Boolean:
+ return bool(v), nil
+ case Number:
+ return json.Number(v), nil
+ case String:
+ return string(v), nil
+ case *Array:
+ buf := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ x1, err := ValueToInterface(v.Elem(i).Value, resolver)
+ if err != nil {
+ return nil, err
+ }
+ buf = append(buf, x1)
+ }
+ return buf, nil
+ case *object:
+ buf := make(map[string]interface{}, v.Len())
+ err := v.Iter(func(k, v *Term) error {
+ ki, err := ValueToInterface(k.Value, resolver)
+ if err != nil {
+ return err
+ }
+ var str string
+ var ok bool
+ if str, ok = ki.(string); !ok {
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(ki); err != nil {
+ return err
+ }
+ str = strings.TrimSpace(buf.String())
+ }
+ vi, err := ValueToInterface(v.Value, resolver)
+ if err != nil {
+ return err
+ }
+ buf[str] = vi
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+ case Set:
+ buf := []interface{}{}
+ err := v.Iter(func(x *Term) error {
+ x1, err := ValueToInterface(x.Value, resolver)
+ if err != nil {
+ return err
+ }
+ buf = append(buf, x1)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+ case Ref:
+ return resolver.Resolve(v)
+ default:
+ return nil, fmt.Errorf("%v requires evaluation", TypeName(v))
+ }
+}
+
+// JSON returns the JSON representation of v. The value must not contain any
+// refs or terms that require evaluation (e.g., vars, comprehensions, etc.)
+func JSON(v Value) (interface{}, error) {
+ return ValueToInterface(v, illegalResolver{})
+}
+
+// MustJSON returns the JSON representation of v. The value must not contain any
+// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If
+// the conversion fails, this function will panic. This function is mostly for
+// test purposes.
+func MustJSON(v Value) interface{} {
+ r, err := JSON(v)
+ if err != nil {
+ panic(err)
+ }
+ return r
+}
+
+// MustInterfaceToValue converts a native Go value x to a Value. If the
+// conversion fails, this function will panic. This function is mostly for test
+// purposes.
+func MustInterfaceToValue(x interface{}) Value {
+ v, err := InterfaceToValue(x)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Term is an argument to a function.
+type Term struct {
+ Value Value `json:"value"` // the value of the Term as represented in Go
+ Location *Location `json:"-"` // the location of the Term in the source
+}
+
+// NewTerm returns a new Term object.
+func NewTerm(v Value) *Term {
+ return &Term{
+ Value: v,
+ }
+}
+
+// SetLocation updates the term's Location and returns the term itself.
+func (term *Term) SetLocation(loc *Location) *Term {
+ term.Location = loc
+ return term
+}
+
+// Loc returns the Location of term.
+func (term *Term) Loc() *Location {
+ if term == nil {
+ return nil
+ }
+ return term.Location
+}
+
+// SetLoc sets the location on term.
+func (term *Term) SetLoc(loc *Location) {
+ term.SetLocation(loc)
+}
+
+// Copy returns a deep copy of term.
+func (term *Term) Copy() *Term {
+
+ if term == nil {
+ return nil
+ }
+
+ cpy := *term
+
+ switch v := term.Value.(type) {
+ case Null, Boolean, Number, String, Var:
+ cpy.Value = v
+ case Ref:
+ cpy.Value = v.Copy()
+ case *Array:
+ cpy.Value = v.Copy()
+ case Set:
+ cpy.Value = v.Copy()
+ case *object:
+ cpy.Value = v.Copy()
+ case *ArrayComprehension:
+ cpy.Value = v.Copy()
+ case *ObjectComprehension:
+ cpy.Value = v.Copy()
+ case *SetComprehension:
+ cpy.Value = v.Copy()
+ case Call:
+ cpy.Value = v.Copy()
+ }
+
+ return &cpy
+}
+
+// Equal returns true if this term equals the other term. Equality is
+// defined for each kind of term.
+func (term *Term) Equal(other *Term) bool {
+ if term == nil && other != nil {
+ return false
+ }
+ if term != nil && other == nil {
+ return false
+ }
+ if term == other {
+ return true
+ }
+
+ // TODO(tsandall): This early-exit avoids allocations for types that have
+ // Equal() functions that just use == underneath. We should revisit the
+ // other types and implement Equal() functions that do not require
+ // allocations.
+ switch v := term.Value.(type) {
+ case Null:
+ return v.Equal(other.Value)
+ case Boolean:
+ return v.Equal(other.Value)
+ case Number:
+ return v.Equal(other.Value)
+ case String:
+ return v.Equal(other.Value)
+ case Var:
+ return v.Equal(other.Value)
+ }
+
+ return term.Value.Compare(other.Value) == 0
+}
+
+// Get returns a value referred to by name from the term.
+func (term *Term) Get(name *Term) *Term {
+ switch v := term.Value.(type) {
+ case *Array:
+ return v.Get(name)
+ case *object:
+ return v.Get(name)
+ case Set:
+ if v.Contains(name) {
+ return name
+ }
+ }
+ return nil
+}
+
+// Hash returns the hash code of the Term's value.
+func (term *Term) Hash() int {
+ return term.Value.Hash()
+}
+
+// IsGround returns true if this term's Value is ground.
+func (term *Term) IsGround() bool {
+ return term.Value.IsGround()
+}
+
+// MarshalJSON returns the JSON encoding of the term.
+//
+// Specialized marshalling logic is required to include a type hint for Value.
+func (term *Term) MarshalJSON() ([]byte, error) {
+ d := map[string]interface{}{
+ "type": TypeName(term.Value),
+ "value": term.Value,
+ }
+ return json.Marshal(d)
+}
+
+func (term *Term) String() string {
+ return term.Value.String()
+}
+
+// UnmarshalJSON parses the byte array and stores the result in term.
+// Specialized unmarshalling is required to handle Value.
+func (term *Term) UnmarshalJSON(bs []byte) error {
+ v := map[string]interface{}{}
+ if err := util.UnmarshalJSON(bs, &v); err != nil {
+ return err
+ }
+ val, err := unmarshalValue(v)
+ if err != nil {
+ return err
+ }
+ term.Value = val
+ return nil
+}
+
+// Vars returns a VarSet with variables contained in this term.
+func (term *Term) Vars() VarSet {
+ vis := &VarVisitor{vars: VarSet{}}
+ vis.Walk(term)
+ return vis.vars
+}
+
+// IsConstant returns true if the AST value is constant.
+func IsConstant(v Value) bool {
+ found := false
+ vis := GenericVisitor{
+ func(x interface{}) bool {
+ switch x.(type) {
+ case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call:
+ found = true
+ return true
+ }
+ return false
+ },
+ }
+ vis.Walk(v)
+ return !found
+}
+
+// IsComprehension returns true if the supplied value is a comprehension.
+func IsComprehension(x Value) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return true
+ }
+ return false
+}
+
+// ContainsRefs returns true if the Value v contains refs.
+func ContainsRefs(v interface{}) bool {
+ found := false
+ WalkRefs(v, func(r Ref) bool {
+ found = true
+ return found
+ })
+ return found
+}
+
+// ContainsComprehensions returns true if the Value v contains comprehensions.
+func ContainsComprehensions(v interface{}) bool {
+ found := false
+ WalkClosures(v, func(x interface{}) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ found = true
+ return found
+ }
+ return found
+ })
+ return found
+}
+
+// IsScalar returns true if the AST value is a scalar.
+func IsScalar(v Value) bool {
+ switch v.(type) {
+ case String:
+ return true
+ case Number:
+ return true
+ case Boolean:
+ return true
+ case Null:
+ return true
+ }
+ return false
+}
+
+// Null represents the null value defined by JSON.
+type Null struct{}
+
+// NullTerm creates a new Term with a Null value.
+func NullTerm() *Term {
+ return &Term{Value: Null{}}
+}
+
+// Equal returns true if the other term Value is also Null.
+func (null Null) Equal(other Value) bool {
+ switch other.(type) {
+ case Null:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (null Null) Compare(other Value) int {
+ return Compare(null, other)
+}
+
+// Find returns the current value or a not found error.
+func (null Null) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return null, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (null Null) Hash() int {
+ return 0
+}
+
+// IsGround always returns true.
+func (null Null) IsGround() bool {
+ return true
+}
+
+func (null Null) String() string {
+ return "null"
+}
+
+// Boolean represents a boolean value defined by JSON.
+type Boolean bool
+
+// BooleanTerm creates a new Term with a Boolean value.
+func BooleanTerm(b bool) *Term {
+ return &Term{Value: Boolean(b)}
+}
+
+// Equal returns true if the other Value is a Boolean and is equal.
+func (bol Boolean) Equal(other Value) bool {
+ switch other := other.(type) {
+ case Boolean:
+ return bol == other
+ default:
+ return false
+ }
+}
+
+// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (bol Boolean) Compare(other Value) int {
+ return Compare(bol, other)
+}
+
+// Find returns the current value or a not found error.
+func (bol Boolean) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return bol, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (bol Boolean) Hash() int {
+ if bol {
+ return 1
+ }
+ return 0
+}
+
+// IsGround always returns true.
+func (bol Boolean) IsGround() bool {
+ return true
+}
+
+func (bol Boolean) String() string {
+ return strconv.FormatBool(bool(bol))
+}
+
+// Number represents a numeric value as defined by JSON.
+type Number json.Number
+
+// NumberTerm creates a new Term with a Number value.
+func NumberTerm(n json.Number) *Term {
+ return &Term{Value: Number(n)}
+}
+
+// IntNumberTerm creates a new Term with an integer Number value.
+func IntNumberTerm(i int) *Term {
+ return &Term{Value: Number(strconv.Itoa(i))}
+}
+
+// UIntNumberTerm creates a new Term with an unsigned integer Number value.
+func UIntNumberTerm(u uint64) *Term {
+ return &Term{Value: uint64Number(u)}
+}
+
+// FloatNumberTerm creates a new Term with a floating point Number value.
+func FloatNumberTerm(f float64) *Term {
+ s := strconv.FormatFloat(f, 'g', -1, 64)
+ return &Term{Value: Number(s)}
+}
+
+// Equal returns true if the other Value is a Number and is equal.
+func (num Number) Equal(other Value) bool {
+ switch other := other.(type) {
+ case Number:
+ return Compare(num, other) == 0
+ default:
+ return false
+ }
+}
+
+// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (num Number) Compare(other Value) int {
+ return Compare(num, other)
+}
+
+// Find returns the current value or a not found error.
+func (num Number) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return num, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (num Number) Hash() int {
+ f, err := json.Number(num).Float64()
+ if err != nil {
+ bs := []byte(num)
+ h := xxhash.Checksum64(bs)
+ return int(h)
+ }
+ return int(f)
+}
+
+// Int returns the int representation of num if possible.
+func (num Number) Int() (int, bool) {
+ i64, ok := num.Int64()
+ return int(i64), ok
+}
+
+// Int64 returns the int64 representation of num if possible.
+func (num Number) Int64() (int64, bool) {
+ i, err := json.Number(num).Int64()
+ if err != nil {
+ return 0, false
+ }
+ return i, true
+}
+
+// Float64 returns the float64 representation of num if possible.
+func (num Number) Float64() (float64, bool) {
+ f, err := json.Number(num).Float64()
+ if err != nil {
+ return 0, false
+ }
+ return f, true
+}
+
+// IsGround always returns true.
+func (num Number) IsGround() bool {
+ return true
+}
+
+// MarshalJSON returns JSON encoded bytes representing num.
+func (num Number) MarshalJSON() ([]byte, error) {
+ return json.Marshal(json.Number(num))
+}
+
+func (num Number) String() string {
+ return string(num)
+}
+
+func intNumber(i int) Number {
+ return Number(strconv.Itoa(i))
+}
+
+func int64Number(i int64) Number {
+ return Number(strconv.FormatInt(i, 10))
+}
+
+func uint64Number(u uint64) Number {
+ return Number(strconv.FormatUint(u, 10))
+}
+
+func floatNumber(f float64) Number {
+ return Number(strconv.FormatFloat(f, 'g', -1, 64))
+}
+
+// String represents a string value as defined by JSON.
+type String string
+
+// StringTerm creates a new Term with a String value.
+func StringTerm(s string) *Term {
+ return &Term{Value: String(s)}
+}
+
+// Equal returns true if the other Value is a String and is equal.
+func (str String) Equal(other Value) bool {
+ switch other := other.(type) {
+ case String:
+ return str == other
+ default:
+ return false
+ }
+}
+
+// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (str String) Compare(other Value) int {
+ return Compare(str, other)
+}
+
+// Find returns the current value or a not found error.
+func (str String) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return str, nil
+ }
+ return nil, errFindNotFound
+}
+
+// IsGround always returns true.
+func (str String) IsGround() bool {
+ return true
+}
+
+func (str String) String() string {
+ return strconv.Quote(string(str))
+}
+
+// Hash returns the hash code for the Value.
+func (str String) Hash() int {
+ h := xxhash.ChecksumString64S(string(str), hashSeed0)
+ return int(h)
+}
+
+// Var represents a variable as defined by the language.
+type Var string
+
+// VarTerm creates a new Term with a Variable value.
+func VarTerm(v string) *Term {
+ return &Term{Value: Var(v)}
+}
+
+// Equal returns true if the other Value is a Variable and has the same value
+// (name).
+func (v Var) Equal(other Value) bool {
+ switch other := other.(type) {
+ case Var:
+ return v == other
+ default:
+ return false
+ }
+}
+
+// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (v Var) Compare(other Value) int {
+ return Compare(v, other)
+}
+
+// Find returns the current value or a not found error.
+func (v Var) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return v, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (v Var) Hash() int {
+ h := xxhash.ChecksumString64S(string(v), hashSeed0)
+ return int(h)
+}
+
+// IsGround always returns false.
+func (v Var) IsGround() bool {
+ return false
+}
+
+// IsWildcard returns true if this is a wildcard variable.
+func (v Var) IsWildcard() bool {
+ return strings.HasPrefix(string(v), WildcardPrefix)
+}
+
+// IsGenerated returns true if this variable was generated during compilation.
+func (v Var) IsGenerated() bool {
+ return strings.HasPrefix(string(v), "__local")
+}
+
+func (v Var) String() string {
+ // Special case for wildcard so that string representation is parseable. The
+ // parser mangles wildcard variables to make their names unique and uses an
+ // illegal variable name character (WildcardPrefix) to avoid conflicts. When
+ // we serialize the variable here, we need to make sure it's parseable.
+ if v.IsWildcard() {
+ return Wildcard.String()
+ }
+ return string(v)
+}
+
+// Ref represents a reference as defined by the language.
+type Ref []*Term
+
+// EmptyRef returns a new, empty reference.
+func EmptyRef() Ref {
+ return Ref([]*Term{})
+}
+
+// PtrRef returns a new reference against the head for the pointer
+// s. Path components in the pointer are unescaped.
+func PtrRef(head *Term, s string) (Ref, error) {
+ s = strings.Trim(s, "/")
+ if s == "" {
+ return Ref{head}, nil
+ }
+ parts := strings.Split(s, "/")
+ ref := make(Ref, len(parts)+1)
+ ref[0] = head
+ for i := 0; i < len(parts); i++ {
+ var err error
+ parts[i], err = url.PathUnescape(parts[i])
+ if err != nil {
+ return nil, err
+ }
+ ref[i+1] = StringTerm(parts[i])
+ }
+ return ref, nil
+}
+
+// RefTerm creates a new Term with a Ref value.
+func RefTerm(r ...*Term) *Term {
+ return &Term{Value: Ref(r)}
+}
+
+// Append returns a copy of ref with the term appended to the end.
+func (ref Ref) Append(term *Term) Ref {
+ n := len(ref)
+ dst := make(Ref, n+1)
+ copy(dst, ref)
+ dst[n] = term
+ return dst
+}
+
+// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref),
+// existing elements are shifted to the right. If pos > len(ref)+1 this
+// function panics.
+func (ref Ref) Insert(x *Term, pos int) Ref {
+ if pos == len(ref) {
+ return ref.Append(x)
+ } else if pos > len(ref)+1 {
+ panic("illegal index")
+ }
+ cpy := make(Ref, len(ref)+1)
+ for i := 0; i < pos; i++ {
+ cpy[i] = ref[i]
+ }
+ cpy[pos] = x
+ for i := pos; i < len(ref); i++ {
+ cpy[i+1] = ref[i]
+ }
+ return cpy
+}
+
+// Extend returns a copy of ref with the terms from other appended. The head of
+// other will be converted to a string.
+func (ref Ref) Extend(other Ref) Ref {
+ dst := make(Ref, len(ref)+len(other))
+ for i := range ref {
+ dst[i] = ref[i]
+ }
+ head := other[0].Copy()
+ head.Value = String(head.Value.(Var))
+ offset := len(ref)
+ dst[offset] = head
+ for i := range other[1:] {
+ dst[offset+i+1] = other[i+1]
+ }
+ return dst
+}
+
+// Concat returns a ref with the terms appended.
+func (ref Ref) Concat(terms []*Term) Ref {
+ if len(terms) == 0 {
+ return ref
+ }
+ cpy := make(Ref, len(ref)+len(terms))
+ for i := range ref {
+ cpy[i] = ref[i]
+ }
+ for i := range terms {
+ cpy[len(ref)+i] = terms[i]
+ }
+ return cpy
+}
+
+// Dynamic returns the offset of the first non-constant operand of ref.
+func (ref Ref) Dynamic() int {
+ switch ref[0].Value.(type) {
+ case Call:
+ return 0
+ }
+ for i := 1; i < len(ref); i++ {
+ if !IsConstant(ref[i].Value) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Copy returns a deep copy of ref.
+func (ref Ref) Copy() Ref {
+ return termSliceCopy(ref)
+}
+
+// Equal returns true if ref is equal to other.
+func (ref Ref) Equal(other Value) bool {
+ return Compare(ref, other) == 0
+}
+
+// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (ref Ref) Compare(other Value) int {
+ return Compare(ref, other)
+}
+
+// Find returns the current value or a not found error.
+func (ref Ref) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return ref, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (ref Ref) Hash() int {
+ return termSliceHash(ref)
+}
+
+// HasPrefix returns true if the other ref is a prefix of this ref.
+func (ref Ref) HasPrefix(other Ref) bool {
+ if len(other) > len(ref) {
+ return false
+ }
+ for i := range other {
+ if !ref[i].Equal(other[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// ConstantPrefix returns the constant portion of the ref starting from the head.
+func (ref Ref) ConstantPrefix() Ref {
+ ref = ref.Copy()
+
+ i := ref.Dynamic()
+ if i < 0 {
+ return ref
+ }
+ return ref[:i]
+}
+
+// GroundPrefix returns the ground portion of the ref starting from the head. By
+// definition, the head of the reference is always ground.
+func (ref Ref) GroundPrefix() Ref {
+ prefix := make(Ref, 0, len(ref))
+
+ for i, x := range ref {
+ if i > 0 && !x.IsGround() {
+ break
+ }
+ prefix = append(prefix, x)
+ }
+
+ return prefix
+}
+
+// IsGround returns true if all of the parts of the Ref are ground.
+func (ref Ref) IsGround() bool {
+ if len(ref) == 0 {
+ return true
+ }
+ return termSliceIsGround(ref[1:])
+}
+
+// IsNested returns true if this ref contains other Refs.
+func (ref Ref) IsNested() bool {
+ for _, x := range ref {
+ if _, ok := x.Value.(Ref); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Ptr returns a slash-separated path string for this ref. If the ref
+// contains non-string terms this function returns an error. Path
+// components are escaped.
+func (ref Ref) Ptr() (string, error) {
+ parts := make([]string, 0, len(ref)-1)
+ for _, term := range ref[1:] {
+ if str, ok := term.Value.(String); ok {
+ parts = append(parts, url.PathEscape(string(str)))
+ } else {
+ return "", fmt.Errorf("invalid path value type")
+ }
+ }
+ return strings.Join(parts, "/"), nil
+}
+
+var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
+
+func (ref Ref) String() string {
+ if len(ref) == 0 {
+ return ""
+ }
+ buf := []string{ref[0].Value.String()}
+ path := ref[1:]
+ for _, p := range path {
+ switch p := p.Value.(type) {
+ case String:
+ str := string(p)
+ if varRegexp.MatchString(str) && len(buf) > 0 && !IsKeyword(str) {
+ buf = append(buf, "."+str)
+ } else {
+ buf = append(buf, "["+p.String()+"]")
+ }
+ default:
+ buf = append(buf, "["+p.String()+"]")
+ }
+ }
+ return strings.Join(buf, "")
+}
+
+// OutputVars returns a VarSet containing variables that would be bound by evaluating
+// this expression in isolation.
+func (ref Ref) OutputVars() VarSet {
+ vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true})
+ vis.Walk(ref)
+ return vis.Vars()
+}
+
+// QueryIterator defines the interface for querying AST documents with references.
+type QueryIterator func(map[Var]Value, Value) error
+
+// ArrayTerm creates a new Term with an Array value.
+func ArrayTerm(a ...*Term) *Term {
+ return &Term{Value: &Array{a, 0}}
+}
+
+// NewArray creates an Array with the terms provided. The array will
+// use the provided term slice.
+func NewArray(a ...*Term) *Array {
+ return &Array{a, 0}
+}
+
+// Array represents an array as defined by the language. Arrays are similar to the
+// same types as defined by JSON with the exception that they can contain Vars
+// and References.
+type Array struct {
+ elems []*Term
+ hash int
+}
+
+// Copy returns a deep copy of arr.
+func (arr *Array) Copy() *Array {
+ return &Array{termSliceCopy(arr.elems), arr.hash}
+}
+
+// Equal returns true if arr is equal to other.
+func (arr *Array) Equal(other Value) bool {
+ return Compare(arr, other) == 0
+}
+
+// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (arr *Array) Compare(other Value) int {
+ return Compare(arr, other)
+}
+
+// Find returns the value at the index or an out-of-range error.
+func (arr *Array) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return arr, nil
+ }
+ num, ok := path[0].Value.(Number)
+ if !ok {
+ return nil, errFindNotFound
+ }
+ i, ok := num.Int()
+ if !ok {
+ return nil, errFindNotFound
+ }
+ if i < 0 || i >= arr.Len() {
+ return nil, errFindNotFound
+ }
+ return arr.Elem(i).Value.Find(path[1:])
+}
+
+// Get returns the element at pos or nil if not possible.
+func (arr *Array) Get(pos *Term) *Term {
+ num, ok := pos.Value.(Number)
+ if !ok {
+ return nil
+ }
+
+ i, ok := num.Int()
+ if !ok {
+ return nil
+ }
+
+ if i >= 0 && i < len(arr.elems) {
+ return arr.elems[i]
+ }
+
+ return nil
+}
+
+// Sorted returns a new Array that contains the sorted elements of arr.
+func (arr *Array) Sorted() *Array {
+ cpy := make([]*Term, len(arr.elems))
+ for i := range cpy {
+ cpy[i] = arr.elems[i]
+ }
+ sort.Sort(termSlice(cpy))
+ a := NewArray(cpy...)
+ a.hash = arr.hash
+ return a
+}
+
+// Hash returns the hash code for the Value.
+func (arr *Array) Hash() int {
+ if arr.hash == 0 {
+ arr.hash = termSliceHash(arr.elems)
+ }
+
+ return arr.hash
+}
+
+// IsGround returns true if all of the Array elements are ground.
+func (arr *Array) IsGround() bool {
+ return termSliceIsGround(arr.elems)
+}
+
+// MarshalJSON returns JSON encoded bytes representing arr.
+func (arr *Array) MarshalJSON() ([]byte, error) {
+ if len(arr.elems) == 0 {
+ return json.Marshal([]interface{}{})
+ }
+ return json.Marshal(arr.elems)
+}
+
+func (arr *Array) String() string {
+ var buf []string
+ for _, e := range arr.elems {
+ buf = append(buf, e.String())
+ }
+ return "[" + strings.Join(buf, ", ") + "]"
+}
+
+// Len returns the number of elements in the array.
+func (arr *Array) Len() int {
+ return len(arr.elems)
+}
+
+// Elem returns the element i of arr.
+func (arr *Array) Elem(i int) *Term {
+ return arr.elems[i]
+}
+
+// set sets the element i of arr.
+func (arr *Array) set(i int, v *Term) {
+ arr.elems[i] = v
+ arr.hash = 0
+}
+
+// Slice returns a slice of arr starting from i index to j. -1
+// indicates the end of the array. The returned value array is not a
+// copy and any modifications to either of arrays may be reflected to
+// the other.
+func (arr *Array) Slice(i, j int) *Array {
+ if j == -1 {
+ return &Array{elems: arr.elems[i:]}
+ }
+
+ return &Array{elems: arr.elems[i:j]}
+}
+
+// Iter calls f on each element in arr. If f returns an error,
+// iteration stops and the return value is the error.
+func (arr *Array) Iter(f func(*Term) error) error {
+ for i := range arr.elems {
+ if err := f(arr.elems[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Until calls f on each element in arr. If f returns true, iteration stops.
+func (arr *Array) Until(f func(*Term) bool) bool {
+ err := arr.Iter(func(t *Term) error {
+ if f(t) {
+ return errStop
+ }
+ return nil
+ })
+ return err != nil
+}
+
+// Foreach calls f on each element in arr.
+func (arr *Array) Foreach(f func(*Term)) {
+ arr.Iter(func(t *Term) error {
+ f(t)
+ return nil
+ })
+}
+
+// Append appends a term to arr, returning the appended array.
+func (arr *Array) Append(v *Term) *Array {
+ cpy := *arr
+ cpy.elems = append(arr.elems, v)
+ cpy.hash = 0
+ return &cpy
+}
+
+// Set represents a set as defined by the language.
+type Set interface {
+ Value
+ Len() int
+ Copy() Set
+ Diff(Set) Set
+ Intersect(Set) Set
+ Union(Set) Set
+ Add(*Term)
+ Iter(func(*Term) error) error
+ Until(func(*Term) bool) bool
+ Foreach(func(*Term))
+ Contains(*Term) bool
+ Map(func(*Term) (*Term, error)) (Set, error)
+ Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error)
+ Sorted() *Array
+ Slice() []*Term
+}
+
+// NewSet returns a new Set containing t.
+func NewSet(t ...*Term) Set {
+ s := newset(len(t))
+ for i := range t {
+ s.Add(t[i])
+ }
+ return s
+}
+
+func newset(n int) *set {
+ var keys []*Term
+ if n > 0 {
+ keys = make([]*Term, 0, n)
+ }
+ return &set{
+ elems: make(map[int]*Term, n),
+ keys: keys,
+ hash: 0,
+ ground: true,
+ }
+}
+
+// SetTerm returns a new Term representing a set containing terms t.
+func SetTerm(t ...*Term) *Term {
+ set := NewSet(t...)
+ return &Term{
+ Value: set,
+ }
+}
+
+type set struct {
+ elems map[int]*Term
+ keys []*Term
+ hash int
+ ground bool
+}
+
+// Copy returns a deep copy of s.
+func (s *set) Copy() Set {
+ cpy := newset(s.Len())
+ s.Foreach(func(x *Term) {
+ cpy.Add(x.Copy())
+ })
+ cpy.hash = s.hash
+ cpy.ground = s.ground
+ return cpy
+}
+
+// IsGround returns true if all terms in s are ground.
+func (s *set) IsGround() bool {
+ return s.ground
+}
+
+// Hash returns a hash code for s (the sum of the element hashes, so it
+// is independent of insertion order). The result is memoized in s.hash;
+// note that a computed hash of 0 (e.g. for the empty set) is
+// indistinguishable from "not yet computed" and is recomputed on every
+// call — harmless, but worth knowing.
+func (s *set) Hash() int {
+	if s.hash == 0 {
+		s.Foreach(func(x *Term) {
+			s.hash += x.Hash()
+		})
+	}
+	return s.hash
+}
+
+func (s *set) String() string {
+ if s.Len() == 0 {
+ return "set()"
+ }
+ buf := []string{}
+ sorted := s.Sorted()
+ sorted.Foreach(func(x *Term) {
+ buf = append(buf, fmt.Sprint(x))
+ })
+ return "{" + strings.Join(buf, ", ") + "}"
+}
+
+// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (s *set) Compare(other Value) int {
+	o1 := sortOrder(s)
+	o2 := sortOrder(other)
+	if o1 < o2 {
+		return -1
+	} else if o1 > o2 {
+		return 1
+	}
+	t := other.(*set)
+	// NOTE: the key slices are sorted in place, so Compare mutates the
+	// internal key ordering of both sets (same caveat as the TODO on
+	// object.Compare — ideally Compare would be immutable).
+	sort.Sort(termSlice(s.keys))
+	sort.Sort(termSlice(t.keys))
+	return termSliceCompare(s.keys, t.keys)
+}
+
+// Find returns the set or dereferences the element itself.
+func (s *set) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return s, nil
+ }
+ if !s.Contains(path[0]) {
+ return nil, errFindNotFound
+ }
+ return path[0].Value.Find(path[1:])
+}
+
+// Diff returns elements in s that are not in other.
+func (s *set) Diff(other Set) Set {
+ r := NewSet()
+ s.Foreach(func(x *Term) {
+ if !other.Contains(x) {
+ r.Add(x)
+ }
+ })
+ return r
+}
+
+// Intersect returns the set containing elements in both s and other.
+func (s *set) Intersect(other Set) Set {
+ o := other.(*set)
+ n, m := s.Len(), o.Len()
+ ss := s
+ so := o
+ if m < n {
+ ss = o
+ so = s
+ n = m
+ }
+
+ r := newset(n)
+ ss.Foreach(func(x *Term) {
+ if so.Contains(x) {
+ r.Add(x)
+ }
+ })
+ return r
+}
+
+// Union returns the set containing all elements of s and other.
+func (s *set) Union(other Set) Set {
+ r := NewSet()
+ s.Foreach(func(x *Term) {
+ r.Add(x)
+ })
+ other.Foreach(func(x *Term) {
+ r.Add(x)
+ })
+ return r
+}
+
+// Add updates s to include t.
+func (s *set) Add(t *Term) {
+ s.insert(t)
+}
+
+// Iter calls f on each element in s. If f returns an error, iteration stops
+// and the return value is the error.
+func (s *set) Iter(f func(*Term) error) error {
+ for i := range s.keys {
+ if err := f(s.keys[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var errStop = errors.New("stop")
+
+// Until calls f on each element in s. If f returns true, iteration stops.
+func (s *set) Until(f func(*Term) bool) bool {
+ err := s.Iter(func(t *Term) error {
+ if f(t) {
+ return errStop
+ }
+ return nil
+ })
+ return err != nil
+}
+
+// Foreach calls f on each element in s.
+func (s *set) Foreach(f func(*Term)) {
+ s.Iter(func(t *Term) error {
+ f(t)
+ return nil
+ })
+}
+
+// Map returns a new Set obtained by applying f to each value in s.
+func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) {
+ set := NewSet()
+ err := s.Iter(func(x *Term) error {
+ term, err := f(x)
+ if err != nil {
+ return err
+ }
+ set.Add(term)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return set, nil
+}
+
+// Reduce returns a Term produced by applying f to each value in s. The first
+// argument to f is the reduced value (starting with i) and the second argument
+// to f is the element in s.
+func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) {
+ err := s.Iter(func(x *Term) error {
+ var err error
+ i, err = f(i, x)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ return i, err
+}
+
+// Contains returns true if t is in s.
+func (s *set) Contains(t *Term) bool {
+ return s.get(t) != nil
+}
+
+// Len returns the number of elements in the set.
+func (s *set) Len() int {
+ return len(s.keys)
+}
+
+// MarshalJSON returns JSON encoded bytes representing s.
+func (s *set) MarshalJSON() ([]byte, error) {
+ if s.keys == nil {
+ return json.Marshal([]interface{}{})
+ }
+ return json.Marshal(s.keys)
+}
+
+// Sorted returns an Array that contains the sorted elements of s.
+func (s *set) Sorted() *Array {
+ cpy := make([]*Term, len(s.keys))
+ for i := range s.keys {
+ cpy[i] = s.keys[i]
+ }
+ sort.Sort(termSlice(cpy))
+ return NewArray(cpy...)
+}
+
+// Slice returns a slice of terms contained in the set.
+func (s *set) Slice() []*Term {
+ return s.keys
+}
+
+func (s *set) insert(x *Term) {
+ hash := x.Hash()
+ // This `equal` utility is duplicated and manually inlined a number of
+ // time in this file. Inlining it avoids heap allocations, so it makes
+ // a big performance difference: some operations like lookup become twice
+ // as slow without it.
+ var equal func(v Value) bool
+
+ switch x := x.Value.(type) {
+ case Null, Boolean, String, Var:
+ equal = func(y Value) bool { return x == y }
+ case Number:
+ if xi, err := json.Number(x).Int64(); err == nil {
+ equal = func(y Value) bool {
+ if y, ok := y.(Number); ok {
+ if yi, err := json.Number(y).Int64(); err == nil {
+ return xi == yi
+ }
+ }
+
+ return false
+ }
+ break
+ }
+
+ a, ok := new(big.Float).SetString(string(x))
+ if !ok {
+ panic("illegal value")
+ }
+
+ equal = func(b Value) bool {
+ if b, ok := b.(Number); ok {
+ b, ok := new(big.Float).SetString(string(b))
+ if !ok {
+ panic("illegal value")
+ }
+
+ return a.Cmp(b) == 0
+ }
+
+ return false
+ }
+ default:
+ equal = func(y Value) bool { return Compare(x, y) == 0 }
+ }
+
+ for curr, ok := s.elems[hash]; ok; {
+ if equal(curr.Value) {
+ return
+ }
+
+ hash++
+ curr, ok = s.elems[hash]
+ }
+
+ s.elems[hash] = x
+ s.keys = append(s.keys, x)
+ s.hash = 0
+ s.ground = s.ground && x.IsGround()
+}
+
+func (s *set) get(x *Term) *Term {
+ hash := x.Hash()
+ // This `equal` utility is duplicated and manually inlined a number of
+ // time in this file. Inlining it avoids heap allocations, so it makes
+ // a big performance difference: some operations like lookup become twice
+ // as slow without it.
+ var equal func(v Value) bool
+
+ switch x := x.Value.(type) {
+ case Null, Boolean, String, Var:
+ equal = func(y Value) bool { return x == y }
+ case Number:
+ if xi, err := json.Number(x).Int64(); err == nil {
+ equal = func(y Value) bool {
+ if y, ok := y.(Number); ok {
+ if yi, err := json.Number(y).Int64(); err == nil {
+ return xi == yi
+ }
+ }
+
+ return false
+ }
+ break
+ }
+
+ a, ok := new(big.Float).SetString(string(x))
+ if !ok {
+ panic("illegal value")
+ }
+
+ equal = func(b Value) bool {
+ if b, ok := b.(Number); ok {
+ b, ok := new(big.Float).SetString(string(b))
+ if !ok {
+ panic("illegal value")
+ }
+
+ return a.Cmp(b) == 0
+ }
+
+ return false
+ }
+ default:
+ equal = func(y Value) bool { return Compare(x, y) == 0 }
+ }
+
+ for curr, ok := s.elems[hash]; ok; {
+ if equal(curr.Value) {
+ return curr
+ }
+
+ hash++
+ curr, ok = s.elems[hash]
+ }
+ return nil
+}
+
+// Object represents an object as defined by the language.
+type Object interface {
+ Value
+ Len() int
+ Get(*Term) *Term
+ Copy() Object
+ Insert(*Term, *Term)
+ Iter(func(*Term, *Term) error) error
+ Until(func(*Term, *Term) bool) bool
+ Foreach(func(*Term, *Term))
+ Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error)
+ Diff(other Object) Object
+ Intersect(other Object) [][3]*Term
+ Merge(other Object) (Object, bool)
+ MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool)
+ Filter(filter Object) (Object, error)
+ Keys() []*Term
+ Elem(i int) (*Term, *Term)
+ get(k *Term) *objectElem // To prevent external implementations
+}
+
+// NewObject creates a new Object with t.
+func NewObject(t ...[2]*Term) Object {
+ obj := newobject(len(t))
+ for i := range t {
+ obj.Insert(t[i][0], t[i][1])
+ }
+ return obj
+}
+
+// ObjectTerm creates a new Term with an Object value.
+func ObjectTerm(o ...[2]*Term) *Term {
+ return &Term{Value: NewObject(o...)}
+}
+
+type object struct {
+ elems map[int]*objectElem
+ keys objectElemSlice
+ ground int // number of key and value grounds. Counting is
+ // required to support insert's key-value replace.
+ hash int
+}
+
+func newobject(n int) *object {
+ var keys objectElemSlice
+ if n > 0 {
+ keys = make(objectElemSlice, 0, n)
+ }
+ return &object{
+ elems: make(map[int]*objectElem, n),
+ keys: keys,
+ ground: 0,
+ hash: 0,
+ }
+}
+
+type objectElem struct {
+ key *Term
+ value *Term
+ next *objectElem
+}
+
+type objectElemSlice []*objectElem
+
+func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 }
+func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
+func (s objectElemSlice) Len() int { return len(s) }
+
+// Item is a helper for constructing a tuple containing two Terms
+// representing a key/value pair in an Object.
+func Item(key, value *Term) [2]*Term {
+	return [2]*Term{key, value}
+}
+
+// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (obj *object) Compare(other Value) int {
+ o1 := sortOrder(obj)
+ o2 := sortOrder(other)
+ if o1 < o2 {
+ return -1
+ } else if o2 < o1 {
+ return 1
+ }
+ a := obj
+ b := other.(*object)
+ // TODO: Ideally Compare would be immutable; the following sorts happen in place.
+ sort.Sort(a.keys)
+ sort.Sort(b.keys)
+ minLen := len(a.keys)
+ if len(b.keys) < len(a.keys) {
+ minLen = len(b.keys)
+ }
+ for i := 0; i < minLen; i++ {
+ keysCmp := Compare(a.keys[i].key, b.keys[i].key)
+ if keysCmp < 0 {
+ return -1
+ }
+ if keysCmp > 0 {
+ return 1
+ }
+ valA := a.keys[i].value
+ valB := b.keys[i].value
+ valCmp := Compare(valA, valB)
+ if valCmp != 0 {
+ return valCmp
+ }
+ }
+ if len(a.keys) < len(b.keys) {
+ return -1
+ }
+ if len(b.keys) < len(a.keys) {
+ return 1
+ }
+ return 0
+}
+
+// Find returns the value at the key or undefined.
+func (obj *object) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return obj, nil
+ }
+ value := obj.Get(path[0])
+ if value == nil {
+ return nil, errFindNotFound
+ }
+ return value.Value.Find(path[1:])
+}
+
+func (obj *object) Insert(k, v *Term) {
+ obj.insert(k, v)
+}
+
+// Get returns the value of k in obj if k exists, otherwise nil.
+func (obj *object) Get(k *Term) *Term {
+ if elem := obj.get(k); elem != nil {
+ return elem.value
+ }
+ return nil
+}
+
+// Hash returns the hash code for the Value.
+func (obj *object) Hash() int {
+ if obj.hash == 0 {
+ for h, curr := range obj.elems {
+ for ; curr != nil; curr = curr.next {
+ obj.hash += h
+ obj.hash += curr.value.Hash()
+ }
+ }
+ }
+ return obj.hash
+}
+
+// IsGround returns true if all of the Object key/value pairs are ground.
+func (obj *object) IsGround() bool {
+ return obj.ground == 2*len(obj.keys)
+}
+
+// Copy returns a deep copy of obj.
+func (obj *object) Copy() Object {
+ cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) {
+ return k.Copy(), v.Copy(), nil
+ })
+ cpy.(*object).hash = obj.hash
+ return cpy
+}
+
+// Diff returns a new Object that contains only the key/value pairs that exist in obj.
+func (obj *object) Diff(other Object) Object {
+ r := NewObject()
+ obj.Foreach(func(k, v *Term) {
+ if other.Get(k) == nil {
+ r.Insert(k, v)
+ }
+ })
+ return r
+}
+
+// Intersect returns a slice of term triplets that represent the intersection of keys
+// between obj and other. For each intersecting key, the values from obj and other are included
+// as the last two terms in the triplet (respectively).
+func (obj *object) Intersect(other Object) [][3]*Term {
+ r := [][3]*Term{}
+ obj.Foreach(func(k, v *Term) {
+ if v2 := other.Get(k); v2 != nil {
+ r = append(r, [3]*Term{k, v, v2})
+ }
+ })
+ return r
+}
+
+// Iter calls the function f for each key-value pair in the object. If f
+// returns an error, iteration stops and the error is returned.
+func (obj *object) Iter(f func(*Term, *Term) error) error {
+ for _, node := range obj.keys {
+ if err := f(node.key, node.value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Until calls f for each key-value pair in the object. If f returns
+// true, iteration stops and Until returns true. Otherwise, return
+// false.
+func (obj *object) Until(f func(*Term, *Term) bool) bool {
+ err := obj.Iter(func(k, v *Term) error {
+ if f(k, v) {
+ return errStop
+ }
+ return nil
+ })
+ return err != nil
+}
+
+// Foreach calls f for each key-value pair in the object.
+func (obj *object) Foreach(f func(*Term, *Term)) {
+ obj.Iter(func(k, v *Term) error {
+ f(k, v)
+ return nil
+ })
+}
+
+// Map returns a new Object constructed by mapping each element in the object
+// using the function f.
+func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) {
+ cpy := newobject(obj.Len())
+ err := obj.Iter(func(k, v *Term) error {
+ var err error
+ k, v, err = f(k, v)
+ if err != nil {
+ return err
+ }
+ cpy.insert(k, v)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return cpy, nil
+}
+
+// Keys returns the keys of obj.
+func (obj *object) Keys() []*Term {
+ keys := make([]*Term, len(obj.keys))
+
+ for i, elem := range obj.keys {
+ keys[i] = elem.key
+ }
+
+ return keys
+}
+
+func (obj *object) Elem(i int) (*Term, *Term) {
+ return obj.keys[i].key, obj.keys[i].value
+}
+
+// MarshalJSON returns JSON encoded bytes representing obj.
+func (obj *object) MarshalJSON() ([]byte, error) {
+ sl := make([][2]*Term, obj.Len())
+ for i, node := range obj.keys {
+ sl[i] = Item(node.key, node.value)
+ }
+ return json.Marshal(sl)
+}
+
+// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are
+// overlapping keys between obj and other, the values associated with the keys are merged. Only
+// objects can be merged with other objects. If the values cannot be merged, the second return value
+// will be false.
+func (obj object) Merge(other Object) (Object, bool) {
+	return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) {
+		obj1, ok1 := v1.Value.(Object)
+		obj2, ok2 := v2.Value.(Object)
+		if !ok1 || !ok2 {
+			return nil, true
+		}
+		obj3, ok := obj1.Merge(obj2)
+		if !ok {
+			return nil, true
+		}
+		return NewTerm(obj3), false
+	})
+}
+
+// MergeWith returns a new Object containing the merged keys of obj and other.
+// If there are overlapping keys between obj and other, the conflictResolver
+// is called. The conflictResolver can return a merged value and a boolean
+// indicating if the merge has failed and should stop.
+func (obj object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) {
+ result := NewObject()
+ stop := obj.Until(func(k, v *Term) bool {
+ v2 := other.Get(k)
+ // The key didn't exist in other, keep the original value
+ if v2 == nil {
+ result.Insert(k, v)
+ return false
+ }
+
+ // The key exists in both, resolve the conflict if possible
+ merged, stop := conflictResolver(v, v2)
+ if !stop {
+ result.Insert(k, merged)
+ }
+ return stop
+ })
+
+ if stop {
+ return nil, false
+ }
+
+ // Copy in any values from other for keys that don't exist in obj
+ other.Foreach(func(k, v *Term) {
+ if v2 := obj.Get(k); v2 == nil {
+ result.Insert(k, v)
+ }
+ })
+ return result, true
+}
+
+// Filter returns a new object from values in obj where the keys are
+// found in filter. Array indices for values can be specified as
+// number strings.
+func (obj *object) Filter(filter Object) (Object, error) {
+ filtered, err := filterObject(obj, filter)
+ if err != nil {
+ return nil, err
+ }
+ return filtered.(Object), nil
+}
+
+// Len returns the number of elements in the object.
+func (obj object) Len() int {
+ return len(obj.keys)
+}
+
+func (obj object) String() string {
+ var buf []string
+ sorted := objectElemSliceSorted(obj.keys)
+ for _, elem := range sorted {
+ buf = append(buf, fmt.Sprintf("%s: %s", elem.key, elem.value))
+ }
+ return "{" + strings.Join(buf, ", ") + "}"
+}
+
+func (obj *object) get(k *Term) *objectElem {
+ hash := k.Hash()
+
+ // This `equal` utility is duplicated and manually inlined a number of
+ // time in this file. Inlining it avoids heap allocations, so it makes
+ // a big performance difference: some operations like lookup become twice
+ // as slow without it.
+ var equal func(v Value) bool
+
+ switch x := k.Value.(type) {
+ case Null, Boolean, String, Var:
+ equal = func(y Value) bool { return x == y }
+ case Number:
+ if xi, err := json.Number(x).Int64(); err == nil {
+ equal = func(y Value) bool {
+ if y, ok := y.(Number); ok {
+ if yi, err := json.Number(y).Int64(); err == nil {
+ return xi == yi
+ }
+ }
+
+ return false
+ }
+ break
+ }
+
+ a, ok := new(big.Float).SetString(string(x))
+ if !ok {
+ panic("illegal value")
+ }
+
+ equal = func(b Value) bool {
+ if b, ok := b.(Number); ok {
+ b, ok := new(big.Float).SetString(string(b))
+ if !ok {
+ panic("illegal value")
+ }
+
+ return a.Cmp(b) == 0
+ }
+
+ return false
+ }
+ default:
+ equal = func(y Value) bool { return Compare(x, y) == 0 }
+ }
+
+ for curr := obj.elems[hash]; curr != nil; curr = curr.next {
+ if equal(curr.key.Value) {
+ return curr
+ }
+ }
+ return nil
+}
+
+func (obj *object) insert(k, v *Term) {
+ hash := k.Hash()
+ head := obj.elems[hash]
+ // This `equal` utility is duplicated and manually inlined a number of
+ // time in this file. Inlining it avoids heap allocations, so it makes
+ // a big performance difference: some operations like lookup become twice
+ // as slow without it.
+ var equal func(v Value) bool
+
+ switch x := k.Value.(type) {
+ case Null, Boolean, String, Var:
+ equal = func(y Value) bool { return x == y }
+ case Number:
+ if xi, err := json.Number(x).Int64(); err == nil {
+ equal = func(y Value) bool {
+ if y, ok := y.(Number); ok {
+ if yi, err := json.Number(y).Int64(); err == nil {
+ return xi == yi
+ }
+ }
+
+ return false
+ }
+ break
+ }
+
+ a, ok := new(big.Float).SetString(string(x))
+ if !ok {
+ panic("illegal value")
+ }
+
+ equal = func(b Value) bool {
+ if b, ok := b.(Number); ok {
+ b, ok := new(big.Float).SetString(string(b))
+ if !ok {
+ panic("illegal value")
+ }
+
+ return a.Cmp(b) == 0
+ }
+
+ return false
+ }
+ default:
+ equal = func(y Value) bool { return Compare(x, y) == 0 }
+ }
+
+ for curr := head; curr != nil; curr = curr.next {
+ if equal(curr.key.Value) {
+ // The ground bit of the value may change in
+ // replace, hence adjust the counter per old
+ // and new value.
+
+ if curr.value.IsGround() {
+ obj.ground--
+ }
+ if v.IsGround() {
+ obj.ground++
+ }
+
+ curr.value = v
+ obj.hash = 0
+ return
+ }
+ }
+ elem := &objectElem{
+ key: k,
+ value: v,
+ next: head,
+ }
+ obj.elems[hash] = elem
+ obj.keys = append(obj.keys, elem)
+ obj.hash = 0
+
+ if k.IsGround() {
+ obj.ground++
+ }
+ if v.IsGround() {
+ obj.ground++
+ }
+}
+
+func filterObject(o Value, filter Value) (Value, error) {
+ if filter.Compare(Null{}) == 0 {
+ return o, nil
+ }
+
+ filteredObj, ok := filter.(*object)
+ if !ok {
+ return nil, fmt.Errorf("invalid filter value %q, expected an object", filter)
+ }
+
+ switch v := o.(type) {
+ case String, Number, Boolean, Null:
+ return o, nil
+ case *Array:
+ values := NewArray()
+ for i := 0; i < v.Len(); i++ {
+ subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i)))
+ if subFilter != nil {
+ filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value)
+ if err != nil {
+ return nil, err
+ }
+ values = values.Append(NewTerm(filteredValue))
+ }
+ }
+ return values, nil
+ case Set:
+ values := NewSet()
+ err := v.Iter(func(t *Term) error {
+ if filteredObj.Get(t) != nil {
+ filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value)
+ if err != nil {
+ return err
+ }
+ values.Add(NewTerm(filteredValue))
+ }
+ return nil
+ })
+ return values, err
+ case *object:
+ values := NewObject()
+
+ iterObj := v
+ other := filteredObj
+ if v.Len() < filteredObj.Len() {
+ iterObj = filteredObj
+ other = v
+ }
+
+ err := iterObj.Iter(func(key *Term, value *Term) error {
+ if other.Get(key) != nil {
+ filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value)
+ if err != nil {
+ return err
+ }
+ values.Insert(key, NewTerm(filteredValue))
+ }
+ return nil
+ })
+ return values, err
+ default:
+ return nil, fmt.Errorf("invalid object value type %q", v)
+ }
+}
+
+// ArrayComprehension represents an array comprehension as defined in the language.
+type ArrayComprehension struct {
+ Term *Term `json:"term"`
+ Body Body `json:"body"`
+}
+
+// ArrayComprehensionTerm creates a new Term with an ArrayComprehension value.
+func ArrayComprehensionTerm(term *Term, body Body) *Term {
+ return &Term{
+ Value: &ArrayComprehension{
+ Term: term,
+ Body: body,
+ },
+ }
+}
+
+// Copy returns a deep copy of ac.
+func (ac *ArrayComprehension) Copy() *ArrayComprehension {
+ cpy := *ac
+ cpy.Body = ac.Body.Copy()
+ cpy.Term = ac.Term.Copy()
+ return &cpy
+}
+
+// Equal returns true if ac is equal to other.
+func (ac *ArrayComprehension) Equal(other Value) bool {
+ return Compare(ac, other) == 0
+}
+
+// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (ac *ArrayComprehension) Compare(other Value) int {
+ return Compare(ac, other)
+}
+
+// Find returns the current value or a not found error.
+func (ac *ArrayComprehension) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return ac, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (ac *ArrayComprehension) Hash() int {
+ return ac.Term.Hash() + ac.Body.Hash()
+}
+
+// IsGround returns true if the Term and Body are ground.
+func (ac *ArrayComprehension) IsGround() bool {
+ return ac.Term.IsGround() && ac.Body.IsGround()
+}
+
+func (ac *ArrayComprehension) String() string {
+ return "[" + ac.Term.String() + " | " + ac.Body.String() + "]"
+}
+
+// ObjectComprehension represents an object comprehension as defined in the language.
+type ObjectComprehension struct {
+ Key *Term `json:"key"`
+ Value *Term `json:"value"`
+ Body Body `json:"body"`
+}
+
+// ObjectComprehensionTerm creates a new Term with an ObjectComprehension value.
+func ObjectComprehensionTerm(key, value *Term, body Body) *Term {
+ return &Term{
+ Value: &ObjectComprehension{
+ Key: key,
+ Value: value,
+ Body: body,
+ },
+ }
+}
+
+// Copy returns a deep copy of oc.
+func (oc *ObjectComprehension) Copy() *ObjectComprehension {
+ cpy := *oc
+ cpy.Body = oc.Body.Copy()
+ cpy.Key = oc.Key.Copy()
+ cpy.Value = oc.Value.Copy()
+ return &cpy
+}
+
+// Equal returns true if oc is equal to other.
+func (oc *ObjectComprehension) Equal(other Value) bool {
+ return Compare(oc, other) == 0
+}
+
+// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (oc *ObjectComprehension) Compare(other Value) int {
+ return Compare(oc, other)
+}
+
+// Find returns the current value or a not found error.
+func (oc *ObjectComprehension) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return oc, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (oc *ObjectComprehension) Hash() int {
+ return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash()
+}
+
+// IsGround returns true if the Key, Value and Body are ground.
+func (oc *ObjectComprehension) IsGround() bool {
+ return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround()
+}
+
+func (oc *ObjectComprehension) String() string {
+ return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}"
+}
+
+// SetComprehension represents a set comprehension as defined in the language.
+type SetComprehension struct {
+ Term *Term `json:"term"`
+ Body Body `json:"body"`
+}
+
+// SetComprehensionTerm creates a new Term with an SetComprehension value.
+func SetComprehensionTerm(term *Term, body Body) *Term {
+ return &Term{
+ Value: &SetComprehension{
+ Term: term,
+ Body: body,
+ },
+ }
+}
+
+// Copy returns a deep copy of sc.
+func (sc *SetComprehension) Copy() *SetComprehension {
+ cpy := *sc
+ cpy.Body = sc.Body.Copy()
+ cpy.Term = sc.Term.Copy()
+ return &cpy
+}
+
+// Equal returns true if sc is equal to other.
+func (sc *SetComprehension) Equal(other Value) bool {
+ return Compare(sc, other) == 0
+}
+
+// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (sc *SetComprehension) Compare(other Value) int {
+ return Compare(sc, other)
+}
+
+// Find returns the current value or a not found error.
+func (sc *SetComprehension) Find(path Ref) (Value, error) {
+ if len(path) == 0 {
+ return sc, nil
+ }
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code of the Value.
+func (sc *SetComprehension) Hash() int {
+ return sc.Term.Hash() + sc.Body.Hash()
+}
+
+// IsGround returns true if the Term and Body are ground.
+func (sc *SetComprehension) IsGround() bool {
+ return sc.Term.IsGround() && sc.Body.IsGround()
+}
+
+func (sc *SetComprehension) String() string {
+ return "{" + sc.Term.String() + " | " + sc.Body.String() + "}"
+}
+
+// Call represents a function call in the language.
+type Call []*Term
+
+// CallTerm returns a new Term with a Call value defined by terms. The first
+// term is the operator and the rest are operands.
+func CallTerm(terms ...*Term) *Term {
+	return NewTerm(Call(terms))
+}
+
+// Copy returns a deep copy of c.
+func (c Call) Copy() Call {
+ return termSliceCopy(c)
+}
+
+// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to,
+// or greater than other.
+func (c Call) Compare(other Value) int {
+ return Compare(c, other)
+}
+
+// Find returns the current value or a not found error.
+func (c Call) Find(Ref) (Value, error) {
+ return nil, errFindNotFound
+}
+
+// Hash returns the hash code for the Value.
+func (c Call) Hash() int {
+ return termSliceHash(c)
+}
+
+// IsGround returns true if the Value is ground.
+func (c Call) IsGround() bool {
+ return termSliceIsGround(c)
+}
+
+// MakeExpr returns a new Expr from this call.
+func (c Call) MakeExpr(output *Term) *Expr {
+	terms := []*Term(c)
+	return NewExpr(append(terms, output))
+}
+
+func (c Call) String() string {
+ args := make([]string, len(c)-1)
+ for i := 1; i < len(c); i++ {
+ args[i-1] = c[i].String()
+ }
+ return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", "))
+}
+
+func objectElemSliceSorted(a objectElemSlice) objectElemSlice {
+ b := make(objectElemSlice, len(a))
+ for i := range b {
+ b[i] = a[i]
+ }
+ sort.Sort(b)
+ return b
+}
+
+func termSliceCopy(a []*Term) []*Term {
+ cpy := make([]*Term, len(a))
+ for i := range a {
+ cpy[i] = a[i].Copy()
+ }
+ return cpy
+}
+
+func termSliceEqual(a, b []*Term) bool {
+ if len(a) == len(b) {
+ for i := range a {
+ if !a[i].Equal(b[i]) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func termSliceHash(a []*Term) int {
+ var hash int
+ for _, v := range a {
+ hash += v.Value.Hash()
+ }
+ return hash
+}
+
+func termSliceIsGround(a []*Term) bool {
+ for _, v := range a {
+ if !v.IsGround() {
+ return false
+ }
+ }
+ return true
+}
+
+// NOTE(tsandall): The unmarshalling errors in these functions are not
+// helpful for callers because they do not identify the source of the
+// unmarshalling error. Because OPA doesn't accept JSON describing ASTs
+// from callers, this is acceptable (for now). If that changes in the future,
+// the error messages should be revisited. The current approach focuses
+// on the happy path and treats all errors the same. If better error
+// reporting is needed, the error paths will need to be fleshed out.
+
+func unmarshalBody(b []interface{}) (Body, error) {
+ buf := Body{}
+ for _, e := range b {
+ if m, ok := e.(map[string]interface{}); ok {
+ expr := &Expr{}
+ if err := unmarshalExpr(expr, m); err == nil {
+ buf = append(buf, expr)
+ continue
+ }
+ }
+ goto unmarshal_error
+ }
+ return buf, nil
+unmarshal_error:
+ return nil, fmt.Errorf("ast: unable to unmarshal body")
+}
+
+func unmarshalExpr(expr *Expr, v map[string]interface{}) error {
+ if x, ok := v["negated"]; ok {
+ if b, ok := x.(bool); ok {
+ expr.Negated = b
+ } else {
+ return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"])
+ }
+ }
+ if err := unmarshalExprIndex(expr, v); err != nil {
+ return err
+ }
+ switch ts := v["terms"].(type) {
+ case map[string]interface{}:
+ t, err := unmarshalTerm(ts)
+ if err != nil {
+ return err
+ }
+ expr.Terms = t
+ case []interface{}:
+ terms, err := unmarshalTermSlice(ts)
+ if err != nil {
+ return err
+ }
+ expr.Terms = terms
+ default:
+ return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"])
+ }
+ if x, ok := v["with"]; ok {
+ if sl, ok := x.([]interface{}); ok {
+ ws := make([]*With, len(sl))
+ for i := range sl {
+ var err error
+ ws[i], err = unmarshalWith(sl[i])
+ if err != nil {
+ return err
+ }
+ }
+ expr.With = ws
+ }
+ }
+ return nil
+}
+
+func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error {
+ if x, ok := v["index"]; ok {
+ if n, ok := x.(json.Number); ok {
+ i, err := n.Int64()
+ if err == nil {
+ expr.Index = int(i)
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"])
+}
+
+func unmarshalTerm(m map[string]interface{}) (*Term, error) {
+ v, err := unmarshalValue(m)
+ if err != nil {
+ return nil, err
+ }
+ return &Term{Value: v}, nil
+}
+
+func unmarshalTermSlice(s []interface{}) ([]*Term, error) {
+ buf := []*Term{}
+ for _, x := range s {
+ if m, ok := x.(map[string]interface{}); ok {
+ if t, err := unmarshalTerm(m); err == nil {
+ buf = append(buf, t)
+ continue
+ } else {
+ return nil, err
+ }
+ }
+ return nil, fmt.Errorf("ast: unable to unmarshal term")
+ }
+ return buf, nil
+}
+
+func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) {
+ if s, ok := d["value"].([]interface{}); ok {
+ return unmarshalTermSlice(s)
+ }
+ return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`)
+}
+
+func unmarshalWith(i interface{}) (*With, error) {
+ if m, ok := i.(map[string]interface{}); ok {
+ tgt, _ := m["target"].(map[string]interface{})
+ target, err := unmarshalTerm(tgt)
+ if err == nil {
+ val, _ := m["value"].(map[string]interface{})
+ value, err := unmarshalTerm(val)
+ if err == nil {
+ return &With{
+ Target: target,
+ Value: value,
+ }, nil
+ }
+ return nil, err
+ }
+ return nil, err
+ }
+ return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`)
+}
+
+func unmarshalValue(d map[string]interface{}) (Value, error) {
+ v := d["value"]
+ switch d["type"] {
+ case "null":
+ return Null{}, nil
+ case "boolean":
+ if b, ok := v.(bool); ok {
+ return Boolean(b), nil
+ }
+ case "number":
+ if n, ok := v.(json.Number); ok {
+ return Number(n), nil
+ }
+ case "string":
+ if s, ok := v.(string); ok {
+ return String(s), nil
+ }
+ case "var":
+ if s, ok := v.(string); ok {
+ return Var(s), nil
+ }
+ case "ref":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return Ref(s), nil
+ }
+ case "array":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return NewArray(s...), nil
+ }
+ case "set":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ set := NewSet()
+ for _, x := range s {
+ set.Add(x)
+ }
+ return set, nil
+ }
+ case "object":
+ if s, ok := v.([]interface{}); ok {
+ buf := NewObject()
+ for _, x := range s {
+ if i, ok := x.([]interface{}); ok && len(i) == 2 {
+ p, err := unmarshalTermSlice(i)
+ if err == nil {
+ buf.Insert(p[0], p[1])
+ continue
+ }
+ }
+ goto unmarshal_error
+ }
+ return buf, nil
+ }
+ case "arraycomprehension", "setcomprehension":
+ if m, ok := v.(map[string]interface{}); ok {
+ t, ok := m["term"].(map[string]interface{})
+ if !ok {
+ goto unmarshal_error
+ }
+
+ term, err := unmarshalTerm(t)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ b, ok := m["body"].([]interface{})
+ if !ok {
+ goto unmarshal_error
+ }
+
+ body, err := unmarshalBody(b)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ if d["type"] == "arraycomprehension" {
+ return &ArrayComprehension{Term: term, Body: body}, nil
+ }
+ return &SetComprehension{Term: term, Body: body}, nil
+ }
+ case "objectcomprehension":
+ if m, ok := v.(map[string]interface{}); ok {
+ k, ok := m["key"].(map[string]interface{})
+ if !ok {
+ goto unmarshal_error
+ }
+
+ key, err := unmarshalTerm(k)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ v, ok := m["value"].(map[string]interface{})
+ if !ok {
+ goto unmarshal_error
+ }
+
+ value, err := unmarshalTerm(v)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ b, ok := m["body"].([]interface{})
+ if !ok {
+ goto unmarshal_error
+ }
+
+ body, err := unmarshalBody(b)
+ if err != nil {
+ goto unmarshal_error
+ }
+
+ return &ObjectComprehension{Key: key, Value: value, Body: body}, nil
+ }
+ case "call":
+ if s, err := unmarshalTermSliceValue(d); err == nil {
+ return Call(s), nil
+ }
+ }
+unmarshal_error:
+ return nil, fmt.Errorf("ast: unable to unmarshal term")
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/transform.go b/vendor/github.com/open-policy-agent/opa/ast/transform.go
new file mode 100644
index 00000000..7e9af9cc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/transform.go
@@ -0,0 +1,397 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import "fmt"
+
+// Transformer defines the interface for transforming AST elements. If the
+// transformer returns nil and does not indicate an error, the AST element will
+// be set to nil and no transformations will be applied to children of the
+// element.
+type Transformer interface {
+ Transform(v interface{}) (interface{}, error)
+}
+
+// Transform iterates the AST and calls the Transform function on the
+// Transformer t for x before recursing.
+func Transform(t Transformer, x interface{}) (interface{}, error) {
+
+ if term, ok := x.(*Term); ok {
+ return Transform(t, term.Value)
+ }
+
+ y, err := t.Transform(x)
+ if err != nil {
+ return x, err
+ }
+
+ if y == nil {
+ return nil, nil
+ }
+
+ var ok bool
+ switch y := y.(type) {
+ case *Module:
+ p, err := Transform(t, y.Package)
+ if err != nil {
+ return nil, err
+ }
+ if y.Package, ok = p.(*Package); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p)
+ }
+ for i := range y.Imports {
+ imp, err := Transform(t, y.Imports[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Imports[i], ok = imp.(*Import); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp)
+ }
+ }
+ for i := range y.Rules {
+ rule, err := Transform(t, y.Rules[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Rules[i], ok = rule.(*Rule); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule)
+ }
+ }
+ for i := range y.Comments {
+ comment, err := Transform(t, y.Comments[i])
+ if err != nil {
+ return nil, err
+ }
+ if y.Comments[i], ok = comment.(*Comment); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment)
+ }
+ }
+ return y, nil
+ case *Package:
+ ref, err := Transform(t, y.Path)
+ if err != nil {
+ return nil, err
+ }
+ if y.Path, ok = ref.(Ref); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref)
+ }
+ return y, nil
+ case *Import:
+ y.Path, err = transformTerm(t, y.Path)
+ if err != nil {
+ return nil, err
+ }
+ if y.Alias, err = transformVar(t, y.Alias); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *Rule:
+ if y.Head, err = transformHead(t, y.Head); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ if y.Else != nil {
+ rule, err := Transform(t, y.Else)
+ if err != nil {
+ return nil, err
+ }
+ if y.Else, ok = rule.(*Rule); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule)
+ }
+ }
+ return y, nil
+ case *Head:
+ if y.Name, err = transformVar(t, y.Name); err != nil {
+ return nil, err
+ }
+ if y.Args, err = transformArgs(t, y.Args); err != nil {
+ return nil, err
+ }
+ if y.Key != nil {
+ if y.Key, err = transformTerm(t, y.Key); err != nil {
+ return nil, err
+ }
+ }
+ if y.Value != nil {
+ if y.Value, err = transformTerm(t, y.Value); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ case Args:
+ for i := range y {
+ if y[i], err = transformTerm(t, y[i]); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ case Body:
+ for i, e := range y {
+ e, err := Transform(t, e)
+ if err != nil {
+ return nil, err
+ }
+ if y[i], ok = e.(*Expr); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e)
+ }
+ }
+ return y, nil
+ case *Expr:
+ switch ts := y.Terms.(type) {
+ case *SomeDecl:
+ decl, err := Transform(t, ts)
+ if err != nil {
+ return nil, err
+ }
+ if y.Terms, ok = decl.(*SomeDecl); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y, decl)
+ }
+ return y, nil
+ case []*Term:
+ for i := range ts {
+ if ts[i], err = transformTerm(t, ts[i]); err != nil {
+ return nil, err
+ }
+ }
+ case *Term:
+ if y.Terms, err = transformTerm(t, ts); err != nil {
+ return nil, err
+ }
+ }
+ for i, w := range y.With {
+ w, err := Transform(t, w)
+ if err != nil {
+ return nil, err
+ }
+ if y.With[i], ok = w.(*With); !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w)
+ }
+ }
+ return y, nil
+ case *With:
+ if y.Target, err = transformTerm(t, y.Target); err != nil {
+ return nil, err
+ }
+ if y.Value, err = transformTerm(t, y.Value); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case Ref:
+ for i, term := range y {
+ if y[i], err = transformTerm(t, term); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ case *object:
+ return y.Map(func(k, v *Term) (*Term, *Term, error) {
+ k, err := transformTerm(t, k)
+ if err != nil {
+ return nil, nil, err
+ }
+ v, err = transformTerm(t, v)
+ if err != nil {
+ return nil, nil, err
+ }
+ return k, v, nil
+ })
+ case *Array:
+ for i := 0; i < y.Len(); i++ {
+ v, err := transformTerm(t, y.Elem(i))
+ if err != nil {
+ return nil, err
+ }
+ y.set(i, v)
+ }
+ return y, nil
+ case Set:
+ y, err = y.Map(func(term *Term) (*Term, error) {
+ return transformTerm(t, term)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *ArrayComprehension:
+ if y.Term, err = transformTerm(t, y.Term); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *ObjectComprehension:
+ if y.Key, err = transformTerm(t, y.Key); err != nil {
+ return nil, err
+ }
+ if y.Value, err = transformTerm(t, y.Value); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case *SetComprehension:
+ if y.Term, err = transformTerm(t, y.Term); err != nil {
+ return nil, err
+ }
+ if y.Body, err = transformBody(t, y.Body); err != nil {
+ return nil, err
+ }
+ return y, nil
+ case Call:
+ for i := range y {
+ if y[i], err = transformTerm(t, y[i]); err != nil {
+ return nil, err
+ }
+ }
+ return y, nil
+ default:
+ return y, nil
+ }
+}
+
+// TransformRefs calls the function f on all references under x.
+func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) {
+ t := &GenericTransformer{func(x interface{}) (interface{}, error) {
+ if r, ok := x.(Ref); ok {
+ return f(r)
+ }
+ return x, nil
+ }}
+ return Transform(t, x)
+}
+
+// TransformVars calls the function f on all vars under x.
+func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) {
+ t := &GenericTransformer{func(x interface{}) (interface{}, error) {
+ if v, ok := x.(Var); ok {
+ return f(v)
+ }
+ return x, nil
+ }}
+ return Transform(t, x)
+}
+
+// TransformComprehensions calls the function f on all comprehensions under x.
+func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) {
+ t := &GenericTransformer{func(x interface{}) (interface{}, error) {
+ switch x := x.(type) {
+ case *ArrayComprehension:
+ return f(x)
+ case *SetComprehension:
+ return f(x)
+ case *ObjectComprehension:
+ return f(x)
+ }
+ return x, nil
+ }}
+ return Transform(t, x)
+}
+
+// GenericTransformer implements the Transformer interface to provide a utility
+// to transform AST nodes using a closure.
+type GenericTransformer struct {
+ f func(x interface{}) (interface{}, error)
+}
+
+// NewGenericTransformer returns a new GenericTransformer that will transform
+// AST nodes using the function f.
+func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer {
+ return &GenericTransformer{
+ f: f,
+ }
+}
+
+// Transform calls the function f on the GenericTransformer.
+func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) {
+ return t.f(x)
+}
+
+func transformHead(t Transformer, head *Head) (*Head, error) {
+ y, err := Transform(t, head)
+ if err != nil {
+ return nil, err
+ }
+ h, ok := y.(*Head)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", head, y)
+ }
+ return h, nil
+}
+
+func transformArgs(t Transformer, args Args) (Args, error) {
+ y, err := Transform(t, args)
+ if err != nil {
+ return nil, err
+ }
+ a, ok := y.(Args)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", args, y)
+ }
+ return a, nil
+}
+
+func transformBody(t Transformer, body Body) (Body, error) {
+ y, err := Transform(t, body)
+ if err != nil {
+ return nil, err
+ }
+ r, ok := y.(Body)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", body, y)
+ }
+ return r, nil
+}
+
+func transformExpr(t Transformer, expr *Expr) (*Expr, error) {
+ y, err := Transform(t, expr)
+ if err != nil {
+ return nil, err
+ }
+ h, ok := y.(*Expr)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", expr, y)
+ }
+ return h, nil
+}
+
+func transformTerm(t Transformer, term *Term) (*Term, error) {
+ v, err := transformValue(t, term.Value)
+ if err != nil {
+ return nil, err
+ }
+ r := &Term{
+ Value: v,
+ Location: term.Location,
+ }
+ return r, nil
+}
+
+func transformValue(t Transformer, v Value) (Value, error) {
+ v1, err := Transform(t, v)
+ if err != nil {
+ return nil, err
+ }
+ r, ok := v1.(Value)
+ if !ok {
+ return nil, fmt.Errorf("illegal transform: %T != %T", v, v1)
+ }
+ return r, nil
+}
+
+func transformVar(t Transformer, v Var) (Var, error) {
+ v1, err := Transform(t, v)
+ if err != nil {
+ return "", err
+ }
+ r, ok := v1.(Var)
+ if !ok {
+ return "", fmt.Errorf("illegal transform: %T != %T", v, v1)
+ }
+ return r, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/unify.go b/vendor/github.com/open-policy-agent/opa/ast/unify.go
new file mode 100644
index 00000000..7a87ea75
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/unify.go
@@ -0,0 +1,196 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+// Unify returns a set of variables that will be unified when the equality expression defined by
+// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already
+// unified.
+func Unify(safe VarSet, a *Term, b *Term) VarSet {
+ u := &unifier{
+ safe: safe,
+ unified: VarSet{},
+ unknown: map[Var]VarSet{},
+ }
+ u.unify(a, b)
+ return u.unified
+}
+
+type unifier struct {
+ safe VarSet
+ unified VarSet
+ unknown map[Var]VarSet
+}
+
+func (u *unifier) isSafe(x Var) bool {
+ return u.safe.Contains(x) || u.unified.Contains(x)
+}
+
+func (u *unifier) isHeadSafe(r Ref) bool {
+ if v, ok := r[0].Value.(Var); ok {
+ return u.isSafe(v)
+ }
+ for v := range r[0].Vars() {
+ if !u.isSafe(v) {
+ return false
+ }
+ }
+ return true
+}
+
+func (u *unifier) unify(a *Term, b *Term) {
+
+ switch a := a.Value.(type) {
+
+ case Var:
+ switch b := b.Value.(type) {
+ case Var:
+ if u.isSafe(b) {
+ u.markSafe(a)
+ } else if u.isSafe(a) {
+ u.markSafe(b)
+ } else {
+ u.markUnknown(a, b)
+ u.markUnknown(b, a)
+ }
+ case *Array, Object:
+ u.unifyAll(a, b)
+ case Ref:
+ if u.isHeadSafe(b) {
+ u.markSafe(a)
+ }
+ default:
+ u.markSafe(a)
+ }
+
+ case Ref:
+ if u.isHeadSafe(a) {
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *Array, Object:
+ u.markAllSafe(b)
+ }
+ }
+
+ case *ArrayComprehension:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *Array:
+ u.markAllSafe(b)
+ }
+ case *ObjectComprehension:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ case *object:
+ u.markAllSafe(b)
+ }
+ case *SetComprehension:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ }
+
+ case *Array:
+ switch b := b.Value.(type) {
+ case Var:
+ u.unifyAll(b, a)
+ case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ u.markAllSafe(a)
+ case *Array:
+ if a.Len() == b.Len() {
+ for i := 0; i < a.Len(); i++ {
+ u.unify(a.Elem(i), b.Elem(i))
+ }
+ }
+ }
+
+ case *object:
+ switch b := b.Value.(type) {
+ case Var:
+ u.unifyAll(b, a)
+ case Ref:
+ u.markAllSafe(a)
+ case *object:
+ if a.Len() == b.Len() {
+ a.Iter(func(k, v *Term) error {
+ if v2 := b.Get(k); v2 != nil {
+ u.unify(v, v2)
+ }
+ return nil
+ })
+ }
+ }
+
+ default:
+ switch b := b.Value.(type) {
+ case Var:
+ u.markSafe(b)
+ }
+ }
+}
+
+func (u *unifier) markAllSafe(x Value) {
+ vis := u.varVisitor()
+ vis.Walk(x)
+ for v := range vis.Vars() {
+ u.markSafe(v)
+ }
+}
+
+func (u *unifier) markSafe(x Var) {
+ u.unified.Add(x)
+
+ // Add dependencies of 'x' to safe set
+ vs := u.unknown[x]
+ delete(u.unknown, x)
+ for v := range vs {
+ u.markSafe(v)
+ }
+
+ // Add dependants of 'x' to safe set if they have no more
+ // dependencies.
+ for v, deps := range u.unknown {
+ if deps.Contains(x) {
+ delete(deps, x)
+ if len(deps) == 0 {
+ u.markSafe(v)
+ }
+ }
+ }
+}
+
+func (u *unifier) markUnknown(a, b Var) {
+ if _, ok := u.unknown[a]; !ok {
+ u.unknown[a] = NewVarSet()
+ }
+ u.unknown[a].Add(b)
+}
+
+func (u *unifier) unifyAll(a Var, b Value) {
+ if u.isSafe(a) {
+ u.markAllSafe(b)
+ } else {
+ vis := u.varVisitor()
+ vis.Walk(b)
+ unsafe := vis.Vars().Diff(u.safe).Diff(u.unified)
+ if len(unsafe) == 0 {
+ u.markSafe(a)
+ } else {
+ for v := range unsafe {
+ u.markUnknown(a, v)
+ }
+ }
+ }
+}
+
+func (u *unifier) varVisitor() *VarVisitor {
+ return NewVarVisitor().WithParams(VarVisitorParams{
+ SkipRefHead: true,
+ SkipObjectKeys: true,
+ SkipClosures: true,
+ })
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/varset.go b/vendor/github.com/open-policy-agent/opa/ast/varset.go
new file mode 100644
index 00000000..16dc3f58
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/varset.go
@@ -0,0 +1,100 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "fmt"
+ "sort"
+)
+
+// VarSet represents a set of variables.
+type VarSet map[Var]struct{}
+
+// NewVarSet returns a new VarSet containing the specified variables.
+func NewVarSet(vs ...Var) VarSet {
+ s := VarSet{}
+ for _, v := range vs {
+ s.Add(v)
+ }
+ return s
+}
+
+// Add updates the set to include the variable "v".
+func (s VarSet) Add(v Var) {
+ s[v] = struct{}{}
+}
+
+// Contains returns true if the set contains the variable "v".
+func (s VarSet) Contains(v Var) bool {
+ _, ok := s[v]
+ return ok
+}
+
+// Copy returns a shallow copy of the VarSet.
+func (s VarSet) Copy() VarSet {
+ cpy := VarSet{}
+ for v := range s {
+ cpy.Add(v)
+ }
+ return cpy
+}
+
+// Diff returns a VarSet containing variables in s that are not in vs.
+func (s VarSet) Diff(vs VarSet) VarSet {
+ r := VarSet{}
+ for v := range s {
+ if !vs.Contains(v) {
+ r.Add(v)
+ }
+ }
+ return r
+}
+
+// Equal returns true if s contains exactly the same elements as vs.
+func (s VarSet) Equal(vs VarSet) bool {
+ if len(s.Diff(vs)) > 0 {
+ return false
+ }
+ return len(vs.Diff(s)) == 0
+}
+
+// Intersect returns a VarSet containing variables in s that are in vs.
+func (s VarSet) Intersect(vs VarSet) VarSet {
+ r := VarSet{}
+ for v := range s {
+ if vs.Contains(v) {
+ r.Add(v)
+ }
+ }
+ return r
+}
+
+// Sorted returns a sorted slice of vars from s.
+func (s VarSet) Sorted() []Var {
+ sorted := make([]Var, 0, len(s))
+ for v := range s {
+ sorted = append(sorted, v)
+ }
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Compare(sorted[j]) < 0
+ })
+ return sorted
+}
+
+// Update merges the other VarSet into this VarSet.
+func (s VarSet) Update(vs VarSet) {
+ for v := range vs {
+ s.Add(v)
+ }
+}
+
+func (s VarSet) String() string {
+ tmp := []string{}
+ for v := range s {
+ tmp = append(tmp, string(v))
+ }
+ sort.Strings(tmp)
+ return fmt.Sprintf("%v", tmp)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/ast/visit.go b/vendor/github.com/open-policy-agent/opa/ast/visit.go
new file mode 100644
index 00000000..105fb58a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/ast/visit.go
@@ -0,0 +1,686 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ast
+
+// Visitor defines the interface for iterating AST elements. The Visit function
+// can return a Visitor w which will be used to visit the children of the AST
+// element v. If the Visit function returns nil, the children will not be
+// visited. This is deprecated.
+type Visitor interface {
+ Visit(v interface{}) (w Visitor)
+}
+
+// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before
+// and after the AST has been visited. This is deprecated.
+type BeforeAndAfterVisitor interface {
+ Visitor
+ Before(x interface{})
+ After(x interface{})
+}
+
+// Walk iterates the AST by calling the Visit function on the Visitor
+// v for x before recursing. This is deprecated.
+func Walk(v Visitor, x interface{}) {
+ if bav, ok := v.(BeforeAndAfterVisitor); !ok {
+ walk(v, x)
+ } else {
+ bav.Before(x)
+ defer bav.After(x)
+ walk(bav, x)
+ }
+}
+
+// WalkBeforeAndAfter iterates the AST by calling the Visit function on the
+// Visitor v for x before recursing. This is deprecated.
+func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) {
+ Walk(v, x)
+}
+
+func walk(v Visitor, x interface{}) {
+ w := v.Visit(x)
+ if w == nil {
+ return
+ }
+ switch x := x.(type) {
+ case *Module:
+ Walk(w, x.Package)
+ for _, i := range x.Imports {
+ Walk(w, i)
+ }
+ for _, r := range x.Rules {
+ Walk(w, r)
+ }
+ for _, c := range x.Comments {
+ Walk(w, c)
+ }
+ case *Package:
+ Walk(w, x.Path)
+ case *Import:
+ Walk(w, x.Path)
+ Walk(w, x.Alias)
+ case *Rule:
+ Walk(w, x.Head)
+ Walk(w, x.Body)
+ if x.Else != nil {
+ Walk(w, x.Else)
+ }
+ case *Head:
+ Walk(w, x.Name)
+ Walk(w, x.Args)
+ if x.Key != nil {
+ Walk(w, x.Key)
+ }
+ if x.Value != nil {
+ Walk(w, x.Value)
+ }
+ case Body:
+ for _, e := range x {
+ Walk(w, e)
+ }
+ case Args:
+ for _, t := range x {
+ Walk(w, t)
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *SomeDecl:
+ Walk(w, ts)
+ case []*Term:
+ for _, t := range ts {
+ Walk(w, t)
+ }
+ case *Term:
+ Walk(w, ts)
+ }
+ for i := range x.With {
+ Walk(w, x.With[i])
+ }
+ case *With:
+ Walk(w, x.Target)
+ Walk(w, x.Value)
+ case *Term:
+ Walk(w, x.Value)
+ case Ref:
+ for _, t := range x {
+ Walk(w, t)
+ }
+ case *object:
+ x.Foreach(func(k, vv *Term) {
+ Walk(w, k)
+ Walk(w, vv)
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ Walk(w, t)
+ })
+ case Set:
+ x.Foreach(func(t *Term) {
+ Walk(w, t)
+ })
+ case *ArrayComprehension:
+ Walk(w, x.Term)
+ Walk(w, x.Body)
+ case *ObjectComprehension:
+ Walk(w, x.Key)
+ Walk(w, x.Value)
+ Walk(w, x.Body)
+ case *SetComprehension:
+ Walk(w, x.Term)
+ Walk(w, x.Body)
+ case Call:
+ for _, t := range x {
+ Walk(w, t)
+ }
+ }
+}
+
+// WalkVars calls the function f on all vars under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkVars(x interface{}, f func(Var) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if v, ok := x.(Var); ok {
+ return f(v)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkClosures calls the function f on all closures under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkClosures(x interface{}, f func(interface{}) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ switch x.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return f(x)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkRefs calls the function f on all references under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkRefs(x interface{}, f func(Ref) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if r, ok := x.(Ref); ok {
+ return f(r)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkTerms calls the function f on all terms under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkTerms(x interface{}, f func(*Term) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if term, ok := x.(*Term); ok {
+ return f(term)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkWiths calls the function f on all with modifiers under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkWiths(x interface{}, f func(*With) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if w, ok := x.(*With); ok {
+ return f(w)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkExprs calls the function f on all expressions under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkExprs(x interface{}, f func(*Expr) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if r, ok := x.(*Expr); ok {
+ return f(r)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkBodies calls the function f on all bodies under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkBodies(x interface{}, f func(Body) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if b, ok := x.(Body); ok {
+ return f(b)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkRules calls the function f on all rules under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkRules(x interface{}, f func(*Rule) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if r, ok := x.(*Rule); ok {
+ stop := f(r)
+ // NOTE(tsandall): since rules cannot be embedded inside of queries
+ // we can stop early if there is no else block.
+ if stop || r.Else == nil {
+ return true
+ }
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// WalkNodes calls the function f on all nodes under x. If the function f
+// returns true, AST nodes under the last node will not be visited.
+func WalkNodes(x interface{}, f func(Node) bool) {
+ vis := &GenericVisitor{func(x interface{}) bool {
+ if n, ok := x.(Node); ok {
+ return f(n)
+ }
+ return false
+ }}
+ vis.Walk(x)
+}
+
+// GenericVisitor provides a utility to walk over AST nodes using a
+// closure. If the closure returns true, the visitor will not walk
+// over AST nodes under x.
+type GenericVisitor struct {
+ f func(x interface{}) bool
+}
+
+// NewGenericVisitor returns a new GenericVisitor that will invoke the function
+// f on AST nodes.
+func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor {
+ return &GenericVisitor{f}
+}
+
+// Walk iterates the AST by calling the function f on the
+// GenericVisitor before recursing. Contrary to the generic Walk, this
+// does not require allocating the visitor from heap.
+func (vis *GenericVisitor) Walk(x interface{}) {
+ if vis.f(x) {
+ return
+ }
+
+ switch x := x.(type) {
+ case *Module:
+ vis.Walk(x.Package)
+ for _, i := range x.Imports {
+ vis.Walk(i)
+ }
+ for _, r := range x.Rules {
+ vis.Walk(r)
+ }
+ for _, c := range x.Comments {
+ vis.Walk(c)
+ }
+ case *Package:
+ vis.Walk(x.Path)
+ case *Import:
+ vis.Walk(x.Path)
+ vis.Walk(x.Alias)
+ case *Rule:
+ vis.Walk(x.Head)
+ vis.Walk(x.Body)
+ if x.Else != nil {
+ vis.Walk(x.Else)
+ }
+ case *Head:
+ vis.Walk(x.Name)
+ vis.Walk(x.Args)
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ if x.Value != nil {
+ vis.Walk(x.Value)
+ }
+ case Body:
+ for _, e := range x {
+ vis.Walk(e)
+ }
+ case Args:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *SomeDecl:
+ vis.Walk(ts)
+ case []*Term:
+ for _, t := range ts {
+ vis.Walk(t)
+ }
+ case *Term:
+ vis.Walk(ts)
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ case *With:
+ vis.Walk(x.Target)
+ vis.Walk(x.Value)
+ case *Term:
+ vis.Walk(x.Value)
+ case Ref:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ case *object:
+ x.Foreach(func(k, v *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ vis.Walk(t)
+ })
+ case Set:
+ for _, t := range x.Slice() {
+ vis.Walk(t)
+ }
+ case *ArrayComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case *ObjectComprehension:
+ vis.Walk(x.Key)
+ vis.Walk(x.Value)
+ vis.Walk(x.Body)
+ case *SetComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case Call:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ }
+}
+
+// BeforeAfterVisitor provides a utility to walk over AST nodes using
+// closures. If the before closure returns true, the visitor will not
+// walk over AST nodes under x. The after closure is invoked always
+// after visiting a node.
+type BeforeAfterVisitor struct {
+ before func(x interface{}) bool
+ after func(x interface{})
+}
+
+// NewBeforeAfterVisitor returns a new BeforeAfterVisitor that
+// will invoke the functions before and after AST nodes.
+func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor {
+ return &BeforeAfterVisitor{before, after}
+}
+
+// Walk iterates the AST by calling the functions on the
+// BeforeAfterVisitor before and after recursing. Contrary to the
+// generic Walk, this does not require allocating the visitor from
+// heap.
+func (vis *BeforeAfterVisitor) Walk(x interface{}) {
+ defer vis.after(x)
+ if vis.before(x) {
+ return
+ }
+
+ switch x := x.(type) {
+ case *Module:
+ vis.Walk(x.Package)
+ for _, i := range x.Imports {
+ vis.Walk(i)
+ }
+ for _, r := range x.Rules {
+ vis.Walk(r)
+ }
+ for _, c := range x.Comments {
+ vis.Walk(c)
+ }
+ case *Package:
+ vis.Walk(x.Path)
+ case *Import:
+ vis.Walk(x.Path)
+ vis.Walk(x.Alias)
+ case *Rule:
+ vis.Walk(x.Head)
+ vis.Walk(x.Body)
+ if x.Else != nil {
+ vis.Walk(x.Else)
+ }
+ case *Head:
+ vis.Walk(x.Name)
+ vis.Walk(x.Args)
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ if x.Value != nil {
+ vis.Walk(x.Value)
+ }
+ case Body:
+ for _, e := range x {
+ vis.Walk(e)
+ }
+ case Args:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *SomeDecl:
+ vis.Walk(ts)
+ case []*Term:
+ for _, t := range ts {
+ vis.Walk(t)
+ }
+ case *Term:
+ vis.Walk(ts)
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ case *With:
+ vis.Walk(x.Target)
+ vis.Walk(x.Value)
+ case *Term:
+ vis.Walk(x.Value)
+ case Ref:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ case *object:
+ x.Foreach(func(k, v *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ vis.Walk(t)
+ })
+ case Set:
+ for _, t := range x.Slice() {
+ vis.Walk(t)
+ }
+ case *ArrayComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case *ObjectComprehension:
+ vis.Walk(x.Key)
+ vis.Walk(x.Value)
+ vis.Walk(x.Body)
+ case *SetComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case Call:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ }
+}
+
+// VarVisitor walks AST nodes under a given node and collects all encountered
+// variables. The collected variables can be controlled by specifying
+// VarVisitorParams when creating the visitor.
+type VarVisitor struct {
+ params VarVisitorParams
+ vars VarSet
+}
+
+// VarVisitorParams contains settings for a VarVisitor.
+type VarVisitorParams struct {
+ SkipRefHead bool
+ SkipRefCallHead bool
+ SkipObjectKeys bool
+ SkipClosures bool
+ SkipWithTarget bool
+ SkipSets bool
+}
+
+// NewVarVisitor returns a new VarVisitor object.
+func NewVarVisitor() *VarVisitor {
+ return &VarVisitor{
+ vars: NewVarSet(),
+ }
+}
+
+// WithParams sets the parameters in params on vis.
+func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor {
+ vis.params = params
+ return vis
+}
+
+// Vars returns a VarSet that contains collected vars.
+func (vis *VarVisitor) Vars() VarSet {
+ return vis.vars
+}
+
+func (vis *VarVisitor) visit(v interface{}) bool {
+ if vis.params.SkipObjectKeys {
+ if o, ok := v.(Object); ok {
+ o.Foreach(func(k, v *Term) {
+ vis.Walk(v)
+ })
+ return true
+ }
+ }
+ if vis.params.SkipRefHead {
+ if r, ok := v.(Ref); ok {
+ for _, t := range r[1:] {
+ vis.Walk(t)
+ }
+ return true
+ }
+ }
+ if vis.params.SkipClosures {
+ switch v.(type) {
+ case *ArrayComprehension, *ObjectComprehension, *SetComprehension:
+ return true
+ }
+ }
+ if vis.params.SkipWithTarget {
+ if v, ok := v.(*With); ok {
+ vis.Walk(v.Value)
+ return true
+ }
+ }
+ if vis.params.SkipSets {
+ if _, ok := v.(Set); ok {
+ return true
+ }
+ }
+ if vis.params.SkipRefCallHead {
+ switch v := v.(type) {
+ case *Expr:
+ if terms, ok := v.Terms.([]*Term); ok {
+ for _, t := range terms[0].Value.(Ref)[1:] {
+ vis.Walk(t)
+ }
+ for i := 1; i < len(terms); i++ {
+ vis.Walk(terms[i])
+ }
+ for _, w := range v.With {
+ vis.Walk(w)
+ }
+ return true
+ }
+ case Call:
+ operator := v[0].Value.(Ref)
+ for i := 1; i < len(operator); i++ {
+ vis.Walk(operator[i])
+ }
+ for i := 1; i < len(v); i++ {
+ vis.Walk(v[i])
+ }
+ return true
+ }
+ }
+ if v, ok := v.(Var); ok {
+ vis.vars.Add(v)
+ }
+ return false
+}
+
+// Walk iterates the AST by calling the visit method on the
+// VarVisitor before recursing. Contrary to the generic Walk, this
+// does not require allocating the visitor from heap.
+func (vis *VarVisitor) Walk(x interface{}) {
+ if vis.visit(x) {
+ return
+ }
+
+ switch x := x.(type) {
+ case *Module:
+ vis.Walk(x.Package)
+ for _, i := range x.Imports {
+ vis.Walk(i)
+ }
+ for _, r := range x.Rules {
+ vis.Walk(r)
+ }
+ for _, c := range x.Comments {
+ vis.Walk(c)
+ }
+ case *Package:
+ vis.Walk(x.Path)
+ case *Import:
+ vis.Walk(x.Path)
+ vis.Walk(x.Alias)
+ case *Rule:
+ vis.Walk(x.Head)
+ vis.Walk(x.Body)
+ if x.Else != nil {
+ vis.Walk(x.Else)
+ }
+ case *Head:
+ vis.Walk(x.Name)
+ vis.Walk(x.Args)
+ if x.Key != nil {
+ vis.Walk(x.Key)
+ }
+ if x.Value != nil {
+ vis.Walk(x.Value)
+ }
+ case Body:
+ for _, e := range x {
+ vis.Walk(e)
+ }
+ case Args:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ case *Expr:
+ switch ts := x.Terms.(type) {
+ case *SomeDecl:
+ vis.Walk(ts)
+ case []*Term:
+ for _, t := range ts {
+ vis.Walk(t)
+ }
+ case *Term:
+ vis.Walk(ts)
+ }
+ for i := range x.With {
+ vis.Walk(x.With[i])
+ }
+ case *With:
+ vis.Walk(x.Target)
+ vis.Walk(x.Value)
+ case *Term:
+ vis.Walk(x.Value)
+ case Ref:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ case *object:
+ x.Foreach(func(k, v *Term) {
+ vis.Walk(k)
+ vis.Walk(x.Get(k))
+ })
+ case *Array:
+ x.Foreach(func(t *Term) {
+ vis.Walk(t)
+ })
+ case Set:
+ for _, t := range x.Slice() {
+ vis.Walk(t)
+ }
+ case *ArrayComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case *ObjectComprehension:
+ vis.Walk(x.Key)
+ vis.Walk(x.Value)
+ vis.Walk(x.Body)
+ case *SetComprehension:
+ vis.Walk(x.Term)
+ vis.Walk(x.Body)
+ case Call:
+ for _, t := range x {
+ vis.Walk(t)
+ }
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go
new file mode 100644
index 00000000..faa29949
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/bundle.go
@@ -0,0 +1,1134 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle implements bundle loading.
+package bundle
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/format"
+ "github.com/open-policy-agent/opa/internal/file/archive"
+ "github.com/open-policy-agent/opa/internal/merge"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// Common file extensions and file names.
+const (
+ RegoExt = ".rego"
+ WasmFile = "policy.wasm"
+ ManifestExt = ".manifest"
+ SignaturesFile = "signatures.json"
+ dataFile = "data.json"
+ yamlDataFile = "data.yaml"
+ defaultHashingAlg = "SHA-256"
+ DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs
+)
+
+// Bundle represents a loaded bundle. The bundle can contain data and policies.
+type Bundle struct {
+ Signatures SignaturesConfig
+ Manifest Manifest
+ Data map[string]interface{}
+ Modules []ModuleFile
+ Wasm []byte // Deprecated. Use WasmModules instead
+ WasmModules []WasmModuleFile
+}
+
+// SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle.
+type SignaturesConfig struct {
+ Signatures []string `json:"signatures,omitempty"`
+}
+
+// isEmpty returns if the SignaturesConfig is empty.
+func (s SignaturesConfig) isEmpty() bool {
+ return reflect.DeepEqual(s, SignaturesConfig{})
+}
+
+// DecodedSignature represents the decoded JWT payload.
+type DecodedSignature struct {
+ Files []FileInfo `json:"files"`
+ KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead.
+ Scope string `json:"scope"`
+ IssuedAt int64 `json:"iat"`
+ Issuer string `json:"iss"`
+}
+
+// FileInfo contains the hashing algorithm used, resulting digest etc.
+type FileInfo struct {
+ Name string `json:"name"`
+ Hash string `json:"hash"`
+ Algorithm string `json:"algorithm"`
+}
+
+// NewFile returns a new FileInfo.
+func NewFile(name, hash, alg string) FileInfo {
+ return FileInfo{
+ Name: name,
+ Hash: hash,
+ Algorithm: alg,
+ }
+}
+
+// Manifest represents the manifest from a bundle. The manifest may contain
+// metadata such as the bundle revision.
+type Manifest struct {
+ Revision string `json:"revision"`
+ Roots *[]string `json:"roots,omitempty"`
+ WasmResolvers []WasmResolver `json:"wasm,omitempty"`
+}
+
+// WasmResolver maps a wasm module to an entrypoint ref.
+type WasmResolver struct {
+ Entrypoint string `json:"entrypoint,omitempty"`
+ Module string `json:"module,omitempty"`
+}
+
+// Init initializes the manifest. If you instantiate a manifest
+// manually, call Init to ensure that the roots are set properly.
+func (m *Manifest) Init() {
+ if m.Roots == nil {
+ defaultRoots := []string{""}
+ m.Roots = &defaultRoots
+ }
+}
+
+// AddRoot adds r to the roots of m. This function is idempotent.
+func (m *Manifest) AddRoot(r string) {
+ m.Init()
+ if !RootPathsContain(*m.Roots, r) {
+ *m.Roots = append(*m.Roots, r)
+ }
+}
+
+// Equal returns true if m is semantically equivalent to other.
+func (m Manifest) Equal(other Manifest) bool {
+
+ // This is safe since both are passed by value.
+ m.Init()
+ other.Init()
+
+ if m.Revision != other.Revision {
+ return false
+ }
+
+ if len(m.WasmResolvers) != len(other.WasmResolvers) {
+ return false
+ }
+
+ for i := 0; i < len(m.WasmResolvers); i++ {
+ if m.WasmResolvers[i] != other.WasmResolvers[i] {
+ return false
+ }
+ }
+
+ return m.rootSet().Equal(other.rootSet())
+}
+
+// Copy returns a deep copy of the manifest.
+func (m Manifest) Copy() Manifest {
+ m.Init()
+ roots := make([]string, len(*m.Roots))
+ copy(roots, *m.Roots)
+ m.Roots = &roots
+
+ wasmModules := make([]WasmResolver, len(m.WasmResolvers))
+ copy(wasmModules, m.WasmResolvers)
+ m.WasmResolvers = wasmModules
+
+ return m
+}
+
+func (m Manifest) String() string {
+ m.Init()
+	return fmt.Sprintf("<revision: %q, roots: %v, wasm: %+v>", m.Revision, *m.Roots, m.WasmResolvers)
+}
+
+func (m Manifest) rootSet() stringSet {
+ rs := map[string]struct{}{}
+
+ for _, r := range *m.Roots {
+ rs[r] = struct{}{}
+ }
+
+ return stringSet(rs)
+}
+
+type stringSet map[string]struct{}
+
+func (ss stringSet) Equal(other stringSet) bool {
+ if len(ss) != len(other) {
+ return false
+ }
+ for k := range other {
+ if _, ok := ss[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
+
+ m.Init()
+
+ // Validate roots in bundle.
+ roots := *m.Roots
+
+ // Standardize the roots (no starting or trailing slash)
+ for i := range roots {
+ roots[i] = strings.Trim(roots[i], "/")
+ }
+
+ for i := 0; i < len(roots)-1; i++ {
+ for j := i + 1; j < len(roots); j++ {
+ if RootPathsOverlap(roots[i], roots[j]) {
+ return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
+ }
+ }
+ }
+
+ // Validate modules in bundle.
+ for _, module := range b.Modules {
+ found := false
+ if path, err := module.Parsed.Package.Path.Ptr(); err == nil {
+ for i := range roots {
+ if strings.HasPrefix(path, roots[i]) {
+ found = true
+ break
+ }
+ }
+ }
+ if !found {
+ return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path)
+ }
+ }
+
+ // Build a set of wasm module entrypoints to validate
+ wasmModuleToEps := map[string]string{}
+ seenEps := map[string]struct{}{}
+ for _, wm := range b.WasmModules {
+ wasmModuleToEps[wm.Path] = ""
+ }
+
+ for _, wmConfig := range b.Manifest.WasmResolvers {
+ _, ok := wasmModuleToEps[wmConfig.Module]
+ if !ok {
+ return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module)
+ }
+
+ // Ensure wasm module entrypoint in within bundle roots
+ found := false
+ for i := range roots {
+ if strings.HasPrefix(wmConfig.Entrypoint, roots[i]) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module)
+ }
+
+ if _, ok := seenEps[wmConfig.Entrypoint]; ok {
+ return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint)
+ }
+ seenEps[wmConfig.Entrypoint] = struct{}{}
+
+ wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint
+ }
+
+ // Validate data in bundle.
+ return dfs(b.Data, "", func(path string, node interface{}) (bool, error) {
+ path = strings.Trim(path, "/")
+ for i := range roots {
+ if strings.HasPrefix(path, roots[i]) {
+ return true, nil
+ }
+ }
+ if _, ok := node.(map[string]interface{}); ok {
+ for i := range roots {
+ if strings.HasPrefix(roots[i], path) {
+ return false, nil
+ }
+ }
+ }
+ return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path)
+ })
+}
+
+// ModuleFile represents a single module contained in a bundle.
+type ModuleFile struct {
+ URL string
+ Path string
+ Raw []byte
+ Parsed *ast.Module
+}
+
+// WasmModuleFile represents a single wasm module contained in a bundle.
+type WasmModuleFile struct {
+ URL string
+ Path string
+ Entrypoints []ast.Ref
+ Raw []byte
+}
+
+// Reader contains the reader to load the bundle from.
+type Reader struct {
+ loader DirectoryLoader
+ includeManifestInData bool
+ metrics metrics.Metrics
+ baseDir string
+ verificationConfig *VerificationConfig
+ skipVerify bool
+ files map[string]FileInfo // files in the bundle signature payload
+ sizeLimitBytes int64
+}
+
+// NewReader is deprecated. Use NewCustomReader instead.
+func NewReader(r io.Reader) *Reader {
+ return NewCustomReader(NewTarballLoader(r))
+}
+
+// NewCustomReader returns a new Reader configured to use the
+// specified DirectoryLoader.
+func NewCustomReader(loader DirectoryLoader) *Reader {
+ nr := Reader{
+ loader: loader,
+ metrics: metrics.New(),
+ files: make(map[string]FileInfo),
+ sizeLimitBytes: DefaultSizeLimitBytes + 1,
+ }
+ return &nr
+}
+
+// IncludeManifestInData sets whether the manifest metadata should be
+// included in the bundle's data.
+func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader {
+ r.includeManifestInData = includeManifestInData
+ return r
+}
+
+// WithMetrics sets the metrics object to be used while loading bundles
+func (r *Reader) WithMetrics(m metrics.Metrics) *Reader {
+ r.metrics = m
+ return r
+}
+
+// WithBaseDir sets a base directory for file paths of loaded Rego
+// modules. This will *NOT* affect the loaded path of data files.
+func (r *Reader) WithBaseDir(dir string) *Reader {
+ r.baseDir = dir
+ return r
+}
+
+// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle
+func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader {
+ r.verificationConfig = config
+ return r
+}
+
+// WithSkipBundleVerification skips verification of a signed bundle
+func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader {
+ r.skipVerify = skipVerify
+ return r
+}
+
+// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger
+// than this, an error will be returned by the reader.
+func (r *Reader) WithSizeLimitBytes(n int64) *Reader {
+ r.sizeLimitBytes = n + 1
+ return r
+}
+
+// Read returns a new Bundle loaded from the reader.
+func (r *Reader) Read() (Bundle, error) {
+
+ var bundle Bundle
+ var descriptors []*Descriptor
+ var err error
+
+ bundle.Data = map[string]interface{}{}
+
+ bundle.Signatures, descriptors, err = listSignaturesAndDescriptors(r.loader, r.skipVerify, r.sizeLimitBytes)
+ if err != nil {
+ return bundle, err
+ }
+
+ err = r.checkSignaturesAndDescriptors(bundle.Signatures)
+ if err != nil {
+ return bundle, err
+ }
+
+ for _, f := range descriptors {
+ var buf bytes.Buffer
+ n, err := f.Read(&buf, r.sizeLimitBytes)
+ f.Close() // always close, even on error
+
+ if err != nil && err != io.EOF {
+ return bundle, err
+ } else if err == nil && n >= r.sizeLimitBytes {
+ return bundle, fmt.Errorf("bundle file exceeded max size (%v bytes)", r.sizeLimitBytes-1)
+ }
+
+ // verify the file content
+ if !bundle.Signatures.isEmpty() {
+ path := f.Path()
+ if r.baseDir != "" {
+ path = f.URL()
+ }
+ path = strings.TrimPrefix(path, "/")
+
+ // check if the file is to be excluded from bundle verification
+ if r.isFileExcluded(path) {
+ delete(r.files, path)
+ } else {
+ if err = r.verifyBundleFile(path, buf); err != nil {
+ return bundle, err
+ }
+ }
+ }
+
+ // Normalize the paths to use `/` separators
+ path := filepath.ToSlash(f.Path())
+
+ if strings.HasSuffix(path, RegoExt) {
+ fullPath := r.fullPath(path)
+ r.metrics.Timer(metrics.RegoModuleParse).Start()
+ module, err := ast.ParseModule(fullPath, buf.String())
+ r.metrics.Timer(metrics.RegoModuleParse).Stop()
+ if err != nil {
+ return bundle, err
+ }
+
+ mf := ModuleFile{
+ URL: f.URL(),
+ Path: fullPath,
+ Raw: buf.Bytes(),
+ Parsed: module,
+ }
+ bundle.Modules = append(bundle.Modules, mf)
+
+ } else if filepath.Base(path) == WasmFile {
+ bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{
+ URL: f.URL(),
+ Path: r.fullPath(path),
+ Raw: buf.Bytes(),
+ })
+ } else if filepath.Base(path) == dataFile {
+ var value interface{}
+
+ r.metrics.Timer(metrics.RegoDataParse).Start()
+ err := util.NewJSONDecoder(&buf).Decode(&value)
+ r.metrics.Timer(metrics.RegoDataParse).Stop()
+
+ if err != nil {
+ return bundle, errors.Wrapf(err, "bundle load failed on %v", r.fullPath(path))
+ }
+
+ if err := insertValue(&bundle, path, value); err != nil {
+ return bundle, err
+ }
+
+ } else if filepath.Base(path) == yamlDataFile {
+
+ var value interface{}
+
+ r.metrics.Timer(metrics.RegoDataParse).Start()
+ err := util.Unmarshal(buf.Bytes(), &value)
+ r.metrics.Timer(metrics.RegoDataParse).Stop()
+
+ if err != nil {
+ return bundle, errors.Wrapf(err, "bundle load failed on %v", r.fullPath(path))
+ }
+
+ if err := insertValue(&bundle, path, value); err != nil {
+ return bundle, err
+ }
+
+ } else if strings.HasSuffix(path, ManifestExt) {
+ if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil {
+ return bundle, errors.Wrap(err, "bundle load failed on manifest decode")
+ }
+ }
+ }
+
+ // check if the bundle signatures specify any files that weren't found in the bundle
+ if len(r.files) != 0 {
+ extra := []string{}
+ for k := range r.files {
+ extra = append(extra, k)
+ }
+ return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra)
+ }
+
+ if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil {
+ return bundle, err
+ }
+
+ // Inject the wasm module entrypoint refs into the WasmModuleFile structs
+ epMap := map[string][]string{}
+ for _, r := range bundle.Manifest.WasmResolvers {
+ epMap[r.Module] = append(epMap[r.Module], r.Entrypoint)
+ }
+ for i := 0; i < len(bundle.WasmModules); i++ {
+ entrypoints := epMap[bundle.WasmModules[i].Path]
+ for _, entrypoint := range entrypoints {
+ ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint)
+ if err != nil {
+ return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err)
+ }
+ bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref)
+ }
+ }
+
+ if r.includeManifestInData {
+ var metadata map[string]interface{}
+
+ b, err := json.Marshal(&bundle.Manifest)
+ if err != nil {
+ return bundle, errors.Wrap(err, "bundle load failed on manifest marshal")
+ }
+
+ err = util.UnmarshalJSON(b, &metadata)
+ if err != nil {
+ return bundle, errors.Wrap(err, "bundle load failed on manifest unmarshal")
+ }
+
+ // For backwards compatibility always write to the old unnamed manifest path
+ // This will *not* be correct if >1 bundle is in use...
+ if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil {
+ return bundle, errors.Wrapf(err, "bundle load failed on %v", legacyRevisionStoragePath)
+ }
+ }
+
+ return bundle, nil
+}
+
+func (r *Reader) isFileExcluded(path string) bool {
+ for _, e := range r.verificationConfig.Exclude {
+ match, _ := filepath.Match(e, path)
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error {
+ if r.skipVerify {
+ return nil
+ }
+
+ if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" {
+ return fmt.Errorf("bundle missing .signatures.json file")
+ }
+
+ if !signatures.isEmpty() {
+ if r.verificationConfig == nil {
+ return fmt.Errorf("verification key not provided")
+ }
+
+ // verify the JWT signatures included in the `.signatures.json` file
+ if err := r.verifyBundleSignature(signatures); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error {
+ var err error
+ r.files, err = VerifyBundleSignature(sc, r.verificationConfig)
+ return err
+}
+
+func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error {
+ return VerifyBundleFile(path, data, r.files)
+}
+
+func (r *Reader) fullPath(path string) string {
+ if r.baseDir != "" {
+ path = filepath.Join(r.baseDir, path)
+ }
+ return path
+}
+
+// Write is deprecated. Use NewWriter instead.
+func Write(w io.Writer, bundle Bundle) error {
+ return NewWriter(w).
+ UseModulePath(true).
+ DisableFormat(true).
+ Write(bundle)
+}
+
+// Writer implements bundle serialization.
+type Writer struct {
+ usePath bool
+ disableFormat bool
+ w io.Writer
+ signingConfig *SigningConfig
+}
+
+// NewWriter returns a bundle writer that writes to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ }
+}
+
+// UseModulePath configures the writer to use the module file path instead of the
+// module file URL during serialization. This is for backwards compatibility.
+func (w *Writer) UseModulePath(yes bool) *Writer {
+ w.usePath = yes
+ return w
+}
+
+// DisableFormat configures the writer to just write out raw bytes instead
+// of formatting modules before serialization.
+func (w *Writer) DisableFormat(yes bool) *Writer {
+ w.disableFormat = yes
+ return w
+}
+
+// Write writes the bundle to the writer's output stream.
+func (w *Writer) Write(bundle Bundle) error {
+ gw := gzip.NewWriter(w.w)
+ tw := tar.NewWriter(gw)
+
+ var buf bytes.Buffer
+
+ if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil {
+ return err
+ }
+
+ if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil {
+ return err
+ }
+
+ for _, module := range bundle.Modules {
+ path := module.URL
+ if w.usePath {
+ path = module.Path
+ }
+
+ if err := archive.WriteFile(tw, path, module.Raw); err != nil {
+ return err
+ }
+ }
+
+ if err := w.writeWasm(tw, bundle); err != nil {
+ return err
+ }
+
+ if err := writeManifest(tw, bundle); err != nil {
+ return err
+ }
+
+ if err := writeSignatures(tw, bundle); err != nil {
+ return err
+ }
+
+ if err := tw.Close(); err != nil {
+ return err
+ }
+
+ return gw.Close()
+}
+
+func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error {
+ for _, wm := range bundle.WasmModules {
+ path := wm.URL
+ if w.usePath {
+ path = wm.Path
+ }
+
+ err := archive.WriteFile(tw, path, wm.Raw)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(bundle.Wasm) > 0 {
+ err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writeManifest(tw *tar.Writer, bundle Bundle) error {
+
+ var buf bytes.Buffer
+
+ if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil {
+ return err
+ }
+
+ return archive.WriteFile(tw, ManifestExt, buf.Bytes())
+}
+
+func writeSignatures(tw *tar.Writer, bundle Bundle) error {
+
+ if bundle.Signatures.isEmpty() {
+ return nil
+ }
+
+ bs, err := json.MarshalIndent(bundle.Signatures, "", " ")
+ if err != nil {
+ return err
+ }
+
+ return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs)
+}
+
+func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) {
+
+ files := []FileInfo{}
+
+ bs, err := hash.HashFile(b.Data)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg))
+
+ if len(b.Wasm) != 0 {
+ bs, err := hash.HashFile(b.Wasm)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+ }
+
+ for _, wasmModule := range b.WasmModules {
+ bs, err := hash.HashFile(wasmModule.Raw)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+ }
+
+ bs, err = hash.HashFile(b.Manifest)
+ if err != nil {
+ return files, err
+ }
+ files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg))
+
+ return files, err
+}
+
+// FormatModules formats Rego modules
+func (b *Bundle) FormatModules(useModulePath bool) error {
+ var err error
+
+ for i, module := range b.Modules {
+ if module.Raw == nil {
+ module.Raw, err = format.Ast(module.Parsed)
+ if err != nil {
+ return err
+ }
+ } else {
+ path := module.URL
+ if useModulePath {
+ path = module.Path
+ }
+
+ module.Raw, err = format.Source(path, module.Raw)
+ if err != nil {
+ return err
+ }
+ }
+ b.Modules[i].Raw = module.Raw
+ }
+ return nil
+}
+
+// GenerateSignature generates the signature for the given bundle.
+func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error {
+
+ hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg))
+ if err != nil {
+ return err
+ }
+
+ files := []FileInfo{}
+
+ for _, module := range b.Modules {
+ bytes, err := hash.HashFile(module.Raw)
+ if err != nil {
+ return err
+ }
+
+ path := module.URL
+ if useModulePath {
+ path = module.Path
+ }
+ files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg))
+ }
+
+ result, err := hashBundleFiles(hash, b)
+ if err != nil {
+ return err
+ }
+ files = append(files, result...)
+
+ // generate signed token
+ token, err := GenerateSignedToken(files, signingConfig, keyID)
+ if err != nil {
+ return err
+ }
+
+ if b.Signatures.isEmpty() {
+ b.Signatures = SignaturesConfig{}
+ }
+
+ b.Signatures.Signatures = []string{string(token)}
+
+ return nil
+}
+
+// ParsedModules returns a map of parsed modules with names that are
+// unique and human readable for the given a bundle name.
+func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module {
+
+ mods := make(map[string]*ast.Module, len(b.Modules))
+
+ for _, mf := range b.Modules {
+ mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed
+ }
+
+ return mods
+}
+
+// Equal returns true if this bundle's contents equal the other bundle's
+// contents.
+func (b Bundle) Equal(other Bundle) bool {
+ if !reflect.DeepEqual(b.Data, other.Data) {
+ return false
+ }
+
+ if len(b.Modules) != len(other.Modules) {
+ return false
+ }
+ for i := range b.Modules {
+ if b.Modules[i].URL != other.Modules[i].URL {
+ return false
+ }
+ if b.Modules[i].Path != other.Modules[i].Path {
+ return false
+ }
+ if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) {
+ return false
+ }
+ if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) {
+ return false
+ }
+ }
+ if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) {
+ return false
+ }
+
+ return bytes.Equal(b.Wasm, other.Wasm)
+}
+
+// Copy returns a deep copy of the bundle.
+func (b Bundle) Copy() Bundle {
+
+ // Copy data.
+ var x interface{} = b.Data
+
+ if err := util.RoundTrip(&x); err != nil {
+ panic(err)
+ }
+
+ if x != nil {
+ b.Data = x.(map[string]interface{})
+ }
+
+ // Copy modules.
+ for i := range b.Modules {
+ bs := make([]byte, len(b.Modules[i].Raw))
+ copy(bs, b.Modules[i].Raw)
+ b.Modules[i].Raw = bs
+ b.Modules[i].Parsed = b.Modules[i].Parsed.Copy()
+ }
+
+ // Copy manifest.
+ b.Manifest = b.Manifest.Copy()
+
+ return b
+}
+
+func (b *Bundle) insertData(key []string, value interface{}) error {
+ // Build an object with the full structure for the value
+ obj, err := mktree(key, value)
+ if err != nil {
+ return err
+ }
+
+ // Merge the new data in with the current bundle data object
+ merged, ok := merge.InterfaceMaps(b.Data, obj)
+ if !ok {
+ return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...))
+ }
+
+ b.Data = merged
+
+ return nil
+}
+
+func (b *Bundle) readData(key []string) *interface{} {
+
+ if len(key) == 0 {
+ if len(b.Data) == 0 {
+ return nil
+ }
+ var result interface{} = b.Data
+ return &result
+ }
+
+ node := b.Data
+
+ for i := 0; i < len(key)-1; i++ {
+
+ child, ok := node[key[i]]
+ if !ok {
+ return nil
+ }
+
+ childObj, ok := child.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+
+ node = childObj
+ }
+
+ child, ok := node[key[len(key)-1]]
+ if !ok {
+ return nil
+ }
+
+ return &child
+}
+
+func mktree(path []string, value interface{}) (map[string]interface{}, error) {
+ if len(path) == 0 {
+ // For 0 length path the value is the full tree.
+ obj, ok := value.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("root value must be object")
+ }
+ return obj, nil
+ }
+
+ dir := map[string]interface{}{}
+ for i := len(path) - 1; i > 0; i-- {
+ dir[path[i]] = value
+ value = dir
+ dir = map[string]interface{}{}
+ }
+ dir[path[0]] = value
+
+ return dir, nil
+}
+
+// Merge accepts a set of bundles and merges them into a single result bundle. If there are
+// any conflicts during the merge (e.g., with roots) an error is returned. The result bundle
+// will have an empty revision except in the special case where a single bundle is provided
+// (and in that case the bundle is just returned unmodified.)
+func Merge(bundles []*Bundle) (*Bundle, error) {
+
+ if len(bundles) == 0 {
+ return nil, errors.New("expected at least one bundle")
+ }
+
+ if len(bundles) == 1 {
+ return bundles[0], nil
+ }
+
+ var roots []string
+ var result Bundle
+
+ for _, b := range bundles {
+
+ if b.Manifest.Roots == nil {
+ return nil, errors.New("bundle manifest not initialized")
+ }
+
+ roots = append(roots, *b.Manifest.Roots...)
+
+ result.Modules = append(result.Modules, b.Modules...)
+
+ for _, root := range *b.Manifest.Roots {
+ key := strings.Split(root, "/")
+ if val := b.readData(key); val != nil {
+ if err := result.insertData(key, *val); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...)
+ result.WasmModules = append(result.WasmModules, b.WasmModules...)
+
+ }
+
+ result.Manifest.Roots = &roots
+
+ if err := result.Manifest.validateAndInjectDefaults(result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// RootPathsOverlap takes in two bundle root paths and returns true if they overlap.
+func RootPathsOverlap(pathA string, pathB string) bool {
+ a := rootPathSegments(pathA)
+ b := rootPathSegments(pathB)
+ return rootContains(a, b) || rootContains(b, a)
+}
+
+// RootPathsContain takes a set of bundle root paths and returns true if the path is contained.
+func RootPathsContain(roots []string, path string) bool {
+ segments := rootPathSegments(path)
+ for i := range roots {
+ if rootContains(rootPathSegments(roots[i]), segments) {
+ return true
+ }
+ }
+ return false
+}
+
+func rootPathSegments(path string) []string {
+ return strings.Split(path, "/")
+}
+
+func rootContains(root []string, other []string) bool {
+
+ // A single segment, empty string root always contains the other.
+ if len(root) == 1 && root[0] == "" {
+ return true
+ }
+
+ if len(root) > len(other) {
+ return false
+ }
+
+ for j := range root {
+ if root[j] != other[j] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func insertValue(b *Bundle, path string, value interface{}) error {
+
+ // Remove leading / and . characters from the directory path. If the bundle
+ // was written with OPA then the paths will contain a leading slash. On the
+ // other hand, if the path is empty, filepath.Dir will return '.'.
+ // Note: filepath.Dir can return paths with '\' separators, always use
+ // filepath.ToSlash to keep them normalized.
+ dirpath := strings.TrimLeft(filepath.ToSlash(filepath.Dir(path)), "/.")
+ var key []string
+ if dirpath != "" {
+ key = strings.Split(dirpath, "/")
+ }
+ if err := b.insertData(key, value); err != nil {
+ return errors.Wrapf(err, "bundle load failed on %v", path)
+ }
+ return nil
+}
+
+func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error {
+ if stop, err := fn(path, value); err != nil {
+ return err
+ } else if stop {
+ return nil
+ }
+ obj, ok := value.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+ for key := range obj {
+ if err := dfs(obj[key], path+"/"+key, fn); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func modulePathWithPrefix(bundleName string, modulePath string) string {
+ // Default prefix is just the bundle name
+ prefix := bundleName
+
+ // Bundle names are sometimes just file paths, some of which
+ // are full urls (file:///foo/). Parse these and only use the path.
+ parsed, err := url.Parse(bundleName)
+ if err == nil {
+ prefix = filepath.Join(parsed.Host, parsed.Path)
+ }
+
+ return filepath.Join(prefix, modulePath)
+}
+
+// IsStructuredDoc checks if the file name equals a structured file extension ex. ".json"
+func IsStructuredDoc(name string) bool {
+ return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile ||
+ filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt
+}
+
+func listSignaturesAndDescriptors(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, []*Descriptor, error) {
+ descriptors := []*Descriptor{}
+ var signatures SignaturesConfig
+
+ for {
+ f, err := loader.NextFile()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return signatures, nil, errors.Wrap(err, "bundle read failed")
+ }
+
+ // check for the signatures file
+ if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) {
+ var buf bytes.Buffer
+ n, err := f.Read(&buf, sizeLimitBytes)
+ f.Close() // always close, even on error
+ if err != nil && err != io.EOF {
+ return signatures, nil, err
+ } else if err == nil && n >= sizeLimitBytes {
+ return signatures, nil, fmt.Errorf("bundle signatures file exceeded max size (%v bytes)", sizeLimitBytes-1)
+ }
+
+ if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil {
+ return signatures, nil, errors.Wrap(err, "bundle load failed on signatures decode")
+ }
+ } else if !strings.HasSuffix(f.Path(), SignaturesFile) {
+ descriptors = append(descriptors, f)
+ }
+ }
+ return signatures, descriptors, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/file.go b/vendor/github.com/open-policy-agent/opa/bundle/file.go
new file mode 100644
index 00000000..db8beb11
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/file.go
@@ -0,0 +1,235 @@
+package bundle
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+// Descriptor contains information about a file and
+// can be used to read the file contents.
+type Descriptor struct {
+ url string
+ path string
+ reader io.Reader
+ closer io.Closer
+ closeOnce *sync.Once
+}
+
+func newDescriptor(url, path string, reader io.Reader) *Descriptor {
+ return &Descriptor{
+ url: url,
+ path: path,
+ reader: reader,
+ }
+}
+
+func (d *Descriptor) withCloser(closer io.Closer) *Descriptor {
+ d.closer = closer
+ d.closeOnce = new(sync.Once)
+ return d
+}
+
+// Path returns the path of the file.
+func (d *Descriptor) Path() string {
+ return d.path
+}
+
+// URL returns the url of the file.
+func (d *Descriptor) URL() string {
+ return d.url
+}
+
+// Read will read the contents of the file the Descriptor refers to
+// into the dest writer, up to n bytes. Will return an io.EOF error
+// if EOF is encountered before n bytes are read.
+func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) {
+ n, err := io.CopyN(dest, d.reader, n)
+ return n, err
+}
+
+// Close closes the file; on some Loader implementations this might be a no-op.
+// It should *always* be called once the Descriptor is no longer needed.
+func (d *Descriptor) Close() error {
+ var err error
+ if d.closer != nil {
+ d.closeOnce.Do(func() {
+ err = d.closer.Close()
+ })
+ }
+ return err
+}
+
+// DirectoryLoader defines an interface which can be used to load
+// files from a directory by iterating over each one in the tree.
+type DirectoryLoader interface {
+ // NextFile must return io.EOF if there is no next value. The returned
+ // descriptor should *always* be closed when no longer needed.
+ NextFile() (*Descriptor, error)
+}
+
+type dirLoader struct {
+ root string
+ files []string
+ idx int
+}
+
+// NewDirectoryLoader returns a basic DirectoryLoader implementation
+// that will load files from a given root directory path.
+func NewDirectoryLoader(root string) DirectoryLoader {
+
+ if len(root) > 1 {
+ // Normalize relative directories, ex "./src/bundle" -> "src/bundle"
+ // We don't need an absolute path, but this makes the joined/trimmed
+ // paths more uniform.
+ if root[0] == '.' && root[1] == filepath.Separator {
+ if len(root) == 2 {
+ root = root[:1] // "./" -> "."
+ } else {
+ root = root[2:] // remove leading "./"
+ }
+ }
+ }
+
+ d := dirLoader{
+ root: root,
+ }
+ return &d
+}
+
+// NextFile iterates to the next file in the directory tree
+// and returns a file Descriptor for the file.
+func (d *dirLoader) NextFile() (*Descriptor, error) {
+ // build a list of all files we will iterate over and read, but only one time
+ if d.files == nil {
+ d.files = []string{}
+ err := filepath.Walk(d.root, func(path string, info os.FileInfo, err error) error {
+ if info != nil && info.Mode().IsRegular() {
+ d.files = append(d.files, filepath.ToSlash(path))
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list files")
+ }
+ }
+
+ // If done reading files then just return io.EOF
+ // errors for each NextFile() call
+ if d.idx >= len(d.files) {
+ return nil, io.EOF
+ }
+
+ fileName := d.files[d.idx]
+ d.idx++
+ fh, err := os.Open(fileName)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to open file %s", fileName)
+ }
+
+ // Trim off the root directory and return path as if chrooted
+ cleanedPath := strings.TrimPrefix(fileName, d.root)
+ if d.root == "." && filepath.Base(fileName) == ManifestExt {
+ cleanedPath = fileName
+ }
+
+ if !strings.HasPrefix(cleanedPath, "/") {
+ cleanedPath = "/" + cleanedPath
+ }
+
+ f := newDescriptor(path.Join(d.root, cleanedPath), cleanedPath, fh).withCloser(fh)
+ return f, nil
+}
+
+type tarballLoader struct {
+ baseURL string
+ r io.Reader
+ tr *tar.Reader
+ files []file
+ idx int
+}
+
+type file struct {
+ name string
+ reader io.Reader
+}
+
+// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead.
+func NewTarballLoader(r io.Reader) DirectoryLoader {
+ l := tarballLoader{
+ r: r,
+ }
+ return &l
+}
+
+// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads
+// files out of a gzipped tar archive. The file URLs will be prefixed
+// with the baseURL.
+func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader {
+ l := tarballLoader{
+ baseURL: strings.TrimSuffix(baseURL, "/"),
+ r: r,
+ }
+ return &l
+}
+
+// NextFile iterates to the next regular file in the gzipped tar archive
+// and returns a file Descriptor for the file.
+func (t *tarballLoader) NextFile() (*Descriptor, error) {
+ if t.tr == nil {
+ gr, err := gzip.NewReader(t.r)
+ if err != nil {
+ return nil, errors.Wrap(err, "archive read failed")
+ }
+
+ t.tr = tar.NewReader(gr)
+ }
+
+ if t.files == nil {
+ t.files = []file{}
+
+ for {
+ header, err := t.tr.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Keep iterating on the archive until we find a normal file
+ if header.Typeflag == tar.TypeReg {
+ f := file{name: header.Name}
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, t.tr); err != nil {
+ return nil, errors.Wrapf(err, "failed to copy file %s", header.Name)
+ }
+
+ f.reader = &buf
+
+ t.files = append(t.files, f)
+ }
+ }
+ }
+
+ // If done reading files then just return io.EOF
+ // errors for each NextFile() call
+ if t.idx >= len(t.files) {
+ return nil, io.EOF
+ }
+
+ f := t.files[t.idx]
+ t.idx++
+
+ return newDescriptor(path.Join(t.baseURL, f.name), f.name, f.reader), nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/hash.go b/vendor/github.com/open-policy-agent/opa/bundle/hash.go
new file mode 100644
index 00000000..b7f582cb
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/hash.go
@@ -0,0 +1,141 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package bundle
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "sort"
+ "strings"
+)
+
+// HashingAlgorithm represents a subset of hashing algorithms implemented in Go
+type HashingAlgorithm string
+
+// Supported values for HashingAlgorithm
+const (
+ MD5 HashingAlgorithm = "MD5"
+ SHA1 HashingAlgorithm = "SHA-1"
+ SHA224 HashingAlgorithm = "SHA-224"
+ SHA256 HashingAlgorithm = "SHA-256"
+ SHA384 HashingAlgorithm = "SHA-384"
+ SHA512 HashingAlgorithm = "SHA-512"
+ SHA512224 HashingAlgorithm = "SHA-512-224"
+ SHA512256 HashingAlgorithm = "SHA-512-256"
+)
+
+// String returns the string representation of a HashingAlgorithm
+func (alg HashingAlgorithm) String() string {
+ return string(alg)
+}
+
+// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy
+type SignatureHasher interface {
+ HashFile(v interface{}) ([]byte, error)
+}
+
+type hasher struct {
+ h func() hash.Hash // hash function factory
+}
+
+// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm
+func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) {
+ h := &hasher{}
+
+ switch alg {
+ case MD5:
+ h.h = md5.New
+ case SHA1:
+ h.h = sha1.New
+ case SHA224:
+ h.h = sha256.New224
+ case SHA256:
+ h.h = sha256.New
+ case SHA384:
+ h.h = sha512.New384
+ case SHA512:
+ h.h = sha512.New
+ case SHA512224:
+ h.h = sha512.New512_224
+ case SHA512256:
+ h.h = sha512.New512_256
+ default:
+ return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg)
+ }
+
+ return h, nil
+}
+
+// HashFile hashes the file content, JSON or binary, both in golang native format.
+func (h *hasher) HashFile(v interface{}) ([]byte, error) {
+ hf := h.h()
+ walk(v, hf)
+ return hf.Sum(nil), nil
+}
+
+// walk traverses v and writes its canonical serialization to h.
+//
+// Computation for unstructured documents is a hash of the document.
+//
+// Computation for the types of structured JSON document is as follows:
+//
+// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }.
+//
+// array: Hash [, then digest of the value, then comma (between items) and finally ].
+func walk(v interface{}, h io.Writer) {
+
+ switch x := v.(type) {
+ case map[string]interface{}:
+ h.Write([]byte("{"))
+
+ var keys []string
+ for k := range x {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for i, key := range keys {
+ if i > 0 {
+ h.Write([]byte(","))
+ }
+
+ h.Write(encodePrimitive(key))
+ h.Write([]byte(":"))
+ walk(x[key], h)
+ }
+
+ h.Write([]byte("}"))
+ case []interface{}:
+ h.Write([]byte("["))
+
+ for i, e := range x {
+ if i > 0 {
+ h.Write([]byte(","))
+ }
+ walk(e, h)
+ }
+
+ h.Write([]byte("]"))
+ case []byte:
+ h.Write(x)
+ default:
+ h.Write(encodePrimitive(x))
+ }
+}
+
+func encodePrimitive(v interface{}) []byte {
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ encoder.SetEscapeHTML(false)
+ encoder.Encode(v)
+ return []byte(strings.Trim(string(buf.Bytes()), "\n"))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/keys.go b/vendor/github.com/open-policy-agent/opa/bundle/keys.go
new file mode 100644
index 00000000..e1b916d2
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/keys.go
@@ -0,0 +1,127 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provides helpers that assist in creating the verification and signing key configuration.
+package bundle
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+ "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
+ "github.com/open-policy-agent/opa/keys"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+const (
+ defaultTokenSigningAlg = "RS256"
+)
+
+// KeyConfig holds the keys used to sign or verify bundles and tokens
+// Moved to own package, alias kept for backwards compatibility
+type KeyConfig = keys.Config
+
+// VerificationConfig represents the key configuration used to verify a signed bundle
+type VerificationConfig struct {
+ PublicKeys map[string]*KeyConfig
+ KeyID string `json:"keyid"`
+ Scope string `json:"scope"`
+ Exclude []string `json:"exclude_files"`
+}
+
+// NewVerificationConfig return a new VerificationConfig
+func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig {
+ return &VerificationConfig{
+ PublicKeys: keys,
+ KeyID: id,
+ Scope: scope,
+ Exclude: exclude,
+ }
+}
+
+// ValidateAndInjectDefaults validates the config and inserts default values
+func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error {
+ vc.PublicKeys = keys
+
+ if vc.KeyID != "" {
+ found := false
+ for key := range keys {
+ if key == vc.KeyID {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Errorf("key id %s not found", vc.KeyID)
+ }
+ }
+ return nil
+}
+
+// GetPublicKey returns the public key corresponding to the given key id
+func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) {
+ var kc *KeyConfig
+ var ok bool
+
+ if kc, ok = vc.PublicKeys[id]; !ok {
+ return nil, fmt.Errorf("verification key corresponding to ID %v not found", id)
+ }
+ return kc, nil
+}
+
+// SigningConfig represents the key configuration used to generate a signed bundle
+type SigningConfig struct {
+ Key string
+ Algorithm string
+ ClaimsPath string
+}
+
+// NewSigningConfig return a new SigningConfig
+func NewSigningConfig(key, alg, claimsPath string) *SigningConfig {
+ if alg == "" {
+ alg = defaultTokenSigningAlg
+ }
+
+ return &SigningConfig{
+ Key: key,
+ Algorithm: alg,
+ ClaimsPath: claimsPath,
+ }
+}
+
+// GetPrivateKey returns the private key or secret from the signing config
+func (s *SigningConfig) GetPrivateKey() (interface{}, error) {
+ var priv string
+ if _, err := os.Stat(s.Key); err == nil {
+ bs, err := ioutil.ReadFile(s.Key)
+ if err != nil {
+ return nil, err
+ }
+ priv = string(bs)
+ } else if os.IsNotExist(err) {
+ priv = s.Key
+ } else {
+ return nil, err
+ }
+ return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm))
+}
+
+// GetClaims returns the claims by reading the file specified in the signing config
+func (s *SigningConfig) GetClaims() (map[string]interface{}, error) {
+ var claims map[string]interface{}
+
+ bs, err := ioutil.ReadFile(s.ClaimsPath)
+ if err != nil {
+ return claims, err
+ }
+
+ if err := util.UnmarshalJSON(bs, &claims); err != nil {
+ return claims, err
+ }
+ return claims, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/sign.go b/vendor/github.com/open-policy-agent/opa/bundle/sign.go
new file mode 100644
index 00000000..673d364f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/sign.go
@@ -0,0 +1,73 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provides helpers that assist in creating a signed bundle.
+package bundle
+
+import (
+ "encoding/json"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+ "github.com/open-policy-agent/opa/internal/jwx/jws"
+)
+
+// GenerateSignedToken generates a signed token given the list of files to be
+// included in the payload and the bundle signing config. The keyID if non-empty,
+// represents the value for the "keyid" claim in the token
+func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) {
+ payload, err := generatePayload(files, sc, keyID)
+ if err != nil {
+ return "", err
+ }
+
+ privateKey, err := sc.GetPrivateKey()
+ if err != nil {
+ return "", err
+ }
+
+ var headers jws.StandardHeaders
+
+ if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil {
+ return "", err
+ }
+
+ if keyID != "" {
+ if err := headers.Set(jws.KeyIDKey, keyID); err != nil {
+ return "", err
+ }
+ }
+
+ hdr, err := json.Marshal(headers)
+ if err != nil {
+ return "", err
+ }
+
+ token, err := jws.SignLiteral(payload, jwa.SignatureAlgorithm(sc.Algorithm), privateKey, hdr)
+ if err != nil {
+ return "", err
+ }
+ return string(token), nil
+}
+
+func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) {
+ payload := make(map[string]interface{})
+ payload["files"] = files
+
+ if sc.ClaimsPath != "" {
+ claims, err := sc.GetClaims()
+ if err != nil {
+ return nil, err
+ }
+
+ for claim, value := range claims {
+ payload[claim] = value
+ }
+ } else {
+ if keyID != "" {
+ // keyid claim is deprecated but include it for backwards compatibility.
+ payload["keyid"] = keyID
+ }
+ }
+ return json.Marshal(payload)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/store.go b/vendor/github.com/open-policy-agent/opa/bundle/store.go
new file mode 100644
index 00000000..69a1bda6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/store.go
@@ -0,0 +1,616 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package bundle
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// BundlesBasePath is the storage path used for storing bundle metadata
+var BundlesBasePath = storage.MustParsePath("/system/bundles")
+
+// Note: As needed these helpers could be memoized.
+
+// ManifestStoragePath is the storage path used for the given named bundle manifest.
+func ManifestStoragePath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest")
+}
+
+func namedBundlePath(name string) storage.Path {
+ return append(BundlesBasePath, name)
+}
+
+func rootsPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "roots")
+}
+
+func revisionPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "revision")
+}
+
+func wasmModulePath(name string) storage.Path {
+ return append(BundlesBasePath, name, "wasm")
+}
+
+func wasmEntrypointsPath(name string) storage.Path {
+ return append(BundlesBasePath, name, "manifest", "wasm")
+}
+
+// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored.
+func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) {
+ value, err := store.Read(ctx, txn, BundlesBasePath)
+ if err != nil {
+ return nil, err
+ }
+
+ bundleMap, ok := value.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("corrupt manifest roots")
+ }
+
+ bundles := make([]string, len(bundleMap))
+ idx := 0
+ for name := range bundleMap {
+ bundles[idx] = name
+ idx++
+ }
+ return bundles, nil
+}
+
+// WriteManifestToStore will write the manifest into the storage. This function is called when
+// the bundle is activated.
+func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error {
+ return write(ctx, store, txn, ManifestStoragePath(name), manifest)
+}
+
+func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error {
+ if err := util.RoundTrip(&value); err != nil {
+ return err
+ }
+
+ var dir []string
+ if len(path) > 1 {
+ dir = path[:len(path)-1]
+ }
+
+ if err := storage.MakeDir(ctx, store, txn, dir); err != nil {
+ return err
+ }
+
+ return store.Write(ctx, txn, storage.AddOp, path, value)
+}
+
+// EraseManifestFromStore will remove the manifest from storage. This function is called
+// when the bundle is deactivated.
+func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+ path := namedBundlePath(name)
+ err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+ if err != nil && !storage.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error {
+ basePath := wasmModulePath(name)
+ for _, wm := range b.WasmModules {
+ path := append(basePath, wm.Path)
+ err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error {
+ path := wasmModulePath(name)
+
+ err := store.Write(ctx, txn, storage.RemoveOp, path, nil)
+ if err != nil && !storage.IsNotFound(err) {
+ return err
+ }
+ return nil
+}
+
+// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store.
+func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) {
+ path := wasmEntrypointsPath(name)
+ value, err := store.Read(ctx, txn, path)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := json.Marshal(value)
+ if err != nil {
+ return nil, fmt.Errorf("corrupt wasm manifest data")
+ }
+
+ var wasmMetadata []WasmResolver
+
+ err = util.UnmarshalJSON(bs, &wasmMetadata)
+ if err != nil {
+ return nil, fmt.Errorf("corrupt wasm manifest data")
+ }
+
+ return wasmMetadata, nil
+}
+
+// ReadWasmModulesFromStore will read the raw Wasm modules for the given bundle from the store.
+func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) {
+ path := wasmModulePath(name)
+ value, err := store.Read(ctx, txn, path)
+ if err != nil {
+ return nil, err
+ }
+
+ encodedModules, ok := value.(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("corrupt wasm modules")
+ }
+
+ rawModules := map[string][]byte{}
+ for path, enc := range encodedModules {
+ encStr, ok := enc.(string)
+ if !ok {
+ return nil, fmt.Errorf("corrupt wasm modules")
+ }
+ bs, err := base64.StdEncoding.DecodeString(encStr)
+ if err != nil {
+ return nil, err
+ }
+ rawModules[path] = bs
+ }
+ return rawModules, nil
+}
+
+// ReadBundleRootsFromStore returns the roots in the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) {
+ value, err := store.Read(ctx, txn, rootsPath(name))
+ if err != nil {
+ return nil, err
+ }
+
+ sl, ok := value.([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("corrupt manifest roots")
+ }
+
+ roots := make([]string, len(sl))
+
+ for i := range sl {
+ roots[i], ok = sl[i].(string)
+ if !ok {
+ return nil, fmt.Errorf("corrupt manifest root")
+ }
+ }
+
+ return roots, nil
+}
+
+// ReadBundleRevisionFromStore returns the revision in the specified bundle.
+// If the bundle is not activated, this function will return
+// storage NotFound error.
+func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) {
+ return readRevisionFromStore(ctx, store, txn, revisionPath(name))
+}
+
+func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) {
+ value, err := store.Read(ctx, txn, path)
+ if err != nil {
+ return "", err
+ }
+
+ str, ok := value.(string)
+ if !ok {
+ return "", fmt.Errorf("corrupt manifest revision")
+ }
+
+ return str, nil
+}
+
+// ActivateOpts defines options for the Activate API call.
+type ActivateOpts struct {
+ Ctx context.Context
+ Store storage.Store
+ Txn storage.Transaction
+ TxnCtx *storage.Context
+ Compiler *ast.Compiler
+ Metrics metrics.Metrics
+ Bundles map[string]*Bundle // Optional
+ ExtraModules map[string]*ast.Module // Optional
+
+ legacy bool
+}
+
+// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record
+// the manifest in storage. The compiler provided will have had the policies compiled on it.
+func Activate(opts *ActivateOpts) error {
+ opts.legacy = false
+ return activateBundles(opts)
+}
+
+// DeactivateOpts defines options for the Deactivate API call
+type DeactivateOpts struct {
+ Ctx context.Context
+ Store storage.Store
+ Txn storage.Transaction
+ BundleNames map[string]struct{}
+}
+
+// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store.
+func Deactivate(opts *DeactivateOpts) error {
+ erase := map[string]struct{}{}
+ for name := range opts.BundleNames {
+ if roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name); err == nil {
+ for _, root := range roots {
+ erase[root] = struct{}{}
+ }
+ } else if !storage.IsNotFound(err) {
+ return err
+ }
+ }
+ _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.BundleNames, erase)
+ return err
+}
+
+func activateBundles(opts *ActivateOpts) error {
+
+ // Build collections of bundle names, modules, and roots to erase
+ erase := map[string]struct{}{}
+ names := map[string]struct{}{}
+
+ for name, b := range opts.Bundles {
+ names[name] = struct{}{}
+
+ if roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name); err == nil {
+ for _, root := range roots {
+ erase[root] = struct{}{}
+ }
+ } else if !storage.IsNotFound(err) {
+ return err
+ }
+
+ // Erase data at new roots to prepare for writing the new data
+ for _, root := range *b.Manifest.Roots {
+ erase[root] = struct{}{}
+ }
+ }
+
+ // Before changing anything make sure the roots don't collide with any
+ // other bundles that already are activated or other bundles being activated.
+ err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles)
+ if err != nil {
+ return err
+ }
+
+ // Erase data and policies at new + old roots, and remove the old
+ // manifests before activating a new bundles.
+ remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, names, erase)
+ if err != nil {
+ return err
+ }
+
+ for _, b := range opts.Bundles {
+ // Write data from each new bundle into the store. Only write under the
+ // roots contained in their manifest. This should be done *before* the
+ // policies so that path conflict checks can occur.
+ if err := writeData(opts.Ctx, opts.Store, opts.Txn, *b.Manifest.Roots, b.Data); err != nil {
+ return err
+ }
+ }
+
+ // Write and compile the modules all at once to avoid having to re-do work.
+ remainingAndExtra := make(map[string]*ast.Module)
+ for name, mod := range remaining {
+ remainingAndExtra[name] = mod
+ }
+ for name, mod := range opts.ExtraModules {
+ remainingAndExtra[name] = mod
+ }
+
+ err = writeModules(opts.Ctx, opts.Store, opts.Txn, opts.Compiler, opts.Metrics, opts.Bundles, remainingAndExtra, opts.legacy)
+ if err != nil {
+ return err
+ }
+
+ for name, b := range opts.Bundles {
+ // Always write manifests to the named location. If the plugin is in the older style config
+ // then also write to the old legacy unnamed location.
+ if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, b.Manifest); err != nil {
+ return err
+ }
+ if opts.legacy {
+ if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, b.Manifest); err != nil {
+ return err
+ }
+ }
+
+ if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// erase bundles by name and roots. This will clear all policies and data at its roots and remove its
+// manifest from storage.
+func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) {
+
+ if err := eraseData(ctx, store, txn, roots); err != nil {
+ return nil, err
+ }
+
+ remaining, err := erasePolicies(ctx, store, txn, roots)
+ if err != nil {
+ return nil, err
+ }
+
+ for name := range names {
+ if err := EraseManifestFromStore(ctx, store, txn, name); err != nil && !storage.IsNotFound(err) {
+ return nil, err
+ }
+
+ if err := LegacyEraseManifestFromStore(ctx, store, txn); err != nil && !storage.IsNotFound(err) {
+ return nil, err
+ }
+
+ if err := eraseWasmModulesFromStore(ctx, store, txn, name); err != nil && !storage.IsNotFound(err) {
+ return nil, err
+ }
+ }
+
+ return remaining, nil
+}
+
+func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error {
+ for root := range roots {
+ path, ok := storage.ParsePathEscaped("/" + root)
+ if !ok {
+ return fmt.Errorf("manifest root path invalid: %v", root)
+ }
+ if len(path) > 0 {
+ if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); err != nil {
+ if !storage.IsNotFound(err) {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) (map[string]*ast.Module, error) {
+
+ ids, err := store.ListPolicies(ctx, txn)
+ if err != nil {
+ return nil, err
+ }
+
+ remaining := map[string]*ast.Module{}
+
+ for _, id := range ids {
+ bs, err := store.GetPolicy(ctx, txn, id)
+ if err != nil {
+ return nil, err
+ }
+ module, err := ast.ParseModule(id, string(bs))
+ if err != nil {
+ return nil, err
+ }
+ path, err := module.Package.Path.Ptr()
+ if err != nil {
+ return nil, err
+ }
+ deleted := false
+ for root := range roots {
+ if strings.HasPrefix(path, root) {
+ if err := store.DeletePolicy(ctx, txn, id); err != nil {
+ return nil, err
+ }
+ deleted = true
+ break
+ }
+ }
+ if !deleted {
+ remaining[id] = module
+ }
+ }
+
+ return remaining, nil
+}
+
+func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error {
+ for _, root := range roots {
+ path, ok := storage.ParsePathEscaped("/" + root)
+ if !ok {
+ return fmt.Errorf("manifest root path invalid: %v", root)
+ }
+ if value, ok := lookup(path, data); ok {
+ if len(path) > 0 {
+ if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
+ return err
+ }
+ }
+ if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error {
+
+ m.Timer(metrics.RegoModuleCompile).Start()
+ defer m.Timer(metrics.RegoModuleCompile).Stop()
+
+ modules := map[string]*ast.Module{}
+
+ // preserve any modules already on the compiler
+ for name, module := range compiler.Modules {
+ modules[name] = module
+ }
+
+ // preserve any modules passed in from the store
+ for name, module := range extraModules {
+ modules[name] = module
+ }
+
+ // include all the new bundle modules
+ for bundleName, b := range bundles {
+ if legacy {
+ for _, mf := range b.Modules {
+ modules[mf.Path] = mf.Parsed
+ }
+ } else {
+ for name, module := range b.ParsedModules(bundleName) {
+ modules[name] = module
+ }
+ }
+ }
+
+ if compiler.Compile(modules); compiler.Failed() {
+ return compiler.Errors
+ }
+ for bundleName, b := range bundles {
+ for _, mf := range b.Modules {
+ var path string
+
+ // For backwards compatibility, in legacy mode, upsert policies to
+ // the unprefixed path.
+ if legacy {
+ path = mf.Path
+ } else {
+ path = modulePathWithPrefix(bundleName, mf.Path)
+ }
+
+ if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) {
+ if len(path) == 0 {
+ return data, true
+ }
+ for i := 0; i < len(path)-1; i++ {
+ value, ok := data[path[i]]
+ if !ok {
+ return nil, false
+ }
+ obj, ok := value.(map[string]interface{})
+ if !ok {
+ return nil, false
+ }
+ data = obj
+ }
+ value, ok := data[path[len(path)-1]]
+ return value, ok
+}
+
+func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error {
+ collisions := map[string][]string{}
+ allBundles, err := ReadBundleNamesFromStore(ctx, store, txn)
+ if err != nil && !storage.IsNotFound(err) {
+ return err
+ }
+
+ allRoots := map[string][]string{}
+
+ // Build a map of roots for existing bundles already in the system
+ for _, name := range allBundles {
+ roots, err := ReadBundleRootsFromStore(ctx, store, txn, name)
+ if err != nil && !storage.IsNotFound(err) {
+ return err
+ }
+ allRoots[name] = roots
+ }
+
+ // Add in any bundles that are being activated, overwrite existing roots
+ // with new ones where bundles are in both groups.
+ for name, bundle := range bundles {
+ allRoots[name] = *bundle.Manifest.Roots
+ }
+
+ // Now check for each new bundle if it conflicts with any of the others
+ for name, bundle := range bundles {
+ for otherBundle, otherRoots := range allRoots {
+ if name == otherBundle {
+ // Skip the current bundle being checked
+ continue
+ }
+
+ // Compare the "new" roots with other existing roots (or a different bundle's new roots)
+ for _, newRoot := range *bundle.Manifest.Roots {
+ for _, otherRoot := range otherRoots {
+ if RootPathsOverlap(newRoot, otherRoot) {
+ collisions[otherBundle] = append(collisions[otherBundle], newRoot)
+ }
+ }
+ }
+ }
+ }
+
+ if len(collisions) > 0 {
+ var bundleNames []string
+ for name := range collisions {
+ bundleNames = append(bundleNames, name)
+ }
+ return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames)
+ }
+ return nil
+}
+
+// Helpers for the older single (unnamed) bundle style manifest storage.
+
+// legacyManifestStoragePath is the older unnamed bundle path for manifests to be stored.
+// Deprecated: Use ManifestStoragePath and named bundles instead.
+var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest")
+var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision")
+
+// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location.
+// Deprecated: Use WriteManifestToStore and named bundles instead.
+func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error {
+ return write(ctx, store, txn, legacyManifestStoragePath, manifest)
+}
+
+// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location.
+// Deprecated: Use EraseManifestFromStore and named bundles instead.
+func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error {
+ err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location.
+// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead.
+func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) {
+ return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath)
+}
+
+// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location.
+// Deprecated: Use Activate with named bundles instead.
+func ActivateLegacy(opts *ActivateOpts) error {
+ opts.legacy = true
+ return activateBundles(opts)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/bundle/verify.go b/vendor/github.com/open-policy-agent/opa/bundle/verify.go
new file mode 100644
index 00000000..d26b8e17
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/bundle/verify.go
@@ -0,0 +1,173 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package bundle provides helpers that assist in the bundle signature verification process
+package bundle
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+ "github.com/open-policy-agent/opa/internal/jwx/jws"
+ "github.com/open-policy-agent/opa/internal/jwx/jws/verify"
+ "github.com/open-policy-agent/opa/util"
+
+ "github.com/pkg/errors"
+)
+
+// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
+// If a signature is verified, it keeps track of the files specified in the JWT payload
+func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
+ files := make(map[string]FileInfo)
+
+ if len(sc.Signatures) == 0 {
+ return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)")
+ }
+
+ if len(sc.Signatures) > 1 {
+ return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)")
+ }
+
+ for _, token := range sc.Signatures {
+ payload, err := verifyJWTSignature(token, bvc)
+ if err != nil {
+ return files, err
+ }
+
+ for _, file := range payload.Files {
+ files[file.Name] = file
+ }
+ }
+ return files, nil
+}
+
+func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
+ // decode JWT to check if the header specifies the key to use and/or if claims have the scope.
+
+ parts, err := jws.SplitCompact(token)
+ if err != nil {
+ return nil, err
+ }
+
+ var decodedHeader []byte
+ if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
+ return nil, errors.Wrap(err, "failed to base64 decode JWT headers")
+ }
+
+ var hdr jws.StandardHeaders
+ if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
+ return nil, errors.Wrap(err, "failed to parse JWT headers")
+ }
+
+ payload, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ var ds DecodedSignature
+ if err := json.Unmarshal(payload, &ds); err != nil {
+ return nil, err
+ }
+
+ // check for the id of the key to use for JWT signature verification
+ // first in the OPA config. If not found, then check the JWT kid.
+ keyID := bvc.KeyID
+ if keyID == "" {
+ keyID = hdr.KeyID
+ }
+ if keyID == "" {
+ // If header has no key id, check the deprecated key claim.
+ keyID = ds.KeyID
+ }
+
+ if keyID == "" {
+ return nil, fmt.Errorf("verification key ID is empty")
+ }
+
+ // now that we have the keyID, fetch the actual key
+ keyConfig, err := bvc.GetPublicKey(keyID)
+ if err != nil {
+ return nil, err
+ }
+
+ // verify JWT signature
+ alg := jwa.SignatureAlgorithm(keyConfig.Algorithm)
+ key, err := verify.GetSigningKey(keyConfig.Key, alg)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = jws.Verify([]byte(token), alg, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // verify the scope
+ scope := bvc.Scope
+ if scope == "" {
+ scope = keyConfig.Scope
+ }
+
+ if ds.Scope != scope {
+ return nil, fmt.Errorf("scope mismatch")
+ }
+ return &ds, nil
+}
+
+// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature
+func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
+ var file FileInfo
+ var ok bool
+
+ if file, ok = files[path]; !ok {
+ return fmt.Errorf("file %v not included in bundle signature", path)
+ }
+
+ if file.Algorithm == "" {
+ return fmt.Errorf("no hashing algorithm provided for file %v", path)
+ }
+
+ hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
+ if err != nil {
+ return err
+ }
+
+ // hash the file content
+ // For unstructured files, hash the byte stream of the file
+ // For structured files, read the byte stream and parse into a JSON structure;
+ // then recursively order the fields of all objects alphabetically and then apply
+ // the hash function to result to compute the hash. This ensures that the digital signature is
+ // independent of whitespace and other non-semantic JSON features.
+ var value interface{}
+ if IsStructuredDoc(path) {
+ err := util.Unmarshal(data.Bytes(), &value)
+ if err != nil {
+ return err
+ }
+ } else {
+ value = data.Bytes()
+ }
+
+ bs, err := hash.HashFile(value)
+ if err != nil {
+ return err
+ }
+
+ // compare file hash with same file in the JWT payloads
+ fb, err := hex.DecodeString(file.Hash)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(fb, bs) {
+ return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
+ }
+
+ delete(files, path)
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/format/format.go b/vendor/github.com/open-policy-agent/opa/format/format.go
new file mode 100644
index 00000000..3baa3b70
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/format/format.go
@@ -0,0 +1,1108 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package format implements formatting of Rego source files.
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "sort"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// Source formats a Rego source file. The bytes provided must describe a complete
+// Rego module. If they don't, Source will return an error resulting from the attempt
+// to parse the bytes.
+func Source(filename string, src []byte) ([]byte, error) {
+ module, err := ast.ParseModule(filename, string(src))
+ if err != nil {
+ return nil, err
+ }
+ formatted, err := Ast(module)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", filename, err)
+ }
+ return formatted, nil
+}
+
+// MustAst is a helper function to format a Rego AST element. If any error
+// occurs, this function panics. This is mostly used for tests.
+func MustAst(x interface{}) []byte {
+ bs, err := Ast(x)
+ if err != nil {
+ panic(err)
+ }
+ return bs
+}
+
+// Ast formats a Rego AST element. If the passed value is not a valid AST
+// element, Ast returns nil and an error. If AST nodes are missing locations
+// an arbitrary location will be used.
+func Ast(x interface{}) (formatted []byte, err error) {
+
+ // The node has to be deep copied because it may be mutated below. Alternatively,
+	// we could avoid the copy by checking if mutation will occur first. For now,
+ // since format is not latency sensitive, just deep copy in all cases.
+ x = ast.Copy(x)
+
+ wildcards := map[ast.Var]*ast.Term{}
+
+ // Preprocess the AST. Set any required defaults and calculate
+ // values required for printing the formatted output.
+ ast.WalkNodes(x, func(x ast.Node) bool {
+ switch n := x.(type) {
+ case ast.Body:
+ if len(n) == 0 {
+ return false
+ }
+ case *ast.Term:
+ unmangleWildcardVar(wildcards, n)
+ }
+ if x.Loc() == nil {
+ x.SetLoc(defaultLocation(x))
+ }
+ return false
+ })
+
+ w := &writer{indent: "\t"}
+ switch x := x.(type) {
+ case *ast.Module:
+ w.writeModule(x)
+ case *ast.Package:
+ w.writePackage(x, nil)
+ case *ast.Import:
+ w.writeImports([]*ast.Import{x}, nil)
+ case *ast.Rule:
+ w.writeRule(x, false, nil)
+ case *ast.Head:
+ w.writeHead(x, false, false, nil)
+ case ast.Body:
+ w.writeBody(x, nil)
+ case *ast.Expr:
+ w.writeExpr(x, nil)
+ case *ast.With:
+ w.writeWith(x, nil)
+ case *ast.Term:
+ w.writeTerm(x, nil)
+ case ast.Value:
+ w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil)
+ case *ast.Comment:
+ w.writeComments([]*ast.Comment{x})
+ default:
+ return nil, fmt.Errorf("not an ast element: %v", x)
+ }
+
+ return squashTrailingNewlines(w.buf.Bytes()), nil
+}
+
+func unmangleWildcardVar(wildcards map[ast.Var]*ast.Term, n *ast.Term) {
+
+ v, ok := n.Value.(ast.Var)
+ if !ok || !v.IsWildcard() {
+ return
+ }
+
+ first, ok := wildcards[v]
+ if !ok {
+ wildcards[v] = n
+ return
+ }
+
+ w := v[len(ast.WildcardPrefix):]
+
+ // Prepend an underscore to ensure the variable will parse.
+ if len(w) == 0 || w[0] != '_' {
+ w = "_" + w
+ }
+
+ if first != nil {
+ first.Value = w
+ wildcards[v] = nil
+ }
+
+ n.Value = w
+}
+
+func squashTrailingNewlines(bs []byte) []byte {
+ if bytes.HasSuffix(bs, []byte("\n")) {
+ return append(bytes.TrimRight(bs, "\n"), '\n')
+ }
+ return bs
+}
+
+func defaultLocation(x ast.Node) *ast.Location {
+ return ast.NewLocation([]byte(x.String()), "", 1, 1)
+}
+
+type writer struct {
+ buf bytes.Buffer
+
+ indent string
+ level int
+ inline bool
+ beforeEnd *ast.Comment
+ delay bool
+ wildcardNames map[string]string
+}
+
+func (w *writer) writeModule(module *ast.Module) {
+ var pkg *ast.Package
+ var others []interface{}
+ var comments []*ast.Comment
+ visitor := ast.NewGenericVisitor(func(x interface{}) bool {
+ switch x := x.(type) {
+ case *ast.Comment:
+ comments = append(comments, x)
+ return true
+ case *ast.Import, *ast.Rule:
+ others = append(others, x)
+ return true
+ case *ast.Package:
+ pkg = x
+ return true
+ default:
+ return false
+ }
+ })
+ visitor.Walk(module)
+
+ sort.Slice(comments, func(i, j int) bool {
+ return locLess(comments[i], comments[j])
+ })
+
+ // XXX: The parser currently duplicates comments for some reason, so we need
+ // to remove duplicates here.
+ comments = dedupComments(comments)
+ sort.Slice(others, func(i, j int) bool {
+ return locLess(others[i], others[j])
+ })
+
+ comments = w.writePackage(pkg, comments)
+ var imports []*ast.Import
+ var rules []*ast.Rule
+ for len(others) > 0 {
+ imports, others = gatherImports(others)
+ comments = w.writeImports(imports, comments)
+ rules, others = gatherRules(others)
+ comments = w.writeRules(rules, comments)
+ }
+
+ for i, c := range comments {
+ w.writeLine(c.String())
+ if i == len(comments)-1 {
+ w.write("\n")
+ }
+ }
+}
+
+func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.Comment {
+ comments = w.insertComments(comments, pkg.Location)
+
+ w.startLine()
+ w.write(pkg.String())
+ w.blankLine()
+
+ return comments
+}
+
+func (w *writer) writeComments(comments []*ast.Comment) {
+ for i := 0; i < len(comments); i++ {
+ if i > 0 && locCmp(comments[i], comments[i-1]) > 1 {
+ w.blankLine()
+ }
+ w.writeLine(comments[i].String())
+ }
+}
+
+func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) []*ast.Comment {
+ for _, rule := range rules {
+ comments = w.insertComments(comments, rule.Location)
+ comments = w.writeRule(rule, false, comments)
+ w.blankLine()
+ }
+ return comments
+}
+
+func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) []*ast.Comment {
+ if rule == nil {
+ return comments
+ }
+
+ if !isElse {
+ w.startLine()
+ }
+
+ if rule.Default {
+ w.write("default ")
+ }
+
+ // OPA transforms lone bodies like `foo = {"a": "b"}` into rules of the form
+ // `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation
+ // in the formatted code instead of expanding the bodies into rules, so we
+ // pretend that the rule has no body in this case.
+ isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil
+
+ comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments)
+
+ if (len(rule.Body) == 0 || isExpandedConst) && !isElse {
+ w.endLine()
+ return comments
+ }
+
+ w.write(" {")
+ w.endLine()
+ w.up()
+
+ comments = w.writeBody(rule.Body, comments)
+
+ var close *ast.Location
+
+ if len(rule.Head.Args) > 0 {
+ close = closingLoc('(', ')', '{', '}', rule.Location)
+ } else {
+ close = closingLoc('[', ']', '{', '}', rule.Location)
+ }
+
+ comments = w.insertComments(comments, close)
+
+ w.down()
+ w.startLine()
+ w.write("}")
+ if rule.Else != nil {
+ comments = w.writeElse(rule, comments)
+ }
+ return comments
+}
+
+func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comment {
+ // If there was nothing else on the line before the "else" starts
+ // then preserve this style of else block, otherwise it will be
+ // started as an "inline" else eg:
+ //
+ // p {
+ // ...
+ // }
+ //
+ // else {
+ // ...
+ // }
+ //
+ // versus
+ //
+ // p {
+ // ...
+ // } else {
+ // ...
+ // }
+ //
+ // Note: This doesn't use the `close` as it currently isn't accurate for all
+ // types of values. Checking the actual line text is the most consistent approach.
+ wasInline := false
+ ruleLines := bytes.Split(rule.Location.Text, []byte("\n"))
+ relativeElseRow := rule.Else.Location.Row - rule.Location.Row
+ if relativeElseRow > 0 && relativeElseRow < len(ruleLines) {
+ elseLine := ruleLines[relativeElseRow]
+ if !bytes.HasPrefix(bytes.TrimSpace(elseLine), []byte("else")) {
+ wasInline = true
+ }
+ }
+
+ // If there are any comments between the closing brace of the previous rule and the start
+ // of the else block we will always insert a new blank line between them.
+ hasCommentAbove := len(comments) > 0 && comments[0].Location.Row-rule.Else.Head.Location.Row < 0 || w.beforeEnd != nil
+
+ if !hasCommentAbove && wasInline {
+ w.write(" ")
+ } else {
+ w.blankLine()
+ w.startLine()
+ }
+
+ rule.Else.Head.Name = ast.Var("else")
+ rule.Else.Head.Args = nil
+ comments = w.insertComments(comments, rule.Else.Head.Location)
+
+ if hasCommentAbove && !wasInline {
+ // The comments would have ended the line, be sure to start one again
+ // before writing the rest of the "else" rule.
+ w.startLine()
+ }
+
+ // For backwards compatibility adjust the rule head value location
+ // TODO: Refactor the logic for inserting comments, or special
+ // case comments in a rule head value so this can be removed
+ if rule.Else.Head.Value != nil {
+ rule.Else.Head.Value.Location = rule.Else.Head.Location
+ }
+
+ return w.writeRule(rule.Else, true, comments)
+}
+
+func (w *writer) writeHead(head *ast.Head, isDefault bool, isExpandedConst bool, comments []*ast.Comment) []*ast.Comment {
+ w.write(head.Name.String())
+ if len(head.Args) > 0 {
+ w.write("(")
+ var args []interface{}
+ for _, arg := range head.Args {
+ args = append(args, arg)
+ }
+ comments = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter())
+ w.write(")")
+ }
+ if head.Key != nil {
+ w.write("[")
+ comments = w.writeTerm(head.Key, comments)
+ w.write("]")
+ }
+ if head.Value != nil && (head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) {
+ if head.Assign {
+ w.write(" := ")
+ } else {
+ w.write(" = ")
+ }
+ comments = w.writeTerm(head.Value, comments)
+ }
+ return comments
+}
+
+func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) []*ast.Comment {
+ before, at, comments := partitionComments(comments, loc)
+ w.writeComments(before)
+ if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 {
+ w.blankLine()
+ }
+
+ w.beforeLineEnd(at)
+ return comments
+}
+
+func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Comment {
+ comments = w.insertComments(comments, body.Loc())
+ offset := 0
+ for i, expr := range body {
+ if i > 0 && expr.Location.Row-body[i-1].Location.Row-offset > 1 {
+ w.blankLine()
+ }
+ w.startLine()
+
+ comments = w.writeExpr(expr, comments)
+ w.endLine()
+ }
+ return comments
+}
+
+func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment {
+ comments = w.insertComments(comments, expr.Location)
+ if !w.inline {
+ w.startLine()
+ }
+
+ if expr.Negated {
+ w.write("not ")
+ }
+
+ switch t := expr.Terms.(type) {
+ case *ast.SomeDecl:
+ comments = w.writeSomeDecl(t, comments)
+ case []*ast.Term:
+ comments = w.writeFunctionCall(expr, comments)
+ case *ast.Term:
+ comments = w.writeTerm(t, comments)
+ }
+
+ var indented bool
+ for i, with := range expr.With {
+ if i > 0 && with.Location.Row-expr.With[i-1].Location.Row > 0 {
+ if !indented {
+ indented = true
+
+ w.up()
+ defer w.down()
+ }
+ w.endLine()
+ w.startLine()
+ }
+ comments = w.writeWith(with, comments)
+ }
+
+ return comments
+}
+
+func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*ast.Comment {
+ comments = w.insertComments(comments, decl.Location)
+ w.write("some ")
+
+ row := decl.Location.Row
+
+ for i, term := range decl.Symbols {
+
+ if term.Location.Row > row {
+ w.endLine()
+ w.startLine()
+ w.write(w.indent)
+ row = term.Location.Row
+ } else if i > 0 {
+ w.write(" ")
+ }
+
+ comments = w.writeTerm(term, comments)
+
+ if i < len(decl.Symbols)-1 {
+ w.write(",")
+ }
+ }
+
+ return comments
+}
+
+func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment {
+
+ terms := expr.Terms.([]*ast.Term)
+
+ bi, ok := ast.BuiltinMap[terms[0].Value.String()]
+ if !ok || bi.Infix == "" {
+ return w.writeFunctionCallPlain(terms, comments)
+ }
+
+ numDeclArgs := len(bi.Decl.Args())
+ numCallArgs := len(terms) - 1
+
+ if numCallArgs == numDeclArgs {
+ // Print infix where result is unassigned (e.g., x != y)
+ comments = w.writeTerm(terms[1], comments)
+ w.write(" " + string(bi.Infix) + " ")
+ return w.writeTerm(terms[2], comments)
+ } else if numCallArgs == numDeclArgs+1 {
+ // Print infix where result is assigned (e.g., z = x + y)
+ comments = w.writeTerm(terms[3], comments)
+ w.write(" " + ast.Equality.Infix + " ")
+ comments = w.writeTerm(terms[1], comments)
+ w.write(" " + bi.Infix + " ")
+ comments = w.writeTerm(terms[2], comments)
+ return comments
+ }
+
+ return w.writeFunctionCallPlain(terms, comments)
+}
+
+func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) []*ast.Comment {
+ w.write(string(terms[0].String()) + "(")
+ if len(terms) > 1 {
+ for _, v := range terms[1 : len(terms)-1] {
+ comments = w.writeTerm(v, comments)
+ w.write(", ")
+ }
+ comments = w.writeTerm(terms[len(terms)-1], comments)
+ }
+ w.write(")")
+ return comments
+}
+
+func (w *writer) writeWith(with *ast.With, comments []*ast.Comment) []*ast.Comment {
+ comments = w.insertComments(comments, with.Location)
+ w.write(" with ")
+ comments = w.writeTerm(with.Target, comments)
+ w.write(" as ")
+ return w.writeTerm(with.Value, comments)
+}
+
+func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) []*ast.Comment {
+ return w.writeTermParens(false, term, comments)
+}
+
+func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) []*ast.Comment {
+ comments = w.insertComments(comments, term.Location)
+ if !w.inline {
+ w.startLine()
+ }
+
+ switch x := term.Value.(type) {
+ case ast.Ref:
+ w.writeRef(x)
+ case ast.Object:
+ comments = w.writeObject(x, term.Location, comments)
+ case *ast.Array:
+ comments = w.writeArray(x, term.Location, comments)
+ case ast.Set:
+ comments = w.writeSet(x, term.Location, comments)
+ case *ast.ArrayComprehension:
+ comments = w.writeArrayComprehension(x, term.Location, comments)
+ case *ast.ObjectComprehension:
+ comments = w.writeObjectComprehension(x, term.Location, comments)
+ case *ast.SetComprehension:
+ comments = w.writeSetComprehension(x, term.Location, comments)
+ case ast.String:
+ if term.Location.Text[0] == '`' {
+ // To preserve raw strings, we need to output the original text,
+ // not what x.String() would give us.
+ w.write(string(term.Location.Text))
+ } else {
+ w.write(x.String())
+ }
+ case ast.Var:
+ w.write(w.formatVar(x))
+ case ast.Call:
+ comments = w.writeCall(parens, x, term.Location, comments)
+ case fmt.Stringer:
+ w.write(x.String())
+ }
+
+ if !w.inline {
+ w.startLine()
+ }
+ return comments
+}
+
+func (w *writer) writeRef(x ast.Ref) {
+ if len(x) > 0 {
+ w.writeTerm(x[0], nil)
+ path := x[1:]
+ for _, p := range path {
+ switch p := p.Value.(type) {
+ case ast.String:
+ w.writeRefStringPath(p)
+ case ast.Var:
+ w.writeBracketed(w.formatVar(p))
+ default:
+ w.writeBracketed(p.String())
+ }
+ }
+ }
+}
+
+func (w *writer) writeBracketed(str string) {
+ w.write("[" + str + "]")
+}
+
+var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
+
+func (w *writer) writeRefStringPath(s ast.String) {
+ str := string(s)
+ if varRegexp.MatchString(str) && !ast.IsKeyword(str) {
+ w.write("." + str)
+ } else {
+ w.writeBracketed(s.String())
+ }
+}
+
+func (w *writer) formatVar(v ast.Var) string {
+ if v.IsWildcard() {
+ return ast.Wildcard.String()
+ }
+ return v.String()
+}
+
+func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+
+ bi, ok := ast.BuiltinMap[x[0].String()]
+ if !ok || bi.Infix == "" {
+ return w.writeFunctionCallPlain([]*ast.Term(x), comments)
+ }
+
+ // TODO(tsandall): improve to consider precedence?
+ if parens {
+ w.write("(")
+ }
+ comments = w.writeTermParens(true, x[1], comments)
+ w.write(" " + bi.Infix + " ")
+ comments = w.writeTermParens(true, x[2], comments)
+ if parens {
+ w.write(")")
+ }
+
+ return comments
+}
+
+func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ w.write("{")
+ defer w.write("}")
+
+ var s []interface{}
+ obj.Foreach(func(k, v *ast.Term) {
+ s = append(s, ast.Item(k, v))
+ })
+ return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter())
+}
+
+func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ w.write("[")
+ defer w.write("]")
+
+ var s []interface{}
+ arr.Foreach(func(t *ast.Term) {
+ s = append(s, t)
+ })
+ return w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter())
+}
+
+func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+
+ if set.Len() == 0 {
+ w.write("set()")
+ return w.insertComments(comments, closingLoc(0, 0, '(', ')', loc))
+ }
+
+ w.write("{")
+ defer w.write("}")
+
+ var s []interface{}
+ set.Foreach(func(t *ast.Term) {
+ s = append(s, t)
+ })
+ return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter())
+}
+
+func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ w.write("[")
+ defer w.write("]")
+
+ return w.writeComprehension('[', ']', arr.Term, arr.Body, loc, comments)
+}
+
+func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ w.write("{")
+ defer w.write("}")
+
+ return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments)
+}
+
+func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ w.write("{")
+ defer w.write("}")
+
+ object.Value.Location = object.Key.Location // Ensure the value is not written on the next line.
+ if object.Key.Location.Row-loc.Row > 1 {
+ w.endLine()
+ w.startLine()
+ }
+
+ comments = w.writeTerm(object.Key, comments)
+ w.write(": ")
+ return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments)
+}
+
+func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ if term.Location.Row-loc.Row > 1 {
+ w.endLine()
+ w.startLine()
+ }
+
+ comments = w.writeTerm(term, comments)
+ w.write(" |")
+
+ return w.writeComprehensionBody(open, close, body, term.Location, loc, comments)
+}
+
+func (w *writer) writeComprehensionBody(open, close byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) []*ast.Comment {
+ var exprs []interface{}
+ for _, expr := range body {
+ exprs = append(exprs, expr)
+ }
+ lines := groupIterable(exprs, term)
+
+ if body.Loc().Row-term.Row > 0 || len(lines) > 1 {
+ w.endLine()
+ w.up()
+ defer w.startLine()
+ defer w.down()
+
+ comments = w.writeBody(body, comments)
+ } else {
+ w.write(" ")
+ i := 0
+ for ; i < len(body)-1; i++ {
+ comments = w.writeExpr(body[i], comments)
+ w.write("; ")
+ }
+ comments = w.writeExpr(body[i], comments)
+ }
+
+ return w.insertComments(comments, closingLoc(0, 0, open, close, compr))
+}
+
+func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []*ast.Comment {
+ m, comments := mapImportsToComments(imports, comments)
+
+ groups := groupImports(imports)
+ for _, group := range groups {
+ comments = w.insertComments(comments, group[0].Loc())
+
+ // Sort imports within a newline grouping.
+ sort.Slice(group, func(i, j int) bool {
+ a := group[i]
+ b := group[j]
+ return a.Compare(b) < 0
+ })
+ for _, i := range group {
+ w.startLine()
+ w.write(i.String())
+ if c, ok := m[i]; ok {
+ w.write(" " + c.String())
+ }
+ w.endLine()
+ }
+ w.blankLine()
+ }
+
+ return comments
+}
+
+type entryWriter func(interface{}, []*ast.Comment) []*ast.Comment
+
+func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) []*ast.Comment {
+ lines := groupIterable(elements, last)
+ if len(lines) > 1 {
+ w.delayBeforeEnd()
+ w.startMultilineSeq()
+ }
+
+ i := 0
+ for ; i < len(lines)-1; i++ {
+ comments = w.writeIterableLine(lines[i], comments, fn)
+ w.write(",")
+
+ w.endLine()
+ w.startLine()
+ }
+
+ comments = w.writeIterableLine(lines[i], comments, fn)
+
+ if len(lines) > 1 {
+ w.write(",")
+ w.endLine()
+ comments = w.insertComments(comments, close)
+ w.down()
+ w.startLine()
+ }
+
+ return comments
+}
+
+func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) []*ast.Comment {
+ if len(elements) == 0 {
+ return comments
+ }
+
+ i := 0
+ for ; i < len(elements)-1; i++ {
+ comments = fn(elements[i], comments)
+ w.write(", ")
+ }
+
+ return fn(elements[i], comments)
+}
+
+func (w *writer) objectWriter() entryWriter {
+ return func(x interface{}, comments []*ast.Comment) []*ast.Comment {
+ entry := x.([2]*ast.Term)
+ comments = w.writeTerm(entry[0], comments)
+ w.write(": ")
+ return w.writeTerm(entry[1], comments)
+ }
+}
+
+func (w *writer) listWriter() entryWriter {
+ return func(x interface{}, comments []*ast.Comment) []*ast.Comment {
+ return w.writeTerm(x.(*ast.Term), comments)
+ }
+}
+
+func groupIterable(elements []interface{}, last *ast.Location) (lines [][]interface{}) {
+ var cur []interface{}
+ for i, t := range elements {
+ loc := getLoc(t)
+ lineDiff := loc.Row - last.Row
+ if lineDiff > 0 && i > 0 {
+ lines = append(lines, cur)
+ cur = nil
+ }
+
+ last = loc
+ cur = append(cur, t)
+ }
+ return append(lines, cur)
+}
+
+func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) {
+ var leftovers []*ast.Comment
+ m := map[*ast.Import]*ast.Comment{}
+
+ for _, c := range comments {
+ matched := false
+ for _, i := range imports {
+ if c.Loc().Row == i.Loc().Row {
+ m[i] = c
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ leftovers = append(leftovers, c)
+ }
+ }
+
+ return m, leftovers
+}
+
+func groupImports(imports []*ast.Import) (groups [][]*ast.Import) {
+ if len(imports) == 0 {
+ return nil
+ }
+
+ last := imports[0]
+ var group []*ast.Import
+ for _, i := range imports {
+ if i.Loc().Row-last.Loc().Row > 1 {
+ groups = append(groups, group)
+ group = []*ast.Import{}
+ }
+ group = append(group, i)
+ last = i
+ }
+ if len(group) > 0 {
+ groups = append(groups, group)
+ }
+
+ return groups
+}
+
+func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) {
+ for _, c := range comments {
+ switch cmp := c.Location.Row - l.Row; {
+ case cmp < 0:
+ before = append(before, c)
+ case cmp > 0:
+ after = append(after, c)
+ case cmp == 0:
+ at = c
+ }
+ }
+
+ return before, at, after
+}
+
+func gatherImports(others []interface{}) (imports []*ast.Import, rest []interface{}) {
+ i := 0
+loop:
+ for ; i < len(others); i++ {
+ switch x := others[i].(type) {
+ case *ast.Import:
+ imports = append(imports, x)
+ case *ast.Rule:
+ break loop
+ }
+ }
+ return imports, others[i:]
+}
+
+func gatherRules(others []interface{}) (rules []*ast.Rule, rest []interface{}) {
+ i := 0
+loop:
+ for ; i < len(others); i++ {
+ switch x := others[i].(type) {
+ case *ast.Rule:
+ rules = append(rules, x)
+ case *ast.Import:
+ break loop
+ }
+ }
+ return rules, others[i:]
+}
+
+func locLess(a, b interface{}) bool {
+ return locCmp(a, b) < 0
+}
+
+func locCmp(a, b interface{}) int {
+ al := getLoc(a)
+ bl := getLoc(b)
+ if cmp := al.Row - bl.Row; cmp != 0 {
+ return cmp
+ }
+ return al.Col - bl.Col
+}
+
+func getLoc(x interface{}) *ast.Location {
+ switch x := x.(type) {
+ case ast.Statement:
+ return x.Loc()
+ case *ast.Head:
+ return x.Location
+ case *ast.Expr:
+ return x.Location
+ case *ast.With:
+ return x.Location
+ case *ast.Term:
+ return x.Location
+ case *ast.Location:
+ return x
+ case [2]*ast.Term:
+ // Special case to allow for easy printing of objects.
+ return x[0].Location
+ default:
+ panic("Not reached")
+ }
+}
+
+func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.Location {
+ i, offset := 0, 0
+
+ // Skip past parens/brackets/braces in rule heads.
+ if skipOpen > 0 {
+ i, offset = skipPast(skipOpen, skipClose, loc)
+ }
+
+ for ; i < len(loc.Text) && loc.Text[i] != open; i++ {
+ }
+
+ if i >= len(loc.Text) {
+ return &ast.Location{Row: -1}
+ }
+
+ state := 1
+ for state > 0 {
+ i++
+ if i >= len(loc.Text) {
+ return &ast.Location{Row: -1}
+ }
+
+ switch loc.Text[i] {
+ case open:
+ state++
+ case close:
+ state--
+ case '\n':
+ offset++
+ }
+ }
+
+ return &ast.Location{Row: loc.Row + offset}
+}
+
+func skipPast(open, close byte, loc *ast.Location) (int, int) {
+ i := 0
+ for ; i < len(loc.Text) && loc.Text[i] != open; i++ {
+ }
+
+ state := 1
+ offset := 0
+ for state > 0 {
+ i++
+ if i >= len(loc.Text) {
+ return i, offset
+ }
+
+ switch loc.Text[i] {
+ case open:
+ state++
+ case close:
+ state--
+ case '\n':
+ offset++
+ }
+ }
+
+ return i, offset
+}
+
+func dedupComments(comments []*ast.Comment) []*ast.Comment {
+ if len(comments) == 0 {
+ return nil
+ }
+
+ filtered := []*ast.Comment{comments[0]}
+ for i := 1; i < len(comments); i++ {
+ if comments[i].Location.Equal(comments[i-1].Location) {
+ continue
+ }
+ filtered = append(filtered, comments[i])
+ }
+ return filtered
+}
+
+// startLine begins a line with the current indentation level.
+func (w *writer) startLine() {
+ w.inline = true
+ for i := 0; i < w.level; i++ {
+ w.write(w.indent)
+ }
+}
+
+// endLine ends a line with a newline.
+func (w *writer) endLine() {
+ w.inline = false
+ if w.beforeEnd != nil && !w.delay {
+ w.write(" " + w.beforeEnd.String())
+ w.beforeEnd = nil
+ }
+ w.delay = false
+ w.write("\n")
+}
+
+// beforeLineEnd registers a comment to be printed at the end of the current line.
+func (w *writer) beforeLineEnd(c *ast.Comment) {
+ if w.beforeEnd != nil {
+ if c == nil {
+ return
+ }
+ panic("overwriting non-nil beforeEnd")
+ }
+ w.beforeEnd = c
+}
+
+func (w *writer) delayBeforeEnd() {
+ w.delay = true
+}
+
+// blankLine prints a blank line. If the writer is currently in the middle of
+// a line, blankLine ends it and then prints a blank one.
+func (w *writer) blankLine() {
+ if w.inline {
+ w.endLine()
+ }
+ w.write("\n")
+}
+
+// write appends the input string to the buffer.
+func (w *writer) write(s string) {
+ w.buf.WriteString(s)
+}
+
+// writeLine writes the string on a newly started line, then terminates the line.
+func (w *writer) writeLine(s string) {
+ if !w.inline {
+ w.startLine()
+ }
+ w.write(s)
+ w.endLine()
+}
+
+func (w *writer) startMultilineSeq() {
+ w.endLine()
+ w.up()
+ w.startLine()
+}
+
+func (w *writer) endMultilineSeq() {
+ w.write(",")
+ w.endLine()
+ w.down()
+ w.startLine()
+}
+
+// up increases the indentation level
+func (w *writer) up() {
+ w.level++
+}
+
+// down decreases the indentation level
+func (w *writer) down() {
+ if w.level == 0 {
+ panic("negative indentation level")
+ }
+ w.level--
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
new file mode 100644
index 00000000..0e0775b1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go
@@ -0,0 +1,85 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package bundle
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/bundle"
+ "github.com/open-policy-agent/opa/resolver/wasm"
+ "github.com/open-policy-agent/opa/storage"
+)
+
+// LoadWasmResolversFromStore will lookup all Wasm modules from the store along with the
+// associated bundle manifest configuration and instantiate the respective resolvers.
+// otherBundles maps bundle names to already-parsed bundles so that a bundle
+// that was just activated does not have to be read back from the store.
+// "Not found" store errors are treated as "no data", not as failures.
+func LoadWasmResolversFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, otherBundles map[string]*bundle.Bundle) ([]*wasm.Resolver, error) {
+	bundleNames, err := bundle.ReadBundleNamesFromStore(ctx, store, txn)
+	if err != nil && !storage.IsNotFound(err) {
+		return nil, err
+	}
+
+	var resolversToLoad []*bundle.WasmModuleFile
+	for _, bundleName := range bundleNames {
+		var wasmResolverConfigs []bundle.WasmResolver
+		rawModules := map[string][]byte{}
+
+		// Save round-tripping the bundle that was just activated
+		if _, ok := otherBundles[bundleName]; ok {
+			wasmResolverConfigs = otherBundles[bundleName].Manifest.WasmResolvers
+			for _, wmf := range otherBundles[bundleName].WasmModules {
+				rawModules[wmf.Path] = wmf.Raw
+			}
+		} else {
+			wasmResolverConfigs, err = bundle.ReadWasmMetadataFromStore(ctx, store, txn, bundleName)
+			if err != nil && !storage.IsNotFound(err) {
+				return nil, fmt.Errorf("failed to read wasm module manifest from store: %s", err)
+			}
+			rawModules, err = bundle.ReadWasmModulesFromStore(ctx, store, txn, bundleName)
+			if err != nil && !storage.IsNotFound(err) {
+				return nil, fmt.Errorf("failed to read wasm modules from store: %s", err)
+			}
+		}
+
+		// Attach every configured entrypoint ref to its module; modules with
+		// no matching resolver configuration (no entrypoints) are skipped.
+		for path, raw := range rawModules {
+			wmf := &bundle.WasmModuleFile{
+				URL:  path,
+				Path: path,
+				Raw:  raw,
+			}
+			for _, resolverConf := range wasmResolverConfigs {
+				if resolverConf.Module == path {
+					ref, err := ast.PtrRef(ast.DefaultRootDocument, resolverConf.Entrypoint)
+					if err != nil {
+						return nil, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", resolverConf.Entrypoint, err)
+					}
+					wmf.Entrypoints = append(wmf.Entrypoints, ref)
+				}
+			}
+			if len(wmf.Entrypoints) > 0 {
+				resolversToLoad = append(resolversToLoad, wmf)
+			}
+		}
+	}
+
+	var resolvers []*wasm.Resolver
+	if len(resolversToLoad) > 0 {
+		// Get a full snapshot of the current data (including any from "outside" the bundles)
+		data, err := store.Read(ctx, txn, storage.Path{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize wasm runtime: %s", err)
+		}
+
+		for _, wmf := range resolversToLoad {
+			resolver, err := wasm.New(wmf.Entrypoints, wmf.Raw, data)
+			if err != nil {
+				return nil, fmt.Errorf("failed to initialize wasm module for entrypoints '%s': %s", wmf.Entrypoints, err)
+			}
+			resolvers = append(resolvers, resolver)
+		}
+	}
+	return resolvers, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go
new file mode 100644
index 00000000..a019cde1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/cidr/merge/merge.go
@@ -0,0 +1,367 @@
+// Copyright 2017-2020 Authors of Cilium
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package merge provides helper functions for merging a list of
+// IP addresses and subnets into the smallest possible list of CIDRs.
+// Original Implementation: https://github.com/cilium/cilium
+package merge
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math/big"
+ "net"
+)
+
+const (
+	// Address lengths in bits for IPv4 and IPv6.
+	ipv4BitLen = 8 * net.IPv4len
+	ipv6BitLen = 8 * net.IPv6len
+)
+
+// All addresses below are in the 16-byte IPv6 form; IPv4 values are
+// represented as IPv4-mapped IPv6 addresses (::ffff:a.b.c.d).
+var (
+	// v4Mappedv6Prefix is the 12-byte prefix of an IPv4-mapped IPv6 address.
+	v4Mappedv6Prefix = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff}
+	// defaultIPv4 / defaultIPv6 are the all-zero (lowest) addresses.
+	defaultIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0}
+	defaultIPv6 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+	// upperIPv4 / upperIPv6 are the highest representable addresses.
+	upperIPv4 = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 255, 255, 255, 255}
+	upperIPv6 = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+	// ipv4LeadingZeroes pads big.Int-derived IPv4 bytes back to 16 bytes,
+	// since big.Int drops leading zeroes.
+	ipv4LeadingZeroes = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+)
+
+// RangeToCIDRs converts the range of IPs covered by firstIP and lastIP to
+// a list of CIDRs that contains all of the IPs covered by the range.
+// It first computes a single CIDR spanning both endpoints, then trims the
+// spanning CIDR on either side where it overshoots the requested range.
+func RangeToCIDRs(firstIP, lastIP net.IP) []*net.IPNet {
+	// First, create a CIDR that spans both IPs.
+	spanningCIDR := createSpanningCIDR(&firstIP, &lastIP)
+	firstIPSpanning, lastIPSpanning := GetAddressRange(spanningCIDR)
+
+	cidrList := []*net.IPNet{}
+
+	// If the first IP of the spanning CIDR passes the lower bound (firstIP),
+	// we need to split the spanning CIDR and only take the IPs that are
+	// greater than the value which we split on, as we do not want the lesser
+	// values since they are less than the lower-bound (firstIP).
+	if bytes.Compare(firstIPSpanning, firstIP) < 0 {
+		// Split on the previous IP of the first IP so that the right list of IPs
+		// of the partition includes the firstIP.
+		prevFirstRangeIP := GetPreviousIP(firstIP)
+		var bitLen int
+		if prevFirstRangeIP.To4() != nil {
+			bitLen = ipv4BitLen
+		} else {
+			bitLen = ipv6BitLen
+		}
+		_, _, right := partitionCIDR(spanningCIDR, net.IPNet{IP: prevFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
+
+		// Append all CIDRs but the first, as this CIDR includes the upper
+		// bound of the spanning CIDR, which we still need to partition on.
+		// (All of `right` is appended, the first element becomes the new
+		// spanning CIDR, and then that first element is sliced back off.)
+		cidrList = append(cidrList, right...)
+		spanningCIDR = *right[0]
+		cidrList = cidrList[1:]
+	}
+
+	// Conversely, if the last IP of the spanning CIDR passes the upper bound
+	// (lastIP), we need to split the spanning CIDR and only take the IPs that
+	// are greater than the value which we split on, as we do not want the greater
+	// values since they are greater than the upper-bound (lastIP).
+	if bytes.Compare(lastIPSpanning, lastIP) > 0 {
+		// Split on the next IP of the last IP so that the left list of IPs
+		// of the partition include the lastIP.
+		nextFirstRangeIP := getNextIP(lastIP)
+		var bitLen int
+		if nextFirstRangeIP.To4() != nil {
+			bitLen = ipv4BitLen
+		} else {
+			bitLen = ipv6BitLen
+		}
+		left, _, _ := partitionCIDR(spanningCIDR, net.IPNet{IP: nextFirstRangeIP, Mask: net.CIDRMask(bitLen, bitLen)})
+		cidrList = append(cidrList, left...)
+	} else {
+		// Otherwise, there is no need to partition; just use add the spanning
+		// CIDR to the list of networks.
+		cidrList = append(cidrList, &spanningCIDR)
+	}
+	return cidrList
+}
+
+// GetAddressRange returns the first and last addresses in the given CIDR
+// range. The returned IPs are in 16-byte form: IPv4 networks yield
+// IPv4-mapped IPv6 addresses (the v4Mappedv6Prefix is prepended below).
+func GetAddressRange(ipNet net.IPNet) (net.IP, net.IP) {
+	firstIP := make(net.IP, len(ipNet.IP))
+	lastIP := make(net.IP, len(ipNet.IP))
+
+	copy(firstIP, ipNet.IP)
+	copy(lastIP, ipNet.IP)
+
+	firstIP = firstIP.Mask(ipNet.Mask)
+	lastIP = lastIP.Mask(ipNet.Mask)
+
+	if firstIP.To4() != nil {
+		firstIP = append(v4Mappedv6Prefix, firstIP...)
+		lastIP = append(v4Mappedv6Prefix, lastIP...)
+	}
+
+	// The last address is the network address with all host bits set:
+	// OR each byte with the inverted mask, walking from the low end.
+	lastIPMask := make(net.IPMask, len(ipNet.Mask))
+	copy(lastIPMask, ipNet.Mask)
+	for i := range lastIPMask {
+		lastIPMask[len(lastIPMask)-i-1] = ^lastIPMask[len(lastIPMask)-i-1]
+		lastIP[net.IPv6len-i-1] = lastIP[net.IPv6len-i-1] | lastIPMask[len(lastIPMask)-i-1]
+	}
+
+	return firstIP, lastIP
+}
+
+// GetPreviousIP returns the previous IP from the given IP address. The
+// all-zero addresses are returned unchanged (cannot go lower than zero).
+//
+// NOTE(review): the loop bounds assume ip is in 16-byte form (IPv4 values as
+// IPv4-mapped IPv6); a bare 4-byte IPv4 slice would skip the loop entirely
+// and be returned unchanged — callers here pass 16-byte addresses.
+func GetPreviousIP(ip net.IP) net.IP {
+	// Cannot go lower than zero!
+	if ip.Equal(net.IP(defaultIPv4)) || ip.Equal(net.IP(defaultIPv6)) {
+		return ip
+	}
+
+	previousIP := make(net.IP, len(ip))
+	copy(previousIP, ip)
+
+	var overflow bool
+	var lowerByteBound int
+	if ip.To4() != nil {
+		// For IPv4-mapped addresses only the last 4 bytes may change.
+		lowerByteBound = net.IPv6len - net.IPv4len
+	} else {
+		lowerByteBound = 0
+	}
+	for i := len(ip) - 1; i >= lowerByteBound; i-- {
+		if overflow || i == len(ip)-1 {
+			previousIP[i]--
+		}
+		// Track if we have overflowed and thus need to continue subtracting.
+		if ip[i] == 0 && previousIP[i] == 255 {
+			overflow = true
+		} else {
+			overflow = false
+		}
+	}
+	return previousIP
+}
+
+// createSpanningCIDR returns a single IP network spanning the lower and
+// upper bound IP addresses.
+func createSpanningCIDR(firstIP, lastIP *net.IP) net.IPNet {
+	// Don't want to modify the values of the provided range, so make copies.
+	lowest := *firstIP
+	highest := *lastIP
+
+	var isIPv4 bool
+	var spanningMaskSize, bitLen, byteLen int
+	if lowest.To4() != nil {
+		isIPv4 = true
+		bitLen = ipv4BitLen
+		byteLen = net.IPv4len
+	} else {
+		bitLen = ipv6BitLen
+		byteLen = net.IPv6len
+	}
+
+	// Start from the full host-route mask (/32 or /128).
+	if isIPv4 {
+		spanningMaskSize = ipv4BitLen
+	} else {
+		spanningMaskSize = ipv6BitLen
+	}
+
+	// Convert to big Int so we can easily do bitshifting on the IP addresses,
+	// since golang only provides up to 64-bit unsigned integers.
+	lowestBig := big.NewInt(0).SetBytes(lowest)
+	highestBig := big.NewInt(0).SetBytes(highest)
+
+	// Starting from largest mask / smallest range possible, apply a mask one bit
+	// larger in each iteration to the upper bound in the range until we have
+	// masked enough to pass the lower bound in the range. This
+	// gives us the size of the prefix for the spanning CIDR to return as
+	// well as the IP for the CIDR prefix of the spanning CIDR.
+	for spanningMaskSize > 0 && lowestBig.Cmp(highestBig) < 0 {
+		spanningMaskSize--
+		mask := big.NewInt(1)
+		mask = mask.Lsh(mask, uint(bitLen-spanningMaskSize))
+		mask = mask.Mul(mask, big.NewInt(-1))
+		highestBig = highestBig.And(highestBig, mask)
+	}
+
+	// If ipv4, need to append 0s because math.Big gets rid of preceding zeroes.
+	if isIPv4 {
+		highest = append(ipv4LeadingZeroes, highestBig.Bytes()...)
+	} else {
+		highest = highestBig.Bytes()
+	}
+
+	// Int does not store leading zeroes.
+	if len(highest) == 0 {
+		highest = make([]byte, byteLen)
+	}
+
+	newNet := net.IPNet{IP: highest, Mask: net.CIDRMask(spanningMaskSize, bitLen)}
+	return newNet
+}
+
+// partitionCIDR returns a list of IP Networks partitioned upon excludeCIDR.
+// The first list contains the networks to the left of the excludeCIDR in the
+// partition, the second is a list containing the excludeCIDR itself if it is
+// contained within the targetCIDR (nil otherwise), and the
+// third is a list containing the networks to the right of the excludeCIDR in
+// the partition.
+func partitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, []*net.IPNet, []*net.IPNet) {
+	var targetIsIPv4 bool
+	if targetCIDR.IP.To4() != nil {
+		targetIsIPv4 = true
+	}
+
+	targetFirstIP, targetLastIP := GetAddressRange(targetCIDR)
+	excludeFirstIP, excludeLastIP := GetAddressRange(excludeCIDR)
+
+	targetMaskSize, _ := targetCIDR.Mask.Size()
+	excludeMaskSize, _ := excludeCIDR.Mask.Size()
+
+	// Disjoint ranges: the whole target falls entirely on one side.
+	if bytes.Compare(excludeLastIP, targetFirstIP) < 0 {
+		return nil, nil, []*net.IPNet{&targetCIDR}
+	} else if bytes.Compare(targetLastIP, excludeFirstIP) < 0 {
+		return []*net.IPNet{&targetCIDR}, nil, nil
+	}
+
+	// The exclude range covers the whole target: nothing left or right.
+	if targetMaskSize >= excludeMaskSize {
+		return nil, []*net.IPNet{&targetCIDR}, nil
+	}
+
+	left := []*net.IPNet{}
+	right := []*net.IPNet{}
+
+	newPrefixLen := targetMaskSize + 1
+
+	targetFirstCopy := make(net.IP, len(targetFirstIP))
+	copy(targetFirstCopy, targetFirstIP)
+
+	iLowerOld := make(net.IP, len(targetFirstCopy))
+	copy(iLowerOld, targetFirstCopy)
+
+	// Since golang only supports up to unsigned 64-bit integers, and we need
+	// to perform addition on addresses, use math/big library, which allows
+	// for manipulation of large integers.
+
+	// Used to track the current lower and upper bounds of the ranges to compare
+	// to excludeCIDR.
+	iLower := big.NewInt(0)
+	iUpper := big.NewInt(0)
+	iLower = iLower.SetBytes(targetFirstCopy)
+
+	var bitLen int
+
+	if targetIsIPv4 {
+		bitLen = ipv4BitLen
+	} else {
+		bitLen = ipv6BitLen
+	}
+	shiftAmount := (uint)(bitLen - newPrefixLen)
+
+	targetIPInt := big.NewInt(0)
+	targetIPInt.SetBytes(targetFirstIP.To16())
+
+	exp := big.NewInt(0)
+
+	// Use left shift for exponentiation
+	exp = exp.Lsh(big.NewInt(1), shiftAmount)
+	iUpper = iUpper.Add(targetIPInt, exp)
+
+	matched := big.NewInt(0)
+
+	// Repeatedly halve the current range, keeping the half that still
+	// contains excludeCIDR and emitting the other half to left/right.
+	for excludeMaskSize >= newPrefixLen {
+		// Append leading zeros to IPv4 addresses, as math.Big.Int does not
+		// append them when the IP address is copied from a byte array to
+		// math.Big.Int. Leading zeroes are required for parsing IPv4 addresses
+		// for use with net.IP / net.IPNet.
+		var iUpperBytes, iLowerBytes []byte
+		if targetIsIPv4 {
+			iUpperBytes = append(ipv4LeadingZeroes, iUpper.Bytes()...)
+			iLowerBytes = append(ipv4LeadingZeroes, iLower.Bytes()...)
+		} else {
+			iUpperBytesLen := len(iUpper.Bytes())
+			// Make sure that the number of bytes in the array matches what net
+			// package expects, as big package doesn't append leading zeroes.
+			if iUpperBytesLen != net.IPv6len {
+				numZeroesToAppend := net.IPv6len - iUpperBytesLen
+				zeroBytes := make([]byte, numZeroesToAppend)
+				iUpperBytes = append(zeroBytes, iUpper.Bytes()...)
+			} else {
+				iUpperBytes = iUpper.Bytes()
+
+			}
+
+			iLowerBytesLen := len(iLower.Bytes())
+			if iLowerBytesLen != net.IPv6len {
+				numZeroesToAppend := net.IPv6len - iLowerBytesLen
+				zeroBytes := make([]byte, numZeroesToAppend)
+				iLowerBytes = append(zeroBytes, iLower.Bytes()...)
+			} else {
+				iLowerBytes = iLower.Bytes()
+
+			}
+		}
+		// If the IP we are excluding over is of a higher value than the current
+		// CIDR prefix we are generating, add the CIDR prefix to the set of IPs
+		// to the left of the exclude CIDR
+		if bytes.Compare(excludeFirstIP, iUpperBytes) >= 0 {
+			left = append(left, &net.IPNet{IP: iLowerBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
+			matched = matched.Set(iUpper)
+		} else {
+			// Same as above, but opposite.
+			right = append(right, &net.IPNet{IP: iUpperBytes, Mask: net.CIDRMask(newPrefixLen, bitLen)})
+			matched = matched.Set(iLower)
+		}
+
+		newPrefixLen++
+
+		if newPrefixLen > bitLen {
+			break
+		}
+
+		// Narrow the bounds to the matched half for the next iteration.
+		iLower = iLower.Set(matched)
+		iUpper = iUpper.Add(matched, big.NewInt(0).Lsh(big.NewInt(1), uint(bitLen-newPrefixLen)))
+
+	}
+	excludeList := []*net.IPNet{&excludeCIDR}
+
+	return left, excludeList, right
+}
+
+// getNextIP returns the next IP after ip. The highest IPv4/IPv6 addresses
+// are returned unchanged (cannot go higher), as is any slice that is neither
+// 4 nor 16 bytes long.
+func getNextIP(ip net.IP) net.IP {
+	if ip.Equal(upperIPv4) || ip.Equal(upperIPv6) {
+		return ip
+	}
+
+	nextIP := make(net.IP, len(ip))
+	switch len(ip) {
+	case net.IPv4len:
+		ipU32 := binary.BigEndian.Uint32(ip)
+		ipU32++
+		binary.BigEndian.PutUint32(nextIP, ipU32)
+		return nextIP
+	case net.IPv6len:
+		// Increment the low 64 bits; on wrap-around to zero, carry into the
+		// high 64 bits, otherwise copy the high half through unchanged.
+		ipU64 := binary.BigEndian.Uint64(ip[net.IPv6len/2:])
+		ipU64++
+		binary.BigEndian.PutUint64(nextIP[net.IPv6len/2:], ipU64)
+		if ipU64 == 0 {
+			ipU64 = binary.BigEndian.Uint64(ip[:net.IPv6len/2])
+			ipU64++
+			binary.BigEndian.PutUint64(nextIP[:net.IPv6len/2], ipU64)
+		} else {
+			copy(nextIP[:net.IPv6len/2], ip[:net.IPv6len/2])
+		}
+		return nextIP
+	default:
+		return ip
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.go
new file mode 100644
index 00000000..deb68ce7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// THIS FILE IS GENERATED. DO NOT EDIT.
+
+// Package opa contains bytecode for the OPA-WASM library.
+package opa
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+)
+
+// Bytes returns the OPA-WASM bytecode.
+func Bytes() ([]byte, error) {
+ gr, err := gzip.NewReader(bytes.NewBuffer(gzipped))
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(gr)
+}
+
+var gzipped = []byte("\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\xFF\xEC\xBD\x0D\xB8\x5D\x47\x75\x18\x3A\xBF\xFB\xE7\xEC\x73\xA4\x6D\x22\xB0\xE2\xEB\x36\xB3\x77\xFC\x5E\xAF\x83\xF5\xAA\xD7\x26\x52\x62\xC2\x43\x73\x1B\xE9\x4A\x36\x60\xD2\x94\xF4\xBD\x7E\xED\xC3\x6E\x70\x1B\xF6\x35\x20\xC9\xD7\x0A\xF9\x2A\x74\xAE\x6C\x13\xCC\xAF\x0D\x21\x29\x69\x93\x62\x08\xAD\x8D\xB1\xA9\x13\x08\x98\x04\x82\x43\x20\x38\xAF\xA4\x98\x94\x24\x0E\xBF\x86\xF0\xDB\x12\xE2\x80\xD3\x90\x10\xD0\xFB\xD6\xCF\xCC\x9E\x7D\xCE\xB9\x92\xAE\xB1\xC1\xB4\x55\x82\xEF\xD9\x7F\x33\x6B\xD6\x5A\xB3\x66\xAD\x35\x6B\xD6\x12\x57\x5C\xFD\x6C\x29\x84\x90\x1F\x91\x7F\xEB\x72\x3D\x9D\x4E\xE5\xF4\x72\x15\xFE\x2B\x2E\x37\xD3\x29\xFC\x91\xF8\x3F\x39\xBD\xDC\x4E\xE9\x86\xC6\xFF\xE6\xD3\x29\x5F\xE3\x7B\x72\x7A\x79\x46\x37\xE2\x9B\x72\x7A\xB9\x80\xFF\x89\xCB\xD5\xF4\x38\x36\x7A\x42\x4E\x2F\xD7\x27\xF0\x89\x84\x3B\xF2\x04\xDD\x16\x70\x79\xFC\xF2\x6A\xDA\xFF\x93\xD3\xCB\x0B\xF8\x7B\x7C\xD0\x76\xDF\x2D\xB4\x34\xC5\x06\xA1\x7B\xF8\x5B\xCE\x7D\x4B\xBF\xD5\x89\x5C\x5F\xF9\x9C\x63\xD9\xB3\xAF\x7C\xF6\x73\x8F\xFE\xB4\x12\x0A\xAE\xCA\xE7\x1E\xBE\xE2\x19\x57\xFC\xF3\xE7\x1E\x5D\x17\x06\xAE\xC7\x70\xFD\xCF\xAF\x79\xD6\x55\xEB\xCF\x7A\xCE\x6E\x21\x67\x6F\xFD\x9F\x42\xCC\xDE\xFA\x7B\xA2\x9C\xBD\xF5\xF7\x45\x35\x7B\xEB\xFB\xC5\x48\xBF\xD6\xBE\xC6\xA6\xFF\x24\xFE\x13\x52\xE2\x2F\x21\xE4\x58\x29\x65\x27\xD6\x96\xF8\xFF\xD6\x6A\x69\xAD\x15\xF0\x86\x94\xDB\xB6\xC3\xAB\x15\x5C\x59\x3B\x36\xD6\x48\xAB\x26\x93\x89\xB2\xE3\xB1\x09\xED\x51\x8B\x56\x52\xDB\x82\xDA\xC7\x6B\x61\xAD\xAD\x85\x08\x1D\xF2\x33\xBC\x1D\xFE\x09\x59\x4A\x91\xC0\x25\xB1\x19\x6B\x95\x91\xB9\xB2\xE7\xC8\xC7\xC0\x6F\x65\x6D\x3E\xB6\xF2\xBB\xAC\x1C\xDB\xF1\xD8\xAA\xEF\x52\x42\xE6\x2A\x13\x56\x19\xA3\xAC\xCC\x95\x12\x4A\x0A\x2D\x85\x52\x56\x5A\x23\x35\xFC\xB0\x76\x47\x59\x6A\xFD\xD8\xC7\x3D\x4E\x94\x72\x62\x0D\x8C\x15\xFE\xD9\x2A\x17\x32\xCB\x75\x56\x8E\xB2\xB2\x34\xA6\x14\x3A\x87\xFB\x5A\x96\xA5\x94\xA5\x18\x8B\x1E\x3E\x65\x84\x81\x7F\x2A\xB7\x79\xAE\xF3\x3C\x97\x5A\x0B\x21\x45\
xA9\x85\xAC\xCA\x4C\x65\xE7\x66\x99\xD6\x3A\xCB\xB2\xEC\xDC\xEC\xDC\x4C\x9E\x5B\x8D\x32\xBC\xA3\xB5\xAC\x0A\x21\xA0\x61\x55\x5A\x6B\xAC\x99\x00\x00\xB9\x50\x42\x2A\x21\x64\x65\xF2\x91\xC8\x73\xB1\x73\xA7\xB4\xD6\xC0\xFB\x95\x3C\x57\x68\xA1\xB5\xC9\x75\xA6\x8B\xEF\x96\x32\x53\x72\xFB\x76\x93\x15\xB9\x52\x46\xC1\xBF\xF3\x6C\x59\x4A\x2D\xF4\xB9\x42\xC9\xD9\x7F\x4B\x12\xA0\x3C\x5F\x29\x25\x95\x2D\x4A\x33\xDA\xB9\xB3\xC4\x8F\x94\x50\x62\xA7\x2D\x95\x14\x56\xDA\xB2\x1C\x89\xAA\xB4\xD2\x02\x76\x94\xC8\x11\xD0\x52\x57\x65\x29\x85\x91\xA6\x1C\x51\x47\x79\x96\x43\xA7\xD6\x1A\x99\xC3\xFF\x9B\x73\x65\x5E\x9C\xAB\x4D\x2E\xAA\x5C\xE4\x46\x18\xAB\xD4\x68\x67\x69\xC6\x66\x2C\xAC\xB2\xC6\x28\xA3\x80\xB4\x95\x10\xA2\x14\x52\x49\xA9\x94\xD5\xA5\xB0\x42\x49\x65\xAC\x29\x05\x30\x96\x14\x23\xA9\xA4\x28\x47\xA5\x35\xA5\x31\x23\x49\x9D\xC1\x87\xC6\xCA\xC3\xF2\x92\x4B\xB2\x42\x4E\xA5\xBF\xF3\xAE\xBC\xCA\xDF\xA4\xB6\xD7\xC0\xC7\x57\x1E\xBB\xE2\xAA\x67\xFC\xC4\xFA\xF3\x9E\xF1\x9C\x2B\x7F\x4A\x7C\xEF\x08\x6E\x3D\xFB\x8A\xAB\xAE\x7A\xEE\x4F\x88\xA7\x3E\x6E\xF0\xFC\xEA\x2B\xD7\x9F\xF1\xAC\xE7\x1C\xBE\x66\x5D\x5C\xF0\xD8\xB9\x07\xCF\xBC\x62\xFD\x0A\xF1\xBF\x2D\xCD\xDD\xBF\xF2\x39\xEB\x47\x7F\xFA\xF0\x73\x9F\xF5\x9C\x75\xF1\xBF\x9F\x3B\x78\xFA\x2F\xAF\x5C\x7F\xC6\xD1\x2B\xAF\xBE\xE6\xAA\x75\xF1\x77\xB6\xC1\x93\xEE\xEA\xE7\x3E\xE7\x19\x87\xAF\x38\x7A\xF5\x95\xE2\x87\x27\xF1\xC6\x33\xAF\x79\xF6\x61\x71\xC9\x76\xB8\x3E\x76\xC5\x55\xD7\x5C\xC9\x6F\x3C\xB1\x80\x3B\xFF\xE2\xE8\x95\x57\x8A\xCB\xB6\xF5\x0F\xF1\xED\x4B\x71\x5C\x3F\x79\xE5\x15\x87\x9F\x71\x78\xFD\x28\x74\x24\x9E\x3C\xBC\x75\xF5\x95\xEB\xE2\x29\x8F\xE9\x3F\xBB\xE2\x99\xCF\x7C\xC6\xE1\x2B\xD6\x7F\x52\xFC\x91\x7C\x6C\x7F\xF7\xE8\x95\xCF\x7E\xEE\xB1\x2B\xE9\xC1\x7D\xB2\xFC\x39\x29\x85\x97\xD5\xA1\xDB\xE4\x3F\x38\x70\xF0\x33\xF2\xB3\xF2\x94\xDC\x50\xA7\xD4\x7F\x50\xB7\xAA\x0D\x7D\x8B\x7A\x83\xBA\x41\xBD\x44\xBD\x58\xBD\x48\xBD\x4E\xBF\x5E\xBF\x45\xBF\x55\xFF\xAA\x7E\xB3\xFE\x65\xFD\x2E\x7D\xB7\xFE\x2D\xFD\x6E\xFD\x22\xFD\x05\xFD\x5F\xF5\x7F\xD3\
x7F\xA8\xEF\xD3\x1F\xD6\x1F\xD5\x1F\xD7\xF7\xEB\x4F\xE9\x4F\xEB\x9F\x35\x6F\x30\x2F\x37\x37\x9A\xDB\xCC\x4D\xE6\x8D\xE6\x76\x73\x87\xF9\x1D\xF3\xFB\xE6\x43\xE6\x23\xE6\x63\xE6\x4F\xCC\x75\xF6\x7A\xFB\x02\x7B\x83\xFD\x39\xFB\xF3\xF6\xA5\xF6\xC5\xF6\x25\xF6\x5F\xDB\x57\xD8\x57\xDA\x1B\xED\x4D\xF6\xD5\xF6\x17\xEC\xBF\xB1\xA3\x17\xFF\xC6\x63\x5E\x63\xBF\x28\x95\x9E\xCA\x13\xDF\xBB\xB1\xB1\xB1\x21\x7C\xBD\xD6\xCA\x0B\xE8\xA7\x68\x94\xDA\xD7\xFF\x9F\x13\xF5\xAD\x27\xE1\xFE\xF1\x6E\x9B\x11\x52\x69\x5B\xC1\x2B\x4E\x7A\xB1\x47\x8D\xE1\xF9\xB2\x32\xAD\x9E\x88\x15\xD1\x98\xB1\xA9\xE0\x8F\xDE\x47\xF7\x0B\xA7\x9C\x76\xD2\x8F\xBB\xFA\x75\xD8\xC6\x74\x75\x22\xFC\xC6\x0D\x1B\x1B\xA2\xC6\xCE\x2A\x67\x56\xE4\xF1\xC6\x38\xB9\xAC\xC6\x4E\x75\xAD\xE2\xE6\x2E\x9B\x98\xB1\xA8\x2A\x27\x7E\x40\x15\xF5\xDD\xF0\x6D\xA3\xC6\x1A\xAF\xC7\xF1\x5A\xCD\x5C\xCB\xCA\x99\x70\x51\x39\xE9\xEB\x8E\x86\xE4\x54\xF5\x76\x2D\xED\xF4\x34\x63\x8D\xA3\xFC\x57\xDD\x36\x0D\x53\xA4\xAA\x7F\xF5\x5A\x68\x48\xD3\x40\x0B\xA7\xBD\xA8\xFF\x18\x5B\x93\xBE\xE8\xEA\x0F\xC2\x53\xB5\x0F\x00\x2F\xF6\x4F\x84\x7F\x75\x32\x28\x1E\x3C\xDC\x16\x8D\xF1\xA6\x51\x01\x21\xC6\xA9\x6E\x59\x85\xBE\xF4\x81\x89\x70\xBA\x7E\xD7\xB5\x0C\xCA\xD8\x54\x4E\xA7\xAF\x7D\x04\x9A\xAB\x3F\xB5\x41\x80\x28\x5F\x74\x8D\x72\xC6\xCB\xAE\x35\xD8\xC3\xA1\x09\xE0\x48\xD7\x1F\xDD\x08\x08\x18\x42\x3D\x1E\x40\x3D\x1E\x40\x3D\x9E\x87\x9A\xE1\x6E\x2D\x43\xDE\x43\x6D\xBC\x5A\x07\x88\x5A\xB5\x7F\x22\xF4\x3E\xA7\x92\x41\xAC\x4E\x8C\xD3\x74\x67\x06\x5E\xA0\xA4\x9A\x88\x0A\x5B\x6D\x6C\x15\x40\xB7\xF3\x70\xA7\x68\x78\x04\x48\x27\xBF\x4D\xA4\xFB\xF4\x37\x45\x3A\xF9\x2D\x27\xDD\xA7\x1F\x5E\xD2\xFD\x95\x94\x66\xEA\x45\x23\x37\xA3\x14\x23\x5B\xC2\xB7\xDE\x34\x1A\xDE\xD5\xF4\xAE\xDC\x3F\x11\x4E\xD2\x18\x34\xA2\xF4\x36\x12\x21\x97\x4C\x64\x95\xDC\x6E\x00\x02\x40\x2F\x60\x59\xB2\x04\x01\xF4\x8E\x55\x85\xEF\x01\xB6\x08\x39\x86\x90\xA3\x42\x2F\x4C\xAF\x80\x20\x8D\x08\x4A\x3B\xD7\xB3\xDD\xEA\xD0\x21\xCB\x3C\xC6\x8F\xA9\x42\xDF\x86\xF0\x23\x1F\x91\xA1\xCB\x83\xDF\x96\xA1\x53\xB7\
x67\x3F\xF4\xD7\x0E\x87\x3E\x33\x78\xD0\x96\x08\xB8\xFA\x5E\x14\xD5\x92\x20\x54\x84\x0B\xDD\x43\xA7\x03\x74\x86\x38\x57\x3A\x03\x40\xDD\x05\x5F\xC1\x4F\xD3\x9A\x08\x86\x02\x4C\xC8\xAE\xD5\x4E\xC5\xF1\xD7\xBF\x43\xED\x03\x53\x9E\x44\xF6\xFE\xC3\x93\xB8\xE6\xC8\xEA\xC5\x6A\x06\xC4\x59\x20\x8D\x30\x15\xC3\x35\x91\x5E\x36\x7A\xAC\x82\x6C\x98\x08\xBE\x46\x28\x1B\x03\x40\x7B\xD3\x20\x21\xF5\x3E\x67\x9C\x4C\xE4\x84\x5A\x9D\xF0\x92\xC6\xF7\x97\x95\xD9\x3F\x51\x28\x88\x1A\x39\x43\xB6\x0A\x1B\x96\xD5\x26\x13\x58\x22\x1A\x3C\x8A\x88\x41\x0F\x74\x8D\x2D\xE3\x4F\xD3\xCA\x04\x31\xD0\x68\x98\xBE\x2A\x4C\xDF\x77\x46\xD4\x54\x5F\x94\x52\x9F\x4E\xBE\xCE\x32\xAF\x96\x7A\x28\x26\xF5\xE6\x62\x52\xAD\x12\x03\xF2\xDD\x65\x65\xBC\x3C\x30\x41\x9E\x41\x81\xC8\x34\x8B\x02\xD1\x0B\x86\x4C\x45\x7C\x8F\xA1\x8F\xE4\xAE\x93\x2B\xD7\x6E\x6C\x6C\xD4\x7B\x75\xE1\x58\xA0\xDF\x8E\x7D\x89\xD5\xF8\x96\xAC\xBC\x0C\x17\x33\x22\xE9\x85\x52\xAA\xF9\xD1\xF2\x28\x51\x08\xB6\x02\x47\x25\x7A\x91\x1C\x94\x1D\xA7\xDC\x60\x35\xFD\x14\x3C\x75\x82\xC4\x5F\x22\x9A\x6F\x4D\x15\x1C\xC5\xA2\x52\x0C\x00\x11\x0F\x2B\x20\x0F\x6E\x02\xC8\xEF\x9E\x0D\x20\xFF\x6A\x11\xF9\x17\xC0\x91\xAE\x51\x89\x06\x08\xCB\x5E\x0A\xCB\x57\x53\x58\x10\x12\x5C\x29\x79\xBD\x98\xA1\xC6\xC3\xDC\xF7\x5F\x6E\xA5\xEF\x77\x0F\x09\xA0\x02\x42\xA8\xE3\x46\x38\xC9\xBF\xA2\x08\x47\xC9\x88\x22\xB8\xFE\x30\x75\x4A\x7F\x87\x44\x02\x61\x1C\x80\x73\x61\x09\x57\x00\xDE\x67\x86\xE0\xF1\x5F\x05\xAB\xED\xD8\xBF\xEB\x19\x47\xDA\xA0\x51\x03\xED\xBE\x90\xD2\x4E\xF7\xB4\x53\x29\xED\xEE\x19\xCE\xDD\xD9\x31\xE8\x38\x06\x83\x23\x00\x31\xEF\x40\x9A\x32\xA8\x22\x80\x2A\x9C\x76\x26\x05\xF5\x73\x08\x1A\xAF\xEC\xCE\xCC\x83\xAA\x09\x54\x68\xD1\xDF\xF0\xA2\x04\x54\xC1\xA0\x6A\x52\x0D\x08\x51\x86\xFF\xC2\xA8\xFF\x92\x1E\x0F\x46\xA2\xBF\x0D\xD4\xF8\xF3\xAD\x50\xE3\xD6\x17\x9D\x05\x35\x3E\x70\x7A\x6A\x6C\x6D\x0C\xBA\x1F\x03\x2E\x31\x61\x1C\x06\xC7\x71\x5F\x3F\x8E\xCF\x2F\x1E\xC7\xB2\x1A\xB7\xC2\x9B\x23\x13\x18\x93\x98\x1B\xD0\x7F\x4A\x07\x64\x02\xCD\x66\x88\xF2\xAE\x87\x36\x20\xD4\
x3A\xEA\x7B\x00\x0E\xBC\xA6\x9F\x0F\x65\x98\x09\xB9\xBE\xB2\xE9\x30\x81\xA7\x37\x19\xC0\xAF\xC1\x00\x68\x29\x8B\x8B\x98\x5D\x45\x98\xD2\x0B\xB0\x54\x61\x0D\xEA\x58\x25\x51\x73\xEB\x9B\x01\xCD\x99\xD7\xE3\x2E\xD1\x42\x60\x19\x33\x73\xCB\x58\x34\x2D\x44\x23\xE2\xC7\x72\x93\x8F\x05\x7C\x4C\x40\xD0\xC7\xAA\x7A\x8B\x52\x6A\xAA\x12\xDB\x5C\xC7\x45\xD9\x6C\x36\x12\x0D\x17\x2A\xB9\x80\x97\x41\x91\x83\x15\xF2\x0E\xB8\x8B\x9C\xA0\xC3\x6F\x12\x08\x17\x6A\xD1\xDA\x95\xE9\xD3\xD1\x7C\xB7\x4E\xAF\x88\xBD\x1A\x66\x8F\xB3\x68\x7A\xB7\xD9\x3F\x06\x09\xEA\xB2\xBD\x5A\xB8\xAC\xB1\x55\xF8\xAA\x68\xB3\xF0\x55\x46\x5F\x15\xF0\x55\xE6\x2C\x7D\x60\x61\x79\xB6\x4D\x06\x0D\x65\xCF\xBF\x85\xF1\x6A\xB0\xC3\xF0\xBD\xFB\x99\x15\xF7\xC2\x16\x3F\xC8\x6E\xF1\x7A\xDD\x9B\xA3\xA0\x9C\x3A\x43\x78\x96\x09\xAA\x58\x5D\xC2\xAF\xED\x8A\x3C\xDE\xDA\xA7\x93\x22\xD3\x13\xDB\x54\x5F\x05\x62\x6F\xC6\x9C\x5B\x67\xCD\xED\x70\xEF\x9D\x61\xDD\xE5\x5F\x3A\xDE\x03\x42\x28\x07\x88\x56\xFB\x50\xA6\x3A\x51\xDF\x8F\x5A\xE6\x9F\x70\x3B\xC9\x55\xFD\x19\xFC\xEF\x27\xF1\xBF\x5F\xC0\xFF\x22\xC3\x42\x1F\x4E\xD6\x9F\x9B\xBD\xE3\xCC\xD2\xB0\x31\x27\xE0\xD6\x67\x17\xB7\x42\x2A\xDC\xA3\x74\xE4\x9F\x3B\xED\xC8\x3F\x33\xB8\x43\xC3\x5C\x80\x00\x1E\x39\x8D\xF6\x88\x94\xAC\xBB\xF7\x5A\xC2\x70\x74\xF8\xE4\x9D\x61\xA8\x01\xBA\x8F\x87\x57\xA5\x13\xF5\x27\x92\xC6\x23\x40\xF3\x60\x26\x2A\xF2\xA3\x14\xBF\x9F\x9F\xC3\xE9\xE7\xCF\x12\xA7\x9F\xDF\x84\x9B\xFE\x48\x4A\x39\x2F\xF5\x53\xAD\x2C\xA0\x92\x25\x1C\x0F\x70\x2C\x41\x65\x1F\xC8\x23\x89\x8A\x7E\x2F\x7E\x2E\xD4\xC5\xCA\xF4\x1F\x47\xFD\x43\x92\xB4\x37\x08\xA4\x5A\x56\x45\x14\xF8\xA6\xAB\xEF\x9F\x51\x6A\x15\x1A\x38\xC2\x7F\x31\x5D\xB9\x24\x13\x92\x17\xE4\x31\xA2\x3B\xB8\x23\x66\xD7\xE7\x9F\x57\x8B\x74\x0C\x90\xF0\xA7\x65\x25\x99\xFA\x7C\xB8\x75\x1C\x2B\x3C\x5B\x34\xC0\xA7\x4F\x06\x60\x68\x42\x4C\xCF\x91\x78\x91\x52\x82\xD0\xA9\x69\xB5\x01\x1B\x47\x13\x36\x3E\x00\x1F\x9C\xE7\x34\x0F\x72\x9E\x3B\x63\xD3\xC3\x16\xE4\x66\x2D\x2C\x5E\x25\xFF\xAD\xEA\x97\xF9\x7B\xC5\x3C\x62\x68\xF8\x26\x5D\x6D\x0C\
x2D\x30\xB4\xE6\x05\x67\xC2\x72\x57\xDF\xBC\xD1\xA3\x0D\x9E\xE0\xA2\xA7\xEA\xD7\x6E\x10\x7E\xD2\x7B\xFC\x2A\x23\x52\xED\x12\xCB\xAD\x84\x3F\x82\x18\x5F\xA2\x9D\x6D\x00\x1B\xD0\xB2\x3F\xA7\x43\x75\xF5\x9C\xAE\xD1\x7A\x9F\x13\xBB\x84\x68\x8D\xD3\xBB\x84\xF0\xD3\xAB\x8F\xF8\x53\xF2\x08\x4C\x6B\x7F\xB8\xDB\x25\x84\x83\x15\x19\x7F\x1D\x59\x45\xEB\x9D\x16\xD9\x46\xE0\x42\x8D\x1E\xB6\x69\x07\xE6\xB4\x1A\xA3\xCD\xD9\x18\x50\x81\xC8\x5E\x44\xDC\xDC\x2B\x7A\xE4\xBC\x4F\xCD\xF8\xC9\x75\x32\x1F\x64\xB4\xD0\x68\x69\x06\xC8\x9C\xE9\x5A\x0B\x70\xFD\xDD\x03\x13\x09\xB3\x0E\x4D\xEF\x55\x30\xEF\xE9\x55\x85\x2F\xF5\x58\xE9\x5B\x10\xCE\x7A\xD9\x39\xE3\xA7\x57\x3B\xD9\xD1\xD2\x7D\x2D\xB9\x9F\xD0\xDF\x48\xEB\xA6\xF8\x31\x58\x31\x11\x55\xAD\xBD\x63\x45\xFF\xCC\xD3\x11\x61\x16\x11\x86\x53\x00\xD1\x64\x9C\xA5\xE5\xD4\x4F\xFD\xC6\x09\x97\xDD\x72\x8D\xCB\x56\xF2\xA7\xB7\x62\xE9\x62\x21\x5C\xB6\xF2\xBC\xE3\x2B\xC2\x89\xA5\x26\x43\x08\x41\x57\x40\xAC\x4C\x40\xC1\x85\x05\xBF\x58\xD4\x26\xB6\x77\xB8\x6B\xA5\x93\x88\x68\xC0\xF1\xC5\x02\x5F\x0A\xCD\x58\x68\xC6\xCE\x2E\xCF\xA2\xFA\xBD\x52\x56\x81\xCB\xDC\x0C\x1A\xBD\x3C\xD8\xFB\x2D\x49\xD2\x38\x03\x04\xF4\xFF\xB0\xDB\x36\x11\xAA\xFF\x27\x91\x1F\xA6\x7B\xC4\x39\x20\x25\xA0\x6B\x54\x2B\xBC\x80\x41\xC9\xFD\x13\x50\x22\xAD\xD3\x4D\xE6\x45\x93\x7B\xD1\x14\x44\x14\xC0\x55\xB9\x7F\x82\x02\xDB\x7F\x0D\x24\x88\x2B\xFD\xCE\x75\xBF\xF3\x1A\x5F\xD5\x2F\x83\x49\xD2\x8E\xD8\x75\xEA\x2C\x70\x53\x5B\xEE\x77\xB9\x17\xAB\x47\x26\x85\x1B\xD1\x17\x6B\xAE\xF4\xA3\xAB\xBA\x16\x9E\x5F\x3A\x29\x40\xD9\xB9\x58\x00\xE6\xF3\xFD\x88\xA6\x1C\x78\xB5\xF0\xFA\x92\x49\x01\x57\xAE\x00\x7C\x14\xCC\x1B\xAE\xF0\xE6\xC0\x24\x87\x77\x73\x57\xFA\xFF\x63\x75\x92\x23\xA4\x19\x40\x2E\x5D\x06\xEF\x22\xCC\x55\xE0\xD5\x48\x8E\xF8\xFD\xC1\x89\x81\x85\x61\x59\x89\x3D\x02\xD9\x7A\x6C\x2A\xA7\x56\xA6\x7B\x11\x19\x35\x60\x40\xF9\xEF\xEE\xE0\x86\x60\xA5\x8C\x35\x3A\x42\x29\x20\x21\xF7\x17\x07\xDD\x50\xEC\x12\x12\xAE\x2C\xE0\xFF\xC0\xC4\x52\xC7\x55\x00\x18\x90\xE9\x44\x53\x01\xC8\xEC\xE5\x6B\xC0\x52\
x94\x5D\x33\x42\xCA\x36\x13\x57\x21\xA8\x8C\xF2\x02\x16\x65\x2F\x9A\x0C\x50\xEE\x6F\x7A\x71\xB4\x3B\x15\x5F\xB9\x9C\x50\xDE\x2A\x7F\x0E\x23\xBD\x9C\x08\x7F\x27\xBD\x49\x7F\x5D\xF2\x0C\x84\x83\x93\x80\xD0\xCC\x95\xCE\xAC\xB9\xCC\x9B\xF5\xA3\x4D\x06\xF0\x12\x7F\x97\x71\xE6\xB9\x1C\xA9\x96\xFB\x8B\x61\xA6\xE4\x88\x60\xC0\x19\xA8\xD6\x93\x4B\x27\x39\xB1\xC8\xF9\xAE\x72\xA3\x03\x93\xCC\x69\x7F\x7E\xD7\x94\xCC\x23\x86\x2C\x04\xBD\xCF\x55\x80\x22\x39\xE4\x13\x39\xC3\x27\xD9\x7E\xEC\x00\x7A\x93\xFB\x9D\x41\x1E\x19\xB9\x2C\xF0\x88\x44\x1E\xC9\x91\x47\x46\xAE\x74\x39\xF1\x88\xD9\x9F\xCA\x33\x7D\xC9\x64\x84\x72\x09\xB1\xAA\x68\x7E\x99\x03\x93\x12\xDE\x2B\x9D\x04\xF0\x11\x3A\x57\x12\x7F\x94\xF0\x5E\x49\xA2\x6A\x84\x68\x6F\xAB\x55\xB2\x16\xBC\x7E\xF2\x24\x73\x05\xF0\xC5\xF9\x7B\x84\x18\xA3\x77\x4E\x32\x8D\x8B\xA6\x44\x1A\xDA\xFD\x13\x39\xCE\x81\x3D\x15\x4C\xDE\xC9\xA5\x93\x0C\xF9\x01\x86\xB1\x1F\x3F\xCF\xFC\xCE\x75\x97\xF9\x62\xDD\x6F\x6C\xFC\x4D\x7E\xE4\xA8\xAF\x8F\x3D\x81\x67\x97\xE5\x46\x24\x92\x38\x83\xF6\x47\x4E\xE0\x6A\xB8\x4B\x00\x90\xCC\xB1\x65\x63\x9D\x69\xA4\xCB\x27\xB2\x82\x97\xCC\x84\xEE\x15\x51\x42\x52\xFF\x48\x11\xB3\x69\x9F\xF0\x0D\x2D\x0A\x16\xD7\x62\x37\x39\x00\x9C\xE8\xEC\x5A\x6B\xD0\x19\x8D\x22\xA6\x01\x92\x6E\x3F\xDA\x08\x94\xB9\x39\x48\x5D\x10\x49\x28\x06\x48\x1A\x08\x78\x0B\xC4\xD2\xB4\x6B\x0D\x39\x85\x89\xEF\xDD\x64\x15\x38\x7A\x8C\xF6\x04\xCC\x95\xB1\x2F\x3A\x94\xAE\x7B\x05\x11\x49\x57\x03\xC1\xCD\x6B\x84\xF6\xAE\xB7\x36\xBE\xA0\xA4\x79\x98\x17\xCD\x45\xCB\xA3\xE8\x57\x47\x93\xAC\x8E\x06\xB9\x49\x2E\x5A\x1D\x9D\x69\x6C\x2F\xF4\xE4\x66\x6B\xA4\x9C\x59\x23\xE5\xCC\x1A\x49\x62\x7C\xFF\x44\x8F\x89\xCB\xB8\x07\xEA\x4B\x7F\xD3\xCD\x13\x55\xFA\x25\x18\x66\xFA\x66\x4B\xF0\xF7\x80\xC2\x5F\xD7\xFF\x99\xF4\xB3\x15\xB1\x57\x01\x71\x8B\x8E\x7F\x55\x25\x3A\x96\xF6\xA8\xFE\x97\x89\xBF\xC6\x55\x4E\x5B\x20\xD5\x53\x84\x9F\x7A\x51\xE3\x4E\xC0\x79\xF0\xD3\x8B\xFA\xDA\xE4\xC2\x8B\xFA\xBA\xC1\xA5\x17\xF5\xF5\x33\x37\xBC\xA8\x5F\x80\xB7\xAA\x5B\x16\x7A\xCD\x17\x6C\x27\x4C\x71\
xDB\xDC\x28\x5D\xAD\x08\xDE\x9C\xE6\x39\x0A\x0C\x00\x16\x74\x72\x77\x45\xC6\x8B\xB0\xBF\x40\xBB\x03\xE8\xF6\x04\xC3\x9A\xB6\x0E\xEA\xEB\x50\xE1\x94\x4E\x3E\x5E\x17\xF5\x6F\x9E\x0C\xCA\xAF\x9F\x77\xEF\xFE\xD4\x22\x37\xF3\x8C\x4B\xC4\xAC\x32\x9F\xF5\x8C\x29\xFD\xB8\xF3\xF7\x82\x44\xAE\xDF\xB8\x31\x74\x46\x3B\x51\xBF\x08\x55\x5B\xD9\xA0\xBF\x6F\xB1\xA3\xFD\x7E\x2D\x8B\xD0\xF1\x4D\x6A\xAD\x35\x61\xE6\xF8\xBA\xF3\x1B\xD2\x6F\xA8\xFA\x15\xA8\x9C\xC2\xEA\x81\x6A\x45\xDD\x39\xED\x2C\xF0\x4C\x87\x2B\x2B\x2A\x43\xAD\xF5\x77\x0B\x56\x9E\x2C\x48\xDB\x8B\xC5\x53\x78\x97\xCA\x93\x33\x18\x3E\xD6\xF4\xB9\xE0\xCF\x81\x25\x37\xE4\x2A\xAA\x7A\xDC\x4A\x6C\x02\x3D\x96\xC2\xE9\x35\xAF\x8F\xB4\x99\xD3\x9D\x57\xC7\xBC\xBE\xAA\xBE\x01\x85\x7B\x8E\xC2\x1D\xB4\x06\x97\x39\xD9\xC1\x1A\x04\xFD\x8C\xE0\x1E\xC8\x3E\xEA\xCF\x3F\x11\x27\xB9\x75\xF2\x32\x14\x50\xD8\x6B\xA3\x49\x68\xD1\x40\x70\x7D\xD0\x70\xBF\xAD\xFC\x86\x04\x61\x69\xFC\xB8\x73\x59\xE7\x2A\x18\x1B\x2C\xA1\x4F\x3C\x00\x4B\x29\x2C\xFD\x6D\x46\x92\xA2\x00\x8D\x6A\xD2\x6A\x6F\x8E\xC1\xAF\xB1\x57\xEB\x47\xE1\x6D\xBC\xBF\xCD\xAB\x63\x4E\xC3\x12\x08\x2B\x02\xDF\xCA\xD6\xE1\xEF\xF6\xA3\x17\x0B\xE5\x46\x13\x09\xFA\x47\xD7\x14\x6E\xD4\x64\x95\x2B\xE3\xD8\x41\x50\x54\xCC\x98\x23\x60\x49\x25\x95\xA8\x5C\xCE\x84\x14\x8D\x05\x91\x07\xC2\x19\x15\x16\x56\x5D\x2A\xA7\x5C\xE1\xF2\xB5\x3D\x0A\x70\x82\x9B\xC0\x37\xA9\x40\x61\x5B\x3D\xFB\xAC\x39\xEB\xCC\x7C\x85\x4B\x20\xB9\xEC\xEB\xFF\xBC\x90\x9F\xBE\xD9\xDE\xDE\xB4\xA5\xDE\xAE\xD7\xB4\x41\x68\x50\x3B\xFD\xC6\xA9\x53\xA7\xF4\xA5\x20\xA0\xBD\x5A\xF7\xFA\x39\xDE\x76\xAD\x75\xF2\xD0\x44\x38\xCB\x8C\x93\x05\x51\x2D\xBD\x7E\x2A\xE8\xE7\x8D\xE1\x4E\x60\xA5\xCA\x48\x73\x76\x9A\xA5\xA6\x57\xC7\x3A\x5E\xAC\x60\x95\x81\xFB\x5E\x22\xB3\x58\x20\xBD\xF4\x66\xDD\xEF\x3E\x72\xB4\x7F\x47\xD1\x3B\xAA\xA3\xCF\xB3\x63\xCE\x02\x28\x3F\x9C\xBE\xA3\x3B\x0C\xB6\x79\xD2\x91\xFE\x96\x01\x35\x09\xD6\x67\xDD\xB5\x62\x0D\xFA\xBD\x04\x96\xB3\xB0\xCD\x1B\x01\xB2\x09\x40\xD6\x9B\x75\x58\x41\x48\x2F\x5F\xE5\xD7\xAC\xDF\xCD\xED\x4A\xFF\
x44\x12\x03\xFC\xB5\x6C\x05\xC0\x6C\x7B\x80\x25\xC1\x0A\xE0\xE1\xC4\x10\xC0\x39\x4F\xBC\x58\x68\x67\x9C\x00\x46\x25\xC0\x2A\x9A\xCE\x68\xDB\xEF\x47\xE7\xA4\x71\x19\x30\x5B\x05\xF8\xAA\x9C\xF9\x26\x68\x7E\xF3\x4B\xBE\x95\x1C\x46\xBD\x6D\x8D\xC3\x2E\x4A\xBD\x58\x89\xB2\x10\x9B\xAE\xDF\xD1\x3B\xA0\x2E\x20\x9F\xC1\xFF\xD7\xF7\x80\xBE\x1E\x51\xBF\x12\x98\x8F\x7A\xA8\x7E\x3D\xF1\xDB\x3C\x20\x7A\x89\x2B\x6F\x77\xC6\xDF\x2D\x3A\x3F\xAA\x4F\x9E\x44\x77\x80\xBA\x9D\xC4\x70\xBC\x21\xEA\xBF\x61\xE1\x7C\xB7\xE8\xE8\x77\x87\xAF\x84\xDF\x9A\x7F\x00\xE7\x4B\x5A\x89\x1B\xA0\x97\x66\x5F\x3D\x88\x08\x83\x5F\xEC\x51\x05\xFE\xBA\x5B\x74\x7B\x40\x91\x71\xD2\x7F\xF2\x25\xB4\x07\xF4\x75\xEE\xBD\x0E\x2B\xC1\x03\x61\x91\xAF\x7E\xD9\xCA\x62\x9A\xA0\x22\x3B\x30\x11\xF5\xEF\x9F\x44\x17\x56\xFD\x81\xE8\x94\x97\x3C\xCB\x12\xAF\x4C\x1F\x53\x00\xDA\x53\xAB\xF7\x4F\x54\xDC\x3F\x97\x83\xFD\x73\xCB\x01\x30\xCE\xF6\x8E\x65\xBB\xAC\x0C\x1A\xAB\xF8\x35\xA8\x1E\xBC\x53\xAE\x43\x18\x41\xB8\xAF\xFB\x98\x20\xDB\xFB\xF3\x6D\xE2\xA4\xB6\xBE\xE8\x70\x8D\xC2\x06\xA2\x3F\x9F\x86\x81\x84\x56\x71\x5B\x20\xA3\xFD\x72\x74\x6A\xB9\xCC\x6B\x80\xD0\xB4\x79\xFD\x26\x78\x17\x03\x1E\xA2\x22\x61\x66\x37\xDF\x69\xE3\xA0\x70\x06\x20\x2B\x71\x26\xC2\xC8\xC1\x5C\xE2\x47\xC9\x02\x89\xFD\x98\x41\x38\x85\xE5\x60\x0F\x58\x1B\x01\xFA\xDB\xE3\xE6\x40\x8F\x98\x2A\xC1\x4C\xB1\xAC\x44\x53\xC6\x00\x8B\xF2\x50\xD4\xA5\x37\x6F\x7D\x33\x2C\x2F\x68\xAB\xAA\x5C\xC9\x1E\xA3\x7E\xBF\xC5\x32\x80\x26\x22\xB9\x2D\x13\x40\xCB\x08\x25\xE1\x9C\x9B\x83\xD6\x13\xE8\x22\xA9\xCC\x26\xA4\xE2\xCF\x4C\xBF\x6F\xE3\xF2\xFA\x3D\xD4\x32\x2D\xCB\x48\xB2\x10\xD8\xF2\x8B\x46\x66\x53\x87\x53\xB4\x6E\xE5\x1E\x55\x10\x23\x82\x00\x55\x7E\x17\x4E\x5E\x34\x4D\xE1\x59\x4D\x71\x1A\xF0\xAE\x58\xC3\xF9\x7C\xD9\x84\x9C\x26\x8D\x62\x8F\x17\xA8\x08\x7E\xF7\xCC\x67\xE4\x1F\x52\x64\x73\xD6\xDF\x08\x1E\xA2\xED\x15\xF7\x3A\x68\x90\xC9\x78\x91\x10\xE1\x4D\x7C\x8B\xEC\x02\xFA\x1D\xDA\x1D\x80\x72\x28\x2C\x02\x78\xAF\x35\x74\xB7\x55\x97\x11\x19\x60\x40\x1A\x4C\xD0\x01\x64\x44\x8F\xB5\
x56\x3B\xB5\x69\xCF\x6D\x6C\x91\xAC\x1C\xC3\x51\x9A\x8B\x40\x69\xB1\x2D\xEA\x98\xC3\x52\xA0\x65\x46\x92\x46\x73\xEB\x28\xE2\xE8\x33\x22\x01\xC5\x06\xDC\x5A\x00\x26\xC3\x4F\xC8\x58\x04\xAD\xEF\xE9\x30\x49\xA4\x20\xE1\x0B\xB6\x26\xBC\x9E\xBC\x8A\x51\x25\x59\x3F\x02\xBB\x00\x77\x3A\x05\x98\xBF\x9F\xC1\x9D\xB3\x0D\x75\xB1\x47\x8D\xBD\x45\x60\xAB\x77\x6B\x69\xA3\x44\x07\x5C\x80\xB5\xE6\x5B\x02\x9D\x02\x7D\x40\x36\x0A\xA7\xA0\x49\x45\x38\xD0\x8C\x7A\x20\x26\x7D\x1B\x3D\x60\xE4\xDF\xF3\x1F\x45\x43\xDB\xFA\x16\x8C\x60\xEB\xDD\x65\x13\xC9\xD2\x5F\x25\xE8\x90\xCE\x3A\xBD\xE6\xCC\x65\x13\xD3\x23\xE3\xFF\xED\xB6\xFD\x23\x91\x25\xFF\x06\x17\x67\xFA\x27\xE2\x7F\x45\x72\x47\x64\x42\x66\xD4\xBB\xC2\x11\xD5\xC1\x0E\x4D\x6E\xC1\xC2\xD0\x39\xB5\xC6\x2E\x2B\xC0\xF1\xC6\x49\x34\x67\x71\x6F\xAF\xBE\x48\xC8\xD9\x1B\x6A\xF6\x86\x4E\x6E\x10\x35\x0C\x37\x4E\x61\x57\x88\xBF\x46\x63\xEF\xB2\x4A\x50\x5C\x7B\x89\x5A\xFF\x37\xC4\xA5\x4B\x40\x25\x85\x88\xC1\xF0\x2A\xC6\x1B\x45\x3C\xC0\xFB\xF0\x76\xEE\x33\x27\x97\x68\x9D\xFD\xB2\x01\xB5\x4F\x26\x34\x9C\x27\x93\xDE\xA7\x36\xF9\x3F\x1C\x69\x2B\x01\xEF\xFF\xD4\x54\xE1\x5F\x59\xA5\xFF\x46\xD5\x19\xFE\xE5\x55\x11\x7E\x2A\xFE\x2B\xE0\x3F\x5A\xD2\x85\xAD\x32\x32\x98\x61\xBA\xAC\x39\xD3\x01\x92\xC7\xFE\x2B\x2F\xE1\x8D\x93\xFA\xEB\xC8\xCD\xE3\x1E\x6B\x30\x48\xB5\x7D\xEE\x9B\xCA\x7F\x75\xE6\x9B\x6A\xF8\x8D\x9E\xF9\xC6\x1E\x9C\x8C\xFC\xD7\xF9\x1B\xCB\xDF\x8C\xC2\x37\x16\xBF\x31\xFC\x8D\x58\x56\x3B\xF6\x4F\xCA\xB4\xBF\xD2\x7F\x69\xA6\xBF\x32\xF4\x86\x81\x73\x61\x62\x00\xB7\x9C\xDA\xE0\x2D\x31\xA0\x01\xFA\x95\x95\xC3\x98\x43\x03\x7C\x7D\xE1\x2A\x7C\x8A\xAF\xFB\x6D\xB8\x99\xF6\xD6\xB8\xFD\x15\xA8\x5A\x0C\xAE\xCA\xC1\xD5\x68\x70\x55\x0D\xAE\xC6\x83\xAB\xC9\xF6\xCA\x49\x92\x0F\x14\x0E\xE7\xA4\xDF\x05\xF3\x51\xED\xA7\x3D\x9E\x5F\xA3\x8E\x51\x1F\x39\xD5\x0B\x6B\x72\x78\x44\x21\xB2\x70\xC6\x87\x55\xE5\xED\xD9\x20\xBE\x39\xF5\x8B\x0B\x9F\xD1\xAE\x3E\x29\x5C\xB8\x31\x88\xAE\x72\x67\x1A\xE1\x14\xAE\x6E\xC2\xF5\x6E\x2F\xD9\xF5\x7E\x96\xC4\x0D\x6F\xA0\x05\x52\x42\x78\xDF\
x8A\x55\x6E\x30\xFC\xC0\xA8\xE7\xC5\x96\x0C\xE7\x8F\x86\x0D\x18\xC0\x43\x7D\x33\x29\x24\x18\xB9\xDF\x37\x2D\xBB\xC6\xE2\xAC\xF3\xB6\x6B\xD0\x1F\x55\xBF\x86\x17\x67\x0B\x46\x24\x3E\xB4\x40\x59\x5B\x85\x00\x07\x14\xF2\xA0\x17\xAC\xF1\x70\x32\x1A\x8C\x74\x59\x0F\x05\xC3\xD1\xE6\xB8\xBF\x43\xC2\x8F\xAD\x6A\x77\x08\x6D\xE8\x76\x75\x22\x2B\x7F\xDD\x4B\x07\x31\xDB\x86\x7D\xB4\x02\x70\x63\x9D\x21\xF7\x5F\x0A\xAC\xAA\xC2\x90\x9C\x8E\xE7\x11\x78\x5C\xFE\x35\x2F\x4D\xA3\x7B\x96\x41\x3C\xE0\xA7\xCE\x38\x5B\xFF\x32\xBC\x49\x43\x5A\x30\xE5\x73\x2F\xBB\x8B\x10\x56\x94\xB6\x79\xF2\x6F\x70\x71\xA6\x7F\xF4\xB2\xCC\xF3\x5C\xF1\x1D\x9D\xE7\xB9\xC9\x6D\x96\x57\x3C\xA2\x71\x8E\xFA\x0E\xFC\xCA\xE0\xD7\x18\x7E\x59\xF8\x35\x82\x5F\x06\x7E\x4D\x2E\x46\x0F\xB0\xB3\xBE\x84\x5F\xFD\xA8\x07\x84\xF4\x77\xA4\x03\x16\x3E\xEB\x9A\x9C\x75\xCD\x40\xC6\x3C\x6C\x55\x49\x97\xC7\xAF\xF3\x85\x5F\x8F\xD1\xCB\xEA\xF2\xFA\xB5\x14\xE4\x51\xCD\x22\x4E\x55\xE9\x27\xC4\x37\xAA\xA7\x0D\x4E\xB8\x83\x13\x80\xBB\x8A\xBC\x9E\x55\x18\x6F\xE2\x32\x67\x5D\xB6\x16\xD8\x77\xB0\xA5\x24\xAB\x17\xEB\x18\xDE\x33\x47\x18\xE9\x4F\x74\xDB\x26\x42\x2A\x6D\x4C\x56\xD8\xA2\x28\xF2\xA2\xAA\x7F\x97\x4C\x8A\x10\x1E\xBA\x3D\x46\x9C\x92\x82\x35\xA6\x98\x5F\xB1\xD6\x06\x38\x74\x3F\xE7\x74\x23\x9D\x42\x43\x5E\xA2\xD5\x9C\x30\x19\x4F\xBD\xE0\x6A\x26\x5D\xA6\xFE\x4F\x27\xE3\x16\x3C\xC6\x60\x53\x0F\x66\xAD\xBE\x6B\x83\xA1\x68\x64\x30\x6E\x74\xF0\x1A\xBE\x6D\x83\xE8\x74\x4D\xB7\x4D\x59\x81\x1B\x20\xF2\x08\xAD\x73\x60\x83\x84\xED\xC3\xFA\xED\xF8\x9E\x64\xE5\x50\x39\x49\xAA\x2D\x48\xDF\x31\x69\x77\xA1\x2D\xE9\xCB\xD5\x89\xA8\x3F\x78\x32\x88\x2A\xE1\xE4\xF0\xF3\x80\xC2\xA4\xFB\x9F\xEA\xB6\x59\xA1\xB5\x90\x9A\x03\x52\xEA\x5F\x4F\x42\x0C\x7E\x83\x7E\xB3\x69\xA3\x2A\xA7\xAA\x0D\xB0\x3E\x83\x13\x71\xC7\xFE\x60\xBE\xB5\xDA\x49\x52\xA9\x9D\xF2\xE5\x81\x09\x8A\x45\xEA\x25\xC0\x40\x06\x43\x7C\x6F\x00\xF9\xF8\x00\x7A\x5D\xCA\xD5\x89\x62\xBD\x6C\xF8\x2D\x3B\xE7\x51\x01\xFB\x2D\x29\x15\x43\x30\xF3\x1A\xC8\x3A\x1A\x3F\x80\xE3\x54\xFD\x1B\xC3\x8E\x60\xB4\x46\
x49\xD6\x1B\x17\x82\x17\xDE\x9C\xAC\x2E\x18\x02\xAC\x06\x6E\x61\xCB\x5B\x1C\xC2\x3F\xEB\x2D\x78\xD7\xC7\xDB\x29\xB0\xAF\x77\x02\x8D\x39\xF6\x17\xF4\x48\x8C\x9C\x38\xDF\xA9\x15\xB1\x57\xD7\x18\x85\x40\xA1\x08\x49\xDB\xE8\xB0\x77\xBD\x2B\x78\x4B\xAD\xCB\xAD\xB6\xFE\x0B\x1A\x83\x64\x50\x9F\x27\x7D\x19\x27\x8F\x44\x2F\xB7\xDA\xE7\xB4\xB7\x21\x48\xAE\x95\x4F\xE1\x5B\x72\xBD\x35\xEC\xD8\xD7\x13\xE1\xA7\xDB\xAB\xDE\x0E\x54\xC8\x15\xAA\xC3\xC6\x9C\x0A\xFE\xA9\x10\x35\x4F\xDB\x5F\xC2\x99\x3D\xE4\x30\x11\x8D\x72\xC2\xE9\x3D\x4A\x60\x38\x2B\x6D\xAF\xAC\xE1\xE2\x8B\xDB\x97\x9F\xA5\x09\xBB\xAC\x8C\xFF\x24\x3A\xA9\xF0\xE7\x17\xD1\x21\x85\x3F\xBF\x22\x2E\x16\xF4\x9D\xFF\x8C\xB8\x58\xB0\xDE\x69\x50\xB3\x31\x9E\xA6\x8E\x06\x2D\xF4\xDB\x38\x88\x07\xFB\x41\x7C\xB9\x1F\xC4\x5F\xF4\x83\xF8\x0C\x0D\x82\x2E\x0C\x41\x5E\xFD\x3B\x25\xB3\x40\xF9\xDD\x29\xE5\xE5\x0F\x69\x81\xB1\x20\xDE\xF9\xDF\x7B\x29\xDE\x63\x2F\x0D\x9B\xA8\xC1\x03\x14\x2C\x35\x52\xBB\xD7\xBA\xD6\x12\x0E\x32\xC2\x81\x05\x1C\xE4\x8C\x03\x03\x38\x08\x4B\x6E\xC6\x6A\x06\xBA\x3F\x2D\xA3\xC1\x06\x34\x64\x89\x77\x1E\xD0\x90\x47\x34\x58\x42\x8A\xC0\x50\x09\x44\x83\x5D\xEB\x5A\x03\x68\x08\x61\xC7\x49\xAB\x00\x68\xDF\x2A\x1B\x08\x7A\xA6\x71\x30\x4A\x10\x1F\xB0\xAE\x28\xBF\xBB\xF7\x64\xBF\x38\xC1\x4F\xB2\x97\x87\x54\x0A\x3E\x32\x42\xC8\x77\x06\x36\xB6\x8A\x8A\x7E\xB3\xCD\x56\xAF\x55\xD2\x4E\xFB\x55\x74\x97\x90\x7E\xDA\x6D\xD3\x52\x28\x5A\x09\x1E\xAF\x8B\xFA\x5D\xFD\x52\x70\xA1\x2E\xEA\xBB\xF9\x12\x5D\x65\x2A\x1E\x09\x65\xA3\x59\xFA\x31\x9D\xE2\xE9\x5A\x5E\x08\x2D\xA1\x49\x02\x9A\xB2\xB9\x89\xC3\x47\xD8\x24\xAD\x09\x8C\x23\x19\xBD\xD3\xEC\x90\x08\x23\xC9\x22\x8E\xE4\xDC\xC4\x91\x6B\x5D\xAB\x19\x47\x66\xA6\x55\xD5\x37\x89\x38\xD2\xCE\xCC\xB4\xAC\x9D\x09\x13\xDF\xDF\xDB\xAB\x2F\x7E\x5A\xDD\xBA\xAD\xDF\xDA\x9A\x0D\x90\x4B\x46\xDE\x1F\xF6\x0B\x23\x36\xE9\x88\x89\x31\xF8\x60\x91\x8D\xEE\x3A\xB2\x9D\x71\xD0\xBA\x1F\x34\x79\xAA\xE6\x07\xAD\xE7\x18\x43\x87\x41\x3B\xED\x5B\x52\x51\x4C\xD7\x66\x2E\x5B\x56\xA8\x88\x03\x8A\x54\x70\x8E\x52\xAF\x79\
xD0\xBC\x97\x55\xE1\x72\xDC\x62\x28\x48\xDD\x2E\x7C\x7B\x00\xFF\xB0\x22\xEE\x4E\x3F\xB4\x72\x66\x68\xF9\xD6\x87\x56\x9E\xDD\xD0\x5C\x81\x13\x86\xC6\x14\xC2\xE0\xB6\x06\x5C\xF6\x88\x01\xE7\x3F\xCA\xD3\x79\x16\xE7\x71\x5A\x15\xA0\x71\x9C\x67\x94\xB5\xDA\x2E\xF8\x27\x45\x85\x38\x5F\x9D\x98\xAD\x8F\x6B\xF4\x2D\x43\xFA\x59\x00\x57\xCC\x00\x57\x6E\x1D\xB8\xE2\x2C\x91\xFE\x25\x31\x84\x4E\x6F\x1D\xBA\xE2\x91\x83\xEE\xCB\x33\xD0\xA9\xAD\x43\xF7\x10\x66\xD3\xD9\x42\xF7\xA0\x38\xCD\x74\xD2\xDF\xEE\xE9\xF4\x17\xBD\xD2\xB3\x3B\xEA\x3C\xBB\x51\xE5\xA1\xDF\x9A\xB7\x9A\x0A\x68\x42\xF9\x49\xE7\xB5\xFF\xEB\x19\x75\x66\x28\x9A\xD5\x60\x4C\x33\xF8\xDE\x3F\x31\x8F\x14\xA6\x9D\xDA\x25\x26\x01\xD7\x70\xB1\x0D\xA3\xFF\x00\xF1\xAA\x03\x1B\x20\xA3\x35\x2C\x87\xC6\x73\x94\xD1\xC1\x59\xBF\x35\x66\xD1\x8F\x1C\xB3\xB4\x03\xE1\xB6\x47\x85\x98\x63\xE8\x77\x26\xB2\x78\x07\xBA\xED\x9D\xA2\x7D\xAD\xFA\x3D\xE8\x32\xF8\x4B\x05\x56\xFB\x89\x79\xAB\x3D\xEE\xE9\x4D\xBB\x6D\xB9\x90\x5A\x99\xCC\xE6\x21\x52\x26\x6C\x7D\x24\x0A\xB8\x3A\xAD\x02\xAE\x4E\xAB\x47\xA8\x99\xD5\xDE\x9C\x41\x8F\xC0\xA0\x22\x27\x59\xCA\x44\xAD\x7B\xDE\x8A\xF8\xE2\x02\x05\x9C\xAD\xE6\xDF\x4C\x2C\xE8\x77\x27\xBF\x7F\xAB\xFF\xED\x3F\x22\xFC\xC7\x84\xC7\xB8\xA4\xFA\xBD\xD1\xB9\x48\xE7\x60\x93\x17\xAE\x4B\x5F\xC0\x46\xDE\x97\x34\xF2\x35\xE1\xBF\x2E\xFC\xF5\xFD\x3B\x64\xA2\xFF\x7B\x23\xCD\x34\xA0\xD3\xF6\xEC\x94\x11\x3A\x73\x42\x67\x96\xB2\x53\x16\xD1\x99\xF7\x4A\x66\x36\xAF\xBA\xE6\x33\x0A\x66\x31\x50\x5D\x33\x42\x67\xD6\xAB\xAE\x16\xD1\x69\x9D\x42\x6C\x12\xB2\x64\x50\x49\x81\x17\xD0\xCB\x62\xEB\x5F\x3B\x19\xEC\x6B\x66\xE7\x47\x15\xF8\xFE\xA2\x59\xF0\x2B\xA7\xF0\x13\xE9\x94\x33\xE7\xE0\x34\xA0\xA0\x22\x0A\xC0\x39\x1B\xD8\xD5\x37\x09\xBB\x3A\x4B\xD4\xEB\x05\xA8\xCF\x2B\x97\x57\x63\xDC\xB0\xE2\xC9\xFA\xA0\x94\x7A\x8A\x53\xF3\xAD\xE1\xDC\xD8\x70\x3A\xDA\x70\x80\xBD\xD5\x34\x08\x95\x4E\x47\x15\x07\xA1\xFB\xE9\xA8\xE6\xA7\xA3\x3E\xED\x74\x54\x34\x08\x35\x3F\x1D\xBF\xD2\x4F\xC7\xCF\xF4\xD3\xF1\xC1\x7E\x3A\x2E\x47\x9B\xFE\xC2\x45\x26\x7D\x32\x5D\
x92\x29\x55\x7D\x56\xF5\x3E\xF9\x61\xAC\x7A\xD0\xF2\xEB\xE4\xB4\x9D\x59\x9D\xD0\x0E\x14\xE1\x0C\x16\x10\xDC\x11\xD3\x7E\x67\xE7\x31\x34\x42\xFB\xBA\xC3\x40\x6C\x0E\xCB\x46\xA9\x55\x74\x4E\xD5\xBF\x8B\x5F\xD8\x89\xF0\xCF\xA7\xE0\x60\xE1\x6C\xFD\x7B\xC8\xF5\x19\x35\xD7\x18\x97\xD5\x6F\xA7\x6D\xEB\xFA\x03\x14\x39\xC1\x6E\xC0\xC4\x06\x4D\x98\xC9\x26\xCC\x94\xA7\xCC\x84\x1D\xF4\xCB\x82\x41\x7E\x32\x4C\x0A\xD3\x2F\x0B\x49\xE0\xEB\xC0\x0A\x35\x73\xFC\x64\x82\x15\xEA\x8C\xBF\x78\x96\x9F\xE6\x45\xFF\x5C\x10\x2D\x73\x97\xDF\x08\x13\x16\x03\x81\x84\xDF\x18\xAE\x45\x43\xFA\xEB\xB5\xD0\x3A\xB5\xDA\xF3\xA4\xEA\x71\x91\xF8\x68\x66\x16\x08\x92\x25\x8F\x8C\x7B\x46\xCC\xE3\x60\x59\xB1\x6F\x59\x10\xF1\xBC\xA8\x9E\x3C\xF0\xC3\xC9\xE0\x6C\x88\xDC\x22\x23\xB7\x48\x0A\xE2\x97\xC0\x2B\x82\x78\x05\xBD\xCD\x89\xDB\xED\x29\x9B\x34\x56\x77\x2B\x1B\x94\x27\x21\x69\x46\xEE\x51\x3B\x4F\xDB\x5A\x25\xBC\x58\x56\x0F\xBC\xF5\x3A\x98\xFA\xB0\xB2\xFB\xBB\xDE\x72\xDD\x86\xD8\xA3\xEE\x81\x3F\x5E\xF8\xF7\xD0\xE5\xDD\x74\x09\xED\xDE\xC5\x4F\x5E\x4D\x4F\x6E\xE5\xCB\xD7\xD3\xE5\xCD\xFD\x8B\xAF\xE6\x27\x5F\x7D\x33\x3E\xB9\x81\x2F\xAF\xA7\x17\x37\xFA\x17\xF1\x05\x2F\xFC\x7D\xF4\xE2\xE7\xF9\xF2\xD3\x74\x79\xFF\x9B\xE3\x8B\xF7\xF1\x93\xBB\xE8\xC9\x3D\x7C\xF9\x1E\xBA\xBC\xBB\x7F\xF1\x2E\xFA\xE9\xC4\x1E\x1A\x9B\x17\x80\xE9\xF7\xBF\x25\xFE\x7C\x4F\xFF\xF3\xF6\xFE\xE7\xEB\xFB\x9F\x2F\xED\x7F\x5E\xDF\xFF\xFC\xE2\x9B\xE3\xCF\x4F\xF7\x3F\xDF\xDF\xFF\x7C\x4F\x0F\xC3\xAB\xDF\x16\x7F\xDE\xD4\xFF\xBC\xA1\xFF\xB9\x01\x3F\xAB\xEB\x72\x99\x71\x18\x81\x17\xBB\xC4\xCD\x70\x13\x58\xD5\xDF\x79\xD7\x0B\x37\x7A\xF8\xF7\x61\x32\x97\xFF\xB1\xA8\xA3\x7C\xBD\xBE\x47\x3D\xF8\xE8\x26\x90\x17\x5E\x5E\x4C\x64\xC1\xFD\x40\x98\x2F\x6F\x81\x46\x9C\xB8\x8C\x82\xF2\xE1\xCE\x87\xFA\x3B\x8A\xEE\x3C\xD8\xDF\xD1\x8D\xF4\x77\x02\x8C\x40\xC2\x65\xF5\xAA\xB7\xE0\x93\x43\x74\xDA\xC5\xAE\xFB\x3B\xE1\xD5\x0E\x75\x36\xBF\xA3\x6B\x74\x1F\x5F\xB0\xAC\xF0\x9C\x00\xE8\xD8\x98\x11\xC6\x69\x3C\x63\x46\xB2\x4F\x2E\x2B\x14\x9B\xE8\xC6\x51\x78\xAC\x12\x4F\x57\xE2\
xAE\x83\x1F\x63\xFE\x97\x65\x65\xBA\xC6\x86\x06\x50\xFC\x29\x6A\x46\x91\x03\x48\xB4\x19\x26\x79\xA2\x6D\x58\xE8\xBF\xB5\xDC\xB2\xA0\x8E\x0C\x9E\xC7\x07\x6B\xA0\x20\xBF\x2B\x5A\x0A\x1A\x96\x84\x35\xBF\x0E\xF6\x00\x86\x7E\x81\x94\xC4\x83\x9D\x24\xD1\x10\x10\x13\x7C\x84\xF0\xF2\x21\x8E\xBA\x33\x2D\xEB\x2C\x74\x0C\x3F\x81\x1B\x10\x80\xCF\xFD\xB8\x6B\x4C\xE5\x7B\xD9\xD4\x4A\x47\xD1\x20\x78\x85\x4A\x07\xA0\x18\x2E\x68\x97\xB8\x3E\x86\xF2\x7B\x9F\x38\x8F\xBE\xC2\x47\xC8\x60\x1D\x73\x58\xC5\xBD\x30\x88\x55\x48\x16\x26\xFD\xB8\xAB\xFE\xD2\xC8\x32\xE6\x07\x4A\x28\xEC\xD7\xBB\x56\xE1\x2E\xFE\x1C\xAD\xF5\x1C\xAD\xF5\x90\xD6\xA6\xA7\xB5\x9E\xA3\x35\xAE\xA0\x35\x85\x0F\xEE\xE8\x9A\x0C\x53\x1D\x35\x39\xC6\xF3\xB5\x05\xC5\x05\x39\xD9\x94\x88\xDA\x56\x72\xA0\x14\x45\xB1\x49\x97\xAD\x92\x8D\x48\x3B\xD2\xC0\x21\x25\xA0\x99\x76\xFB\x5D\x09\x57\xA2\x95\x9D\x1F\x77\x0E\x33\xF2\x94\x68\x11\xF9\x31\x12\xCB\xF8\x9D\xCC\x4C\xC5\x81\x89\x8A\x28\x46\x75\x53\x12\x4B\x94\xF8\xA9\x5D\x9D\xA0\x97\x14\xB5\x3F\x5C\x48\xD0\x11\xE9\x41\x65\x28\x81\xF4\xA0\x5A\xE2\xB7\xAE\x70\xA0\xC9\xE1\x86\x6E\x89\xCB\x30\x2C\x97\xA4\xA3\x3A\xE1\x8F\x77\x2D\x86\x1E\x22\x4F\x95\xAC\xB0\xFA\xE7\x75\xAE\x64\x26\x13\xC4\xBE\xCA\x69\x1C\xEF\x42\x48\xE5\xE9\x21\x55\x5B\x83\x14\x13\x46\x01\x60\x16\x0F\x92\xCC\x43\x65\x23\x54\x03\x28\xC4\xE9\xA1\x90\x5B\x82\xA2\xAA\xAA\xBF\x27\xE5\xD4\x71\x80\xAD\x23\x86\xC3\xDC\x6D\x68\x50\xC8\x43\x4B\xF5\x8D\x7C\x70\x9A\xB5\x09\x27\xAB\xBF\x83\xEA\x8F\x2F\x0E\x4D\x84\x3F\x35\xDC\x99\x57\xEB\x1E\x65\x16\xB4\x51\x5D\x78\x16\xEF\xD1\x29\x9A\x5D\x52\x4D\x7D\xD0\xC8\x50\x4A\xF2\xEE\x29\x70\x37\xCC\x96\xD7\xBF\xED\xBA\x8D\x78\xD2\xE6\x07\xE1\x6D\xB1\x4C\x37\x1B\x11\x3F\xA4\x79\xC5\x1F\x82\x78\x97\xE1\x43\x7C\x62\xAA\x73\x05\xCD\x4D\xBC\xB9\xAC\x42\xBB\xD5\xF7\x0A\x27\x6E\x0F\xE7\x60\x92\x17\x48\xE5\xC3\x0D\xD6\xEA\x9F\xF4\xB1\xDA\x03\xAD\x07\x34\x9B\xDB\xEB\xF7\xF3\x4E\xB8\xA0\xC9\x1B\xBF\xC7\xA3\x33\x6F\x3C\x19\x15\x4D\x90\x2E\x74\x60\x26\x51\x81\xDE\x31\xCC\x44\x23\x82\xEE\x0F\xAB\xF1\x03\xBC\
x1A\xDF\x0A\x7F\xEB\x9B\xE9\xF0\x04\x20\xE8\x2D\xB8\x34\xBC\x1F\x6F\xBF\x96\x6F\x9B\x3D\xEA\xD3\x6F\x8B\x0B\xCD\x7D\xF4\x93\x4E\xBE\x03\x3F\x3C\x48\x48\xA4\x74\x27\xD2\x4B\xFA\x1A\x25\x75\x4C\xF6\xC7\x39\xDC\xFC\x83\x37\xF6\xF4\xA2\x05\x08\x21\x01\xDA\x05\x6F\x0B\xC1\xF4\x1D\x0B\x3D\x7E\x5E\x3D\x46\x84\x7C\x0A\x7C\x7C\xBE\xAA\xDE\x5A\xA4\xE7\xAF\xEF\x15\x9B\x45\xE6\x73\xCA\x13\x4E\x0D\x75\x30\x3D\x89\x15\xF6\xB0\x94\x90\x06\xA3\x31\x1E\xAF\x8B\x1F\xD2\xB4\x05\xE5\x3A\xEF\xFC\x2D\x2F\xA3\xBC\x0A\xE8\x2B\xF4\x0E\x5A\xBA\xED\x65\x3D\xCC\x21\xFB\xD6\xC3\x8C\x45\x11\xB0\x88\x10\x3F\xD9\x89\x88\x45\xE9\xDF\x2F\x06\x19\x1F\x9F\x7C\x5A\x34\x2A\x1A\xC7\xAD\xFC\xED\x3D\xA2\xAB\x6F\xC3\x24\x05\xAA\x4F\x1C\x06\xB4\x68\xB4\xD3\xE8\xF3\xA3\x33\x2B\x68\x21\x39\x4D\x22\xA5\x15\x1D\x1F\xC6\x78\x58\xC7\xA8\x07\x63\x74\xD0\x7B\x1C\xA3\x1B\x0C\xD1\x9D\x61\x88\x22\x7E\x17\xC7\x47\x1F\x1E\x84\x0F\xDF\xFB\xB2\x41\x0A\xAA\x17\x71\x8E\x86\x9E\x72\x4E\x5C\xA8\x8B\xD6\xA0\x4D\x54\x1C\x5F\x39\x75\xEA\xD4\xA9\xED\x3F\x0E\xAA\xC2\x2D\xCD\xA3\x79\xCC\x29\x4D\x3F\xC8\x83\x72\xD2\x99\xBD\xBA\xEE\xD9\xF7\xB3\x2F\x0B\x26\xDF\xFF\x68\x1C\x5C\x25\x24\xBE\xE1\xE5\x69\x32\x3B\xCA\xF2\xF7\x85\x94\xEC\x32\xD9\xC7\x56\xD5\xCB\x4E\x93\x5F\xAF\x3F\xCA\x28\x43\xFE\x33\xCC\x1F\xF7\xA1\xC8\x2D\x9C\x3B\x2E\xA4\x9B\x52\x71\x3D\xA2\xE8\x35\x51\xBF\x31\xCC\xAB\x3E\x6F\x18\xC5\x06\xD4\xEF\x3A\xB9\x28\xCB\xDE\xF3\x1F\x06\x68\x36\x83\x02\x35\x85\xD3\xF6\xFE\x91\x6C\x51\x0E\xDD\x99\xEE\x87\x49\x06\x8B\x47\x74\x56\x8C\x07\xB3\x62\x41\x7A\xDB\xB3\x92\x04\x45\x57\xFF\x45\x92\xCC\x97\x52\x9D\xD1\xFD\xFB\xAE\x0D\xD1\x7B\x21\x17\xD7\x1B\x5F\x3E\x10\x11\x44\x36\x1D\x0E\x58\x53\x72\x16\x3E\x9F\xAD\x52\x7C\x3C\xB2\xE2\xE1\xA1\x23\x02\xCC\x99\x04\x11\x9F\xBA\x76\x98\xD5\x78\x30\x5C\xC5\xC9\x68\x64\x83\x27\xDB\x85\x57\xF5\x5F\x21\x30\xCA\xCB\x75\x5A\x0D\xC8\xEE\x32\x9D\x53\x3E\x22\xF1\xAF\x53\x0E\xDC\x14\x89\x31\x99\xA0\xC4\x60\x93\x87\x19\x5D\x76\x06\x5D\xF6\x21\xA3\x0B\x0D\x0A\x3F\x4A\x91\xF6\xB5\xD3\x21\x0D\x86\x13\x57\x92\x88\
xC2\xF9\xC9\xF5\x51\x73\x16\x93\xCB\xA7\x13\xAB\xCF\x06\x04\x32\x26\xCD\x81\xA7\x1F\x66\xEC\xA9\x19\xEC\xA9\x87\x88\x3D\x3D\x9C\x75\xA7\x65\x36\x11\x99\x4D\x55\x98\x07\x60\xC4\xCC\x26\xE6\x99\x4D\xF8\x11\xFA\xFE\x23\xB3\x99\xCD\x5A\xD5\x8B\x98\xED\xE1\x46\xD7\xC3\xC7\x6C\x60\x29\x2B\xAF\x1E\x02\xB3\xCD\x70\xD7\x4B\xD4\xE6\x39\x52\x67\x73\xA3\x7E\x67\x89\xEE\x59\x26\x7A\xEF\x02\x26\x9A\x39\x34\xFB\xAA\xFF\xB1\x90\x21\x96\xA9\xAD\x14\x2B\x9F\x99\xC1\xCA\xC7\xCF\x8C\x95\x9F\x57\x8B\xD2\x74\x86\x4C\x24\x21\x3B\xDD\x30\x21\xEA\xC3\x8D\x16\x13\xD0\x12\xF2\x77\x46\xB4\xA8\x14\x2D\xEA\x2C\x04\x8D\xEB\xBF\x4C\xF0\x11\xCE\xCE\x3E\xB0\x68\x19\x97\x01\x2F\x83\xE8\x83\xFF\x09\xB8\xE5\x73\x33\xDC\xF2\x8D\xFF\xC9\xB9\xE5\xCF\x67\xB8\xE5\x95\x37\x9E\x35\xB7\xDC\xB7\x10\x2F\x21\xB7\x1E\x66\x7D\xA4\x5F\x73\xD9\x7B\x67\x7D\x16\x98\xAA\x45\xEE\x9F\xC8\xD0\xD3\x5C\x3A\xE2\xEF\x18\x7C\x7E\xF4\xDA\x05\x59\x77\x13\xFC\xDE\x7C\xE3\x60\x89\xFE\xC4\xC6\xA2\xBC\xC8\xAF\xCD\x07\x0A\xD2\x2C\x66\x75\xC4\x2C\x67\x0B\xE3\x3C\xD5\x84\x59\xFC\xAD\x7B\xCC\x4A\x67\xF0\xA0\x7B\xC4\x2C\x63\xB4\xD7\x33\x1A\x0C\xBE\xE8\xB5\x0E\x27\x41\xE5\x3D\xB4\xD4\xDA\x81\xFE\xA1\x40\xFF\xB0\x7E\x84\x0B\x75\xD0\x3F\xCE\x4B\xC6\x96\xAE\xCF\x21\x19\x15\x7A\x0C\xA5\xB3\x98\x76\x4A\x7A\xF9\x04\x49\xE7\x8C\xF1\xDC\xDD\xB4\x6B\x05\x9E\xF8\x8F\xD9\x2D\xD1\xBF\xF8\x30\xD3\x3A\x9B\xA1\x75\xF6\x10\x69\x2D\x9D\x70\xD6\x8B\xA8\x10\xAB\x54\x47\x59\x84\x03\x1C\x0E\xE9\x28\x8F\x90\x69\x64\x67\x86\x66\x1F\xE2\xD0\x04\x06\x73\x27\xC3\xFA\xE4\xCC\xB0\xEE\xB8\x71\x90\xFD\x7A\xC0\xDD\x81\x7A\x8F\xD2\x29\x2A\x13\x5D\x3C\x99\xA2\x21\x55\xBB\x98\x9B\xA2\xEF\xBC\x71\x3E\x5F\x2A\x9E\x8B\x4E\x4F\xD2\xFD\x2F\xE1\xB7\x40\xF8\x7D\xFC\x0C\xC2\xEF\x3F\x9F\x8D\xF0\xFB\x5F\x98\x5D\x80\xD9\x4F\x9E\x01\xB3\x1F\x3E\x1B\xCC\xFE\xD2\x69\xD4\xBB\x61\x86\xE2\xEF\x28\x35\x2F\xD1\xEE\xFE\x64\x66\x4A\x07\x2D\xEF\x73\x37\x0E\x7C\x3B\x11\x3D\x03\xC3\x11\xCF\xE3\x2D\x3C\xA5\xA2\xE6\x0B\x6C\x60\xF9\x21\xCA\xED\xC6\x99\x8F\x65\xC8\x72\xCD\xF9\x30\x63\x46\x6B\x0A\xF7\
xD5\x55\x5F\x3B\x42\x12\x3B\xDC\x17\x0D\xF8\xC0\x10\xDF\x48\xE0\xE4\x83\xAE\x9A\x4F\x73\xCB\x83\x9C\x81\x43\xD4\x1F\xDB\x08\x81\xCB\xBF\x78\x53\x4A\x76\x67\x38\x5B\x0B\xA7\x49\x6E\xCF\xB2\x27\x67\xFC\xF4\x12\xDA\xD3\x3D\xFB\x1E\x3E\xB9\xC5\x1E\x00\xFE\x2A\xE6\xF6\xAE\x7A\x2F\x8F\xA6\x64\x86\x33\x49\xBF\x87\xAA\xE6\x8D\x4A\x96\xD3\xB0\xA5\x3A\xA8\x35\xA0\x42\x81\x1F\xAE\xD8\xC1\x49\x22\x39\x3F\x90\x5D\x56\x85\x97\x07\x30\xF4\x20\xF3\xA6\xC9\xE1\x3D\x3E\xBC\x63\x30\xCB\x8E\xCB\x71\xBF\xB8\xE0\x44\x42\x1A\xB3\x77\x15\xE1\xEC\x2B\x6D\x9E\x7A\xD1\x14\x4E\x63\x7E\x32\x4A\x90\x0B\x5A\x56\x1F\x79\x9D\x09\xCE\xB3\xEB\x65\x53\x54\xF0\x22\x26\x03\x2A\xFC\x14\x43\x51\x5C\x81\xE9\x4F\x0B\x4C\x6B\xE6\xF4\xA1\xA6\xE4\xE4\xB5\xAE\x9C\xC8\xAA\x72\xC5\x04\x53\x83\x59\x4A\x0A\x44\x80\xD3\xB1\x6B\xDE\xBA\x9D\x2D\x7A\x54\x39\x49\x36\x09\x73\x88\x5C\x58\x7A\xE8\x78\xB7\x4D\x49\xAE\x1C\xC5\x65\xB3\xB8\xEE\x50\xA8\x94\xA5\x92\xFC\x3E\x0B\xCB\x0B\x85\xAA\x58\x94\x52\xDD\xFF\x3A\xB2\x02\x86\x5A\xD6\x37\x62\x1F\xE9\x9D\x97\x20\xC6\xE6\x8A\x1C\x0D\x3A\xF3\xEF\xE8\x3F\x18\x7C\xFC\x22\xBC\xF3\xAE\xE4\xE9\xBB\x67\x9F\x9E\x0E\x50\x40\x09\x45\x6C\xFF\xB4\x34\x11\x2D\xB8\x7B\x2F\xFA\x88\x68\xA1\x17\x56\xAE\x52\x02\xBE\xA4\x24\xE0\x22\x84\x5F\x36\x74\x18\xA6\xFE\x62\x9C\x08\x74\x2E\x3D\xF2\xBF\x53\xA1\x61\xA6\xC8\x7F\xD0\xCA\x4C\xE5\x09\x4A\x25\xEB\x92\x33\xE2\x8B\xC3\xF5\xB3\x8C\xEA\xC9\x56\x8E\x36\x45\xFA\x38\x7E\xAA\x5E\xAB\x4C\x05\x8D\x30\xCF\xAB\x64\x24\x7A\xFF\x84\x6B\x7E\x39\x4D\xC0\x82\x8A\xC3\x51\xA3\x5F\xDA\x08\xE9\xB0\x30\x18\xC4\x04\x16\x82\x77\x7F\x2D\x70\xF8\xD8\xC4\x24\x5B\x1C\x89\x6C\x28\x09\x98\xED\x8B\x52\xD9\x61\xA5\x33\xAA\x66\x66\x70\xDA\x00\x16\x16\xF4\x66\x42\x22\xA8\xF9\x82\x67\x14\x6F\x8D\x39\x65\xC6\xBA\x8A\xF9\xC9\x02\x6B\xAE\x08\x9C\x9C\x69\x25\xB3\x0E\x83\x9D\x5D\x46\xFB\x37\x2B\x23\xFA\x5B\xBF\x10\xFF\xBB\xB0\x73\xCE\x6F\xC5\xE5\xDD\xB2\x15\x79\xBC\xCD\xB0\x24\xC7\x3F\x8A\xF9\xAD\x90\x43\x3E\xA0\x95\x65\x3A\xF1\x2C\xBB\xED\xE4\x5C\xE6\xFA\x6C\x35\xA6\x4D\x4B\x49\x83\x55\
x74\x85\xD4\x91\x2C\x9C\x2C\x02\xD3\x14\xC0\x5A\x75\xA8\xD5\x4B\xAD\x89\xA4\xB2\x00\x1B\x17\xA8\xD0\x4B\x3C\x40\xED\x2C\x8D\x80\x72\xF5\xDA\x48\x43\xFE\x55\xFF\xD9\x46\xCC\x9A\xAC\x9C\x75\x59\xA0\xA1\x89\xE1\xFD\x98\x19\x4C\xC5\x74\x02\x3A\x14\x33\x34\x60\xF9\x21\xE7\x65\x3D\x2E\xB3\x99\x44\x63\x76\x11\x25\xFF\xAC\x47\xA6\xEE\x29\xD9\xA7\x21\x0B\x94\xE4\x24\x5F\x78\xC6\x06\xBA\xEA\x17\x29\x47\xA5\xDE\xE0\xE6\x8A\x68\xF2\x98\x92\x4C\xC4\x54\x62\x98\x2C\x6C\x73\x7A\xCE\x80\x30\xCC\x57\x96\x03\x3D\xF3\x40\xCF\x8A\x47\x09\x80\x55\x6F\xE7\xEA\x79\x69\x6C\x43\xB6\x3A\x24\xE7\x81\xB4\x82\x4D\x7A\x91\xAF\x4E\x64\x3F\xC9\x64\x3A\xC9\x7A\x71\x31\xC8\x9C\x27\x40\x5C\xD0\x62\xD0\xCF\xBB\xC6\x84\x2C\x13\x14\x2C\x63\x43\x7A\x46\xC0\x60\xA0\x9F\x4C\xE7\x20\x96\xAD\xF9\x7E\x4A\x69\xA8\x67\x20\x07\x69\x84\xA1\x3B\xD4\x98\xE0\xA2\x84\x86\x52\x1E\xFC\xF5\x82\x12\x3D\x5B\x1C\x6F\xCF\x36\xFD\x88\xCD\xE9\x47\x6C\x28\x43\x63\xE4\x98\x86\x8A\x2E\x70\xA6\x0A\xB8\xCD\xCD\xA0\x3D\x3F\x2F\xB5\x42\xF3\x48\xDB\x90\x97\xAE\x47\x98\x0C\x08\x93\x09\xC2\xC4\xB0\x55\xA7\xAA\xBF\x4F\x08\x9B\x1D\xFA\x60\xDE\x22\xE6\x1E\xDC\x88\x87\x78\x6E\x52\xB0\x26\xF4\x13\x56\xCC\x10\x19\x65\xE3\x1B\x89\x8A\x48\x23\xFA\x6D\x31\x09\x5E\x13\x0E\xB6\x21\x25\x1B\x9B\xA4\x17\x44\x24\x5B\x64\x04\x78\x67\xFE\x2E\x4E\x58\x02\xC4\x54\xA9\x38\x1E\x4A\xE3\x8A\xF7\xEC\x67\x6A\x91\x0E\x53\x31\x06\x7E\x84\x4F\x61\xD5\x7E\xD3\xC9\x58\x9F\x08\x43\x47\x13\xAC\x69\x3C\x5A\x12\x33\x34\xCE\x95\x22\x55\xD5\x8F\x2F\xC6\x62\x42\xF7\x79\x9E\x71\xB2\xFE\xD3\x8D\x48\x29\x78\xF8\xDF\x36\xA2\x14\x26\x44\x3F\x9C\xCD\xFE\x59\xDF\xEC\xAB\x16\xCC\xEF\x7C\x75\xD8\x90\x18\x2E\x67\x7A\xF3\xA2\xAE\x67\xC8\xE5\xB8\x58\xF0\x0D\xB3\x5C\x22\x0E\xDF\x73\x36\x50\xE1\xC2\x40\x81\xC6\xE3\x56\x03\xB1\x0E\x2D\xD5\xFF\x25\x08\x1B\x4E\x77\x8A\x2F\xD1\xC2\x11\x98\x20\x2C\x18\x72\x93\x94\x91\xA2\x1F\x84\x88\x83\xA0\x19\x24\x92\x51\xE0\x11\x0F\x81\x59\xC0\x98\x13\x58\x6D\x8A\x9C\xF0\x41\x19\x53\xF0\xCD\x8E\x83\x62\xD1\xFA\x5A\xA3\xD1\xE8\xD9\xA4\xDE\xA8\x93\x98\xE8\
x19\xFD\x8D\x7D\x3D\xCB\x7C\x75\x42\x06\x8C\x0D\x34\xA2\xA7\x7F\x8D\xF4\xA5\xAC\x57\x74\xE7\xAB\x44\x71\x67\xF9\xE8\x8E\x6A\xAC\x53\x58\x48\x61\xB6\x72\x29\xE5\x12\x4C\xAA\x97\x82\xFE\x15\x16\xC3\xEA\xBE\xAD\xB0\x4B\x58\x38\xCF\xC8\x30\x03\xFE\x38\x23\x7B\xE0\x6C\x16\x9B\xCF\xE6\xD3\xB7\x89\xD3\x56\xC4\x36\x45\x20\xD6\x1B\x64\x3C\xE8\x30\x18\xD9\xDC\x60\x48\xEB\x38\xED\x78\x28\x03\xE2\x90\x4E\x01\x1E\xA2\x4D\xE6\x14\x53\x22\xDB\x3F\xD1\x2E\x6B\xD4\xDC\xB0\xE1\x26\xA8\xCE\x72\x76\xF8\x98\x80\x9E\xCA\x09\x48\x72\xFB\x52\xAD\x81\x09\x79\x51\xD7\xAA\x27\x63\xE2\x21\xA7\xA2\x54\xC3\x0C\x9A\x94\x48\x13\x33\x3B\xF2\x11\x38\x67\x9E\x02\xB6\xDB\xF6\x6A\x26\x57\x9B\xF2\xD3\x8E\x34\x6F\x2F\xAA\x6D\x60\x9C\x3C\xAD\xC3\xCC\x97\xA3\x43\xD5\xEE\x98\xD4\xC8\x1F\xEB\x5A\xE9\x1F\x77\x29\x9A\x7F\xCA\x4B\x27\xD7\xFD\x8B\x36\x36\xD4\x11\x3C\x7D\xE6\xCB\x03\x24\x60\x30\x6C\xF8\x13\xF2\x88\x7F\xD7\x94\x1A\xC9\x0E\xA5\x0D\x1E\xAD\x3E\x24\x95\x3E\x61\x30\x91\xE9\xCA\x93\x5E\xD8\xEA\xE3\x4E\xBF\xA0\xD1\x4E\xDD\x0E\x0B\x06\x1D\x09\xF0\x62\x59\xDD\x4B\xA1\xEA\x00\xF5\x14\xB4\xDD\x13\xCF\xBF\x25\x1E\x9E\xA5\x8C\x7C\xB6\xD1\xCE\xAE\x88\xA7\x4F\xA8\x6E\x36\x16\x84\x73\xCA\xEF\xEA\x5F\x89\x67\xA2\x24\x20\x8F\x9A\x06\xE3\x8F\x90\x87\x75\xAE\xA4\x9F\x5E\xDD\x85\x19\x99\xA1\x85\x95\x51\xFE\xB7\xA6\x70\x39\xD8\xE3\x5D\x5B\x72\xBF\x25\x67\x20\xE0\xD4\xAC\x88\xB5\x40\xA3\x17\x4B\x25\x4F\x20\x19\xA4\x97\x4F\x25\x9C\xAF\xB0\x32\xB2\x4B\x08\xBF\xEB\x40\x6B\x1C\x3C\xC0\xD2\x32\x0D\x2E\xF2\x6B\x0D\x28\xEB\xBA\xA7\x99\x0D\x78\x2A\x81\x50\x27\xB6\xC7\x62\x2C\x7A\x65\x74\xC2\xD9\x3B\x56\x4E\xC9\xEB\x8E\xAF\x3C\xED\x78\xAC\x21\x44\xBC\xBC\x22\x9C\x7E\x3E\x20\x6A\x69\xAF\x16\x5E\x54\xEF\x36\x56\x4E\xD5\x71\xF8\x9F\x1E\x42\xF4\x23\x02\xFF\x3D\xF0\xAE\xF0\xE3\x49\x11\x3A\xBD\xD4\x18\xBE\x2B\xF8\x10\x2C\xF2\x0C\x7C\x9A\xDC\xA7\x15\x1E\x33\xF5\x3D\xAD\x6B\x33\x82\xF6\xD2\x89\x74\x96\x5F\xBA\x60\xDF\xEB\x5C\xF6\xB6\x9B\xF1\x60\x40\x7F\xC0\x9B\xD4\xB7\xD7\xFD\x90\xE6\xD4\xDD\xF6\x75\x4D\x4E\xCD\x53\x62\x0F\x6E\xB3\xCD\
x30\xEF\x6B\xDF\x1F\x1D\x55\x44\xD9\x84\x80\xFC\x9B\x5F\x80\x7F\x6F\x7F\x52\x63\x93\x77\x36\x85\xA9\x00\x85\xFF\x6D\xAF\xBB\xB9\x29\x12\xF0\x7E\x79\x1E\xB4\xDC\x19\x57\xBC\xEE\x66\x80\x4E\x57\xE1\xAA\xC9\x09\x3C\x19\x1A\x6F\xB2\xCA\x9F\x40\x7F\x43\x92\x18\x56\x79\xD9\x8C\x82\x71\x43\xAD\x66\x5D\x48\x05\x4B\xD6\xB0\xF6\xAA\x6B\x32\x3F\x6D\x46\x58\x19\x0B\xAF\x40\x9E\x50\x1E\xDE\x0C\x07\x06\x97\x38\x0C\xFC\xB8\xD5\x91\x11\x60\x18\x95\x1F\x5D\xE5\x74\xE7\x9F\xD6\x35\x30\xE3\x89\xFF\xC2\xB1\x81\xCC\xC9\xD5\x89\xC6\x32\xCB\x6A\x9F\xAB\x28\x57\x1C\x16\xC6\xC1\xC2\xD1\x60\x6C\xFA\xD1\x55\x91\x63\xBC\xBC\x84\x98\xC6\xC1\xF0\x46\x57\xBD\x2D\xD0\x44\xB9\xFC\x87\x34\x7A\x92\xCA\xCA\x95\xD5\x7B\xAD\xCC\x67\x05\xBF\x59\x4D\xF2\x42\xCB\xB9\x7A\xD5\x94\x00\x1B\xCB\x6D\x50\xFD\xE4\x18\x17\xD7\x17\x96\x56\xDE\x34\x06\x2E\xF9\x40\x71\x5F\x95\xB2\xCD\xFA\x5E\x4C\x28\x4E\x99\x91\xF9\xDA\x84\x04\x42\xD4\x47\x86\xF6\x52\x16\x14\x79\x97\xF9\x69\x77\x15\xBE\x56\x39\xCD\x89\xF3\xF2\xDE\x5F\xE9\x28\x91\x01\xAE\x7B\xCB\x0A\x13\x57\x1B\x3E\xC8\x90\xE1\x3B\x5E\xE1\xA9\x1D\x2A\xA7\x3D\x6E\xAC\x27\x31\x14\x0E\xA7\x72\x3E\x74\xFC\xE2\xBC\x78\xF4\xB4\x6B\x8B\x2E\x49\xC2\x3D\xDB\xF4\x79\xA4\x64\x14\x54\x92\xAA\xA0\x53\x4F\xE8\x78\xEA\x2B\x6D\xE6\x4E\x63\xC6\x3E\x0A\x7D\x06\xBE\xA3\x44\x83\xB8\xDA\x30\x14\x61\xB5\xB3\xC3\x6A\xE4\xA4\xC4\x88\x36\x38\xCE\xCC\x01\x76\xAA\x68\x72\x0F\x35\x2A\x59\xE8\x43\x0E\x71\xC6\xA1\x3E\xD4\xAB\xC9\x63\xD2\x36\x78\x51\x03\x34\x2A\x92\x9B\x21\x0F\x62\xD0\xA6\x31\x57\xCF\x14\xE9\x56\xF4\x50\x15\x73\xEB\xA0\xC1\x09\x8B\x89\x30\x33\xCE\xF9\x30\xC4\x1F\x7B\x24\xD0\xFC\x05\x04\xE3\x89\x22\x8D\xA1\xE0\xC9\x5B\x9A\x70\xDC\xE8\xD9\x95\xDC\x50\x75\x88\xB6\x08\x05\xDF\x59\x10\x07\x24\x66\x01\x89\xB4\x1A\xBD\x6C\x81\x79\x37\x5F\x14\x67\x1A\xD3\x46\xD1\x71\x19\x7D\x30\x9E\xCE\x45\xF8\x55\x47\xA7\xC3\x34\x65\x8E\x05\x45\x4E\xA5\xAF\xAF\x1D\x8C\xB5\x94\x1C\x60\x1C\x1A\xC4\x3A\x61\x57\xFB\xEF\x39\xD6\x17\x28\x51\xD5\x7F\x3C\x1B\x70\xFA\x53\xDE\xA6\xE5\x82\xEA\xC8\xA0\x0A\xDD\x4A\x4E\
xAD\xB1\x62\x8E\x58\x64\xFD\x06\x67\x57\x48\x25\x89\xA7\xB5\x0C\x1D\x27\x46\xF9\x8E\x49\x03\xC3\x5A\x0F\xDA\x83\x53\x87\xD0\xCB\xAB\x9C\x0E\x25\x70\x62\x46\x4A\x99\x02\xFC\x25\xA5\x54\x7F\xFC\xC1\x6D\x56\x67\x48\xCF\x96\x82\xE5\xF4\xE3\xCA\xEF\x1C\x54\x27\xDC\xB9\x72\xE2\x78\x6B\x56\x9E\xF4\xA2\x95\xFC\xC7\x27\xC2\xFF\x82\x74\x66\x45\xDE\x70\x4B\x2B\x41\xD8\x1D\xF3\xF2\x08\x6D\x38\x25\xA5\x10\xA5\x57\xEB\x8D\x1C\xEE\x33\xED\x48\x0A\x6F\x2B\xBF\xA3\xAB\xFF\x7B\xBF\x7F\xB3\x03\xF8\xF4\x93\x37\x0D\xE2\xB2\x6F\x7F\x05\x66\x02\xC5\x54\xC2\xF0\xFA\x87\xC2\xEE\x71\xFC\xE0\xAF\x6F\x1A\xC6\xC2\x89\x46\xFA\xC7\xF6\x29\x38\x51\xFD\x99\x5E\x82\x67\xB9\x59\xBD\x10\x4E\xAC\xD1\xD1\x7A\xD9\xF9\xC7\xE2\x49\x73\x4A\xF5\x42\xB2\x54\xEC\x51\xB5\xD3\xF4\x20\x06\xC6\x9F\x07\x46\x3E\x97\x32\x60\x26\xC5\x7D\x0C\xD7\xBB\xE6\x5F\xA8\xD0\xEF\xB9\x79\x95\xC8\xD3\xF0\x4C\xC2\x91\xE6\x60\xCC\xBC\xC2\x9C\x42\x0C\x6C\x02\x03\xAB\x98\x5C\x04\x5F\x5F\x3B\xC8\xE9\xE8\x61\xC2\xC1\xB4\x5A\x99\x72\xAC\x37\xCB\x6D\x2E\x95\x39\x5E\x21\x91\xD0\x1B\xD0\x58\x32\x2A\xEC\x13\x84\xC4\xB6\x94\xA7\xF5\x67\x5F\x91\x20\x35\x5B\x91\xC7\x1B\xCC\x67\x3F\x76\xC8\x81\x9A\x76\x30\xB2\xD0\xCF\x4C\x38\xCC\x29\x45\x47\x20\x4F\x3B\xEA\x34\x05\x42\x3C\xF6\x1E\x8E\xDA\x73\x11\x11\x9C\x46\x19\x25\x1A\xE3\x2D\x07\xCA\x39\xE6\x65\x53\xD2\xFA\x0B\x6F\x8C\xD6\x5C\x01\x42\xAB\x80\x1E\x67\xD1\x36\x8A\xF3\x1E\x3E\x62\xD4\xB5\xF1\xCB\x83\x4D\x15\x90\x57\x45\xF7\x68\xE9\x74\x93\x81\x80\x0F\xA0\x35\x19\x7C\x8C\xC9\xE0\xB8\xEE\x82\x2B\xD6\xDA\x91\xCB\xBB\x56\x77\x58\x51\xED\xA9\x98\x3D\xB8\xAA\xEF\x25\xF0\x5D\x45\xC9\x6C\x73\x96\xA2\x45\xE7\x46\x41\x3E\xA2\xF4\xCD\x3A\xDE\x3E\x29\xD1\xCD\x87\xE3\x73\x16\x73\x9F\xA3\xD4\x4A\xE4\xA9\x62\xA9\x9B\x57\xAE\x60\xE8\x39\x93\xBC\xCB\x59\x72\x5A\x97\xF7\x4C\xA9\xAB\x07\x36\x71\x04\x0E\x09\x20\x09\xE7\x7C\x62\x30\x16\x10\x83\x5B\x37\x5E\xCB\xB7\xA2\x40\x1E\x5A\x87\x94\x6B\x7E\xC6\x9E\xE2\x74\xA7\x18\x5D\x6B\xE9\x34\xA3\xEE\xFB\xD3\xD1\x31\x64\xC8\xC2\x82\x1B\x2F\x62\x07\x53\x30\xB0\x6C\x43\x6B\x2C\
xD5\x92\x64\xDB\xCA\x82\x38\x9B\xAD\xAD\xC1\x5F\x04\x5F\xF6\x67\x74\x9F\x86\xF1\xEC\x67\x5E\xF0\xD9\x12\x8D\x39\x7E\xD8\x04\xEF\x53\x46\x5B\x63\x87\x88\xFF\x82\x2B\x39\x4C\x9C\x9C\xF9\x2B\x64\x33\x07\x1D\x20\x5B\x6B\x6D\x8F\x51\x7C\x2D\xEB\x9C\xE5\x63\x52\xA0\x6D\x78\x4E\x59\x62\x9C\x25\xCC\x86\x2A\xD5\x94\xDC\x25\xEF\xDA\x8C\x58\x21\xF7\x58\xFD\x87\x90\x59\x85\x7C\x1D\xF6\xD0\x44\xA2\x8E\x6B\xD7\x9E\xC2\xF9\xB9\xB3\xB5\xF9\x1E\x49\xB3\x5A\xE3\x7E\xF3\x70\x1D\x7B\x8F\x77\x52\x18\x50\xB9\x8F\x59\xFD\x58\x66\xC4\xD1\xBA\x33\xC8\x08\x2C\x7D\x3F\x04\x24\x47\x5E\x1E\x33\x14\x16\x2F\x22\x08\x74\x39\xC0\x01\xCA\x97\xBC\x63\x78\x43\x96\xF3\x81\x64\xB9\x6A\xDE\xB1\xB6\x40\x9C\x26\xD2\x51\x61\x71\x84\x7E\x71\x65\xA1\x87\x53\x2C\x10\x6E\x3E\xA3\x33\xF1\xD4\x4B\x74\x06\xEB\xA7\xE4\xFF\xCD\xE5\x74\x99\x29\xA9\x6E\xCE\xAE\xA4\xFA\xCE\x41\x49\x75\x58\x5F\xC2\xA5\xBE\x50\xEF\x6C\x2D\xD6\x68\xA5\x4A\xE9\xF5\x8A\xF8\x87\x13\xE1\xDF\xF8\x8A\xB8\x01\xC8\x95\xA1\x50\x22\x81\xAA\xB6\x47\x8D\xB9\x72\xA6\xE9\x53\x84\x9A\x15\x81\x75\xA3\x94\xCB\xD0\xE9\x90\x3B\xFB\xA3\x68\x8C\xE1\xD8\xCD\x20\x8D\xF9\x02\x4A\x02\x98\xC5\x12\x3B\xC2\x72\x86\xEA\xC7\x08\xBE\xBA\x2D\x01\x3E\xE0\x41\xFB\x7C\x57\xFE\xE8\x04\x4C\x2E\x58\x1D\x72\xCA\x7D\x6E\x38\xC1\x18\x6A\x3D\xB2\x0A\xD5\x98\x9C\xC4\xFA\x8B\x03\xEE\x90\x9D\x33\xE1\xFC\xA0\x0B\x1A\xA0\x88\x1A\xE0\x6C\xBA\x97\xFD\x67\x41\x79\xF4\x9A\xDE\x78\x92\x77\xE3\xEB\x9B\x82\xE4\x22\x51\x01\x24\x7D\x9B\x5A\x14\x4E\x78\x16\x85\x17\x0D\xEB\x65\xA4\xD8\x5D\x46\x51\x0D\x89\xAC\xD0\x3A\x66\xC1\x44\xDE\x3E\x0D\x82\x43\x3A\x86\x71\x6B\xD9\x48\x31\x97\x86\x02\x8E\x41\xE9\x06\xDD\x9A\xD8\x93\xD7\xDD\xC6\xBA\x8C\xF2\xD3\xB3\xDE\x68\x82\xCB\x50\x77\x41\xD1\xB4\x84\x72\x8D\xA7\xC7\x79\xBF\xBA\x01\xEB\xD2\x99\x35\x30\x1D\x06\xE8\x87\x1E\xF0\x8D\x20\xEC\x39\x29\x4F\xAA\xDB\x24\x13\xEF\x1E\xF5\x50\xE4\x6B\x94\xAB\x9A\xE3\x36\x42\xC4\x82\x07\x42\xBF\x1E\x29\x65\x17\xB0\x20\x0E\x27\xE3\xAD\xFA\x14\xB3\xD9\x59\x62\x36\xE4\xBA\x9A\xC7\xAC\xED\x5C\xC6\x98\xCD\x09\xB3\x99\xCB\x31\
xD4\x21\x5B\x0B\x98\xCD\x00\xB3\x59\x82\xD9\x2C\x60\x96\xB2\x25\xB8\x59\x5E\x0E\x8C\xAC\x23\x23\xEB\xC8\xC8\xB3\x98\x7C\xA1\xC4\x43\xEF\x67\xB0\x6A\xC4\xAC\x55\x73\x28\xD4\xF7\x4A\x8D\x19\x5E\xC4\x1B\x2E\xF8\xE8\xD4\x5A\x8B\x1E\x46\x9D\x02\xA7\xBA\x70\x50\xD7\xC9\x19\xF3\xEA\xCC\x80\xF4\xE9\x91\x66\xC0\x50\x4E\xAF\x75\xB3\xC0\xD0\x5B\x6B\x64\x40\x0D\x51\xA4\xE2\x5C\x57\x71\xAE\xAB\x14\x9C\xD7\x2A\xD2\x59\x66\xAB\x9A\xF9\x18\xD6\xC1\x93\x8F\x6D\xDE\x02\x6B\xEF\x92\x0F\xDE\x38\xAA\xA4\xA0\x48\xD1\xA7\xCD\xAB\x57\x05\x99\x1F\x0A\x26\xB5\xCA\xFF\x54\xE7\x31\x43\x89\xF2\x8E\xF6\x5C\xC8\xA2\xC2\xA4\x1A\xE4\x66\x95\x4F\x8E\xCD\x73\xD3\xBA\xF3\xD3\xD3\xB4\xBE\xB8\x6D\x74\x1D\x4D\x91\x81\x2E\x41\xFB\x97\x36\x0D\x35\x8E\x15\xC4\xA1\x9E\x17\x87\xE1\x38\xB5\x84\x2E\x39\x97\x94\xEE\xCB\x17\xC8\xEA\xC6\x61\xD4\x5A\x4C\x2D\x86\x9E\x4F\xF5\xD4\x89\xE4\x25\x81\xF2\xF4\x2F\x2C\x57\x31\x94\xF4\xF5\xBF\xE7\xCD\x3E\x10\xDE\x1E\x8B\xFD\x53\x31\x02\x72\x32\x60\x86\x17\x03\x28\x91\xDC\x64\x98\xB4\x6A\xD1\xE4\x73\xB4\x6F\x89\xA5\xC7\x5F\xB7\xE9\xDC\xB6\xD4\x29\x6B\x2F\x72\xAD\x4D\x39\x05\xD3\x2B\x47\x34\xA8\x88\x86\x58\x84\x64\x58\xC4\xE1\xB7\xB5\xCC\xB7\x54\xA1\x14\xA3\x31\x3C\x13\x23\x32\x91\x8A\x46\xF7\x45\xE8\xC8\x15\x07\x27\x21\x8B\x57\x70\xE4\x6B\x76\x5B\x23\x0B\x78\x52\x59\x6F\xB8\x96\x91\x87\xB5\x3D\x55\x6F\x53\x59\x3C\x97\x16\x51\xAF\x17\x22\x4B\xD7\xFF\xE1\x34\x4F\x33\x8C\x31\x6F\xF3\xA7\x22\xC3\xE5\x94\x91\x2D\x73\x99\xCB\x0F\x2E\xB5\x19\x60\xEC\xDE\xB0\x9D\x8A\xC7\xC5\x3A\x2A\x99\x41\xDE\x23\xDC\x4B\xB3\x9C\x5A\x9C\x26\x2D\x56\x72\x61\x74\xF6\x35\x5D\xA8\x7A\x74\x30\xB9\x7C\x8C\x5E\x30\x7D\x86\x33\xCB\x19\xCE\x0A\x72\x49\xB4\xB0\x28\x63\x2A\x80\xB0\x45\x70\xDE\xA1\xA5\xE0\xB4\xA7\xC8\x2C\xEE\x90\xD6\xA6\x60\x9E\x88\xC4\x94\x98\x39\x54\xF2\x9D\x4D\xC4\x5B\xBE\x33\x89\x78\x39\x11\xF1\x35\x0F\x17\x11\x2F\x98\x2F\x49\x89\x5A\x6F\x70\x48\x51\x29\xCA\xC5\x75\x2B\xCF\xF8\x92\x3A\x9B\x97\xEC\xD9\xBC\x94\x9F\xCD\x4B\xD9\xD9\xBC\x24\xE7\x5E\xFA\x77\x32\xBE\x35\x13\xEC\x86\xDB\x34\x94\x98\
x56\x69\x63\xB3\xBC\xF2\xEF\x7F\x05\x8E\xBE\xFE\x2D\x36\x3C\x3E\x88\xD7\x79\xBC\xFE\x63\xBC\xCE\xE2\xF5\xFD\x33\xD7\x9F\xC3\x6B\x1B\xAF\xFF\x74\xE6\xF9\x83\x78\xAD\xE9\x9A\xE0\xA3\xEC\x88\x87\x8F\xF8\x8D\x3B\xF5\x81\xEA\x9D\x21\x39\xA7\xCF\x70\xF3\xF8\xC9\x71\xAB\x11\x84\x30\x15\xA0\xC3\xCC\xED\x21\x25\x2E\x15\x99\xF5\x7F\x11\xEE\xF1\xD9\x95\x7E\x6F\xD5\x85\x2A\xB4\x4F\xEB\x5A\xC5\x3B\x86\x5C\xD3\x20\xB0\x99\xBD\x14\xAB\x3C\xBD\x61\x4A\x85\xEB\xF1\x59\x90\x23\xF6\x29\xDC\xAE\xF5\x87\x3A\xB2\xF8\xCD\x3A\x2D\xAE\xA8\xF2\xFA\xDE\xF3\xE4\x4C\xF5\x03\x30\x98\xD1\xBA\xDF\xD8\xD8\xB8\xB2\x73\xD2\x6F\xDC\x23\xBA\xA3\xFE\xEB\xA7\x60\xCE\x86\x31\x2E\xE1\x75\x3F\xE6\xA5\xEA\xE5\x99\xB4\x53\x3F\x25\x4B\x1F\x56\x4C\x11\xC6\x6B\x69\x03\x6B\x43\x1E\xA1\xA4\x8F\x7B\x94\x70\x19\x0E\x3C\xF3\xF7\xCB\x23\xFE\x6E\xAC\x66\xCB\x40\xE0\x52\x9B\xF9\x6F\xE0\xFD\x03\x13\x09\x2D\x62\x33\x54\xC3\x6D\x7A\x09\x26\xD8\x82\x41\xBD\x4B\x5E\x8A\xAB\x32\x66\xA1\xCC\x7C\xB6\xEE\xEF\xDE\x7E\x04\x0B\xED\x1E\xE5\xC6\x1F\x90\x47\xFC\xFD\xDC\xB8\x8A\x8D\x5B\xC2\x76\x28\xAD\xC3\xA8\x95\xD0\x5E\x8B\x5A\x1F\x17\xAD\x42\x85\x24\xE3\x06\x94\xBF\x1C\x4B\xA9\xFB\x9B\x01\xA8\x8A\x8E\x2A\x0E\x5F\x55\x11\x2A\xF8\xEC\x7B\x08\xF3\xD5\xA5\xF8\x24\x96\xC1\xA2\xDA\x4D\x8B\xFB\xA2\x06\x5E\xC3\x0D\xFC\x29\xF7\x9B\x7C\xEA\xC4\x44\xA5\x9F\xA8\xA4\x4F\xC5\x48\xFB\x92\x5C\xC5\xFB\xFD\x4B\x20\x9B\x35\x65\xE0\x7E\xD2\x11\x9F\xAD\xBB\xCC\x8F\xD7\xFD\xC6\xFD\xFA\xC8\xD1\x80\x2D\x97\xF9\xAF\xCA\x23\xFE\x01\xC6\x95\xEE\xDA\x1C\x29\x18\x71\x55\x60\xFD\x30\xF8\xC5\x31\x1C\x5C\x8F\x40\xC5\x91\xA8\x74\x24\x32\x41\xC5\x0C\x3A\xA9\x8F\xC2\x3F\x20\x08\x41\xBB\x0F\xF5\xE8\x3C\xBB\x36\x8A\x01\x9E\xB7\x53\x33\xEA\x52\x7C\x92\x22\x4B\x31\x87\x24\xAD\x0E\x59\x47\xA6\x0F\xA8\xD5\x1B\xF1\x41\xE6\x1F\x94\xAB\x78\xAF\x7F\x01\xB0\x88\x19\xF5\x0A\xC0\xE2\x18\xB0\xF8\x18\x98\x22\x0F\x08\x46\xA3\xCF\xD6\x8F\x22\x8A\x8F\x52\x91\xE4\x77\x4A\x4A\xEC\x75\x4A\x5C\x4A\x69\xFB\x2E\x16\x18\x63\x40\x37\xB7\xF3\x4D\xF8\x70\x43\x62\xE5\x70\xB8\xCA\x8E\xF9\xBB\xE1\
x4A\x78\x15\xDE\xE4\x0A\xD7\xC9\xAB\x18\xD4\xE3\xC7\xC7\xFC\xFD\xF8\x6A\xF8\x30\xB6\xE4\x35\x15\xB7\x8A\x77\xB0\xE0\xA9\x7F\xCC\x31\xFF\xC0\xC2\x0F\x62\x7B\x7D\x0B\xA6\x7A\x05\xEE\xC8\x4C\x29\x7C\x8F\x98\x81\x92\xC0\xD2\xB6\x92\x86\xF9\x88\xF4\x60\x1B\xE0\x44\xD8\x8A\x06\x3D\x9C\x8E\xFF\x1D\xEF\xA2\xF8\xA2\x4D\xE8\xBB\xE5\x11\xAE\xB9\xAE\xA9\x00\xA9\xC6\xF0\x03\xB2\x0C\x24\xAC\xD3\x97\x80\xDE\x10\x7C\xED\xA8\x66\x2B\x27\x0F\x2E\x91\xE7\xF5\xAD\x0A\xC5\x2A\x6F\x3B\x9C\x02\xE5\x9F\x0A\xFA\xF9\x9F\x02\xA9\x78\xEE\xA5\x13\xE9\xA5\x53\xEB\xFE\x35\x1B\x1B\xE6\x08\x25\xFB\xA5\xAA\x87\x7E\xE3\x7E\x71\x09\xEE\xF5\x9C\x3A\xA5\xC1\x9E\x28\x0F\x71\x75\x52\xE5\x4F\xEE\xBA\x6C\x22\xFC\xFD\x27\x1A\xED\x15\xAB\x3D\xE1\xAD\x8D\xBB\xC5\x21\x4C\xC0\xA8\xFC\x4B\xEE\xC6\x14\x91\x1B\xFB\x1A\xED\xF5\xCC\x6B\xB7\xF6\xAF\xDD\x46\xAF\xDD\xF7\xAE\x29\x45\xA6\x0F\xDE\xFB\x8F\xFD\x7B\xF7\xD3\x7B\x1F\xC4\xF7\x6C\xFA\x1E\x80\xBA\x3A\x51\x7E\xE3\x66\x78\x94\x35\x18\xEF\x2F\xFD\x1F\x0B\xA7\x8E\x79\x79\x04\xD1\x24\x3A\xA7\x7C\x76\x95\xBF\xFE\x95\x1B\x1B\xA2\xFB\xBB\x52\x3C\x77\x3F\x9F\x12\xF1\x2F\x40\xF4\x0A\x14\x53\x1C\x4D\x5E\xFD\xBC\x94\xF9\xD4\xBF\x53\x91\xAD\x47\x06\xCD\x9A\x57\xCF\x76\xAA\x6B\x35\x6F\x4C\xEE\xB8\xAA\xB5\xFE\x4E\x6C\x8F\x92\x4D\x8A\x4B\xDB\x1C\xC3\xAD\x8C\xB3\xFE\x2D\xE1\x01\x86\x09\xF1\x07\xFE\x1D\xF1\x75\xE5\x37\x36\xDE\x83\x7B\x9B\x02\x7D\xA6\x47\x5D\xD6\x61\xF5\x33\x01\x7F\x0C\x58\x3D\x8E\xA2\xE9\x5D\xBE\xD4\x62\xB1\x73\x27\x1E\x31\xB8\xEE\xDA\x0C\xAE\x13\x47\xCE\x0A\xAE\x9C\x36\x07\xAB\xAF\xCA\x68\x35\xCF\xAB\x1C\x27\xBA\x6D\x19\x6A\x1C\x22\xAB\x62\xF4\x9A\x3D\x43\xF4\x9A\xA6\xDD\xE9\xFA\x0D\xE4\x97\xEE\x28\x8A\x6D\x10\x76\x36\x08\x39\x1B\x67\xEC\x6A\xF2\x62\x15\x0B\x6A\xFE\x6A\xEA\x6B\x25\x5D\xFD\xBD\xBF\xF1\xE5\x1B\x9E\xB7\x3D\x2A\xEB\x78\xC9\xF1\x50\x3F\x7B\xDD\x46\x71\x15\xD6\xE6\xBC\x9A\xA3\xA7\x40\x19\xC7\xE9\xCB\x59\x20\x45\xFD\xE6\x93\x9C\x9F\xBE\x7E\x4B\xAF\xCE\xDC\xA4\x75\x36\x55\x27\xE4\xD4\x4F\x61\x81\x3C\xEA\xC5\xAA\x93\x4B\x64\x34\x84\x93\xE8\x7E\xDA\xA8\x61\
x44\xD5\x21\xB2\x1C\x1C\xED\x24\x2C\x38\x98\xA0\x51\x51\x0B\x9A\x5A\xD5\xBB\x60\xD6\x38\x59\xFE\x7F\x3C\x19\x4B\xFE\x15\xFD\xA6\x49\xD4\x9A\x39\xB4\x11\x3D\x22\xAD\x9A\x18\x02\x81\x52\x95\x52\x3A\xC7\x43\x58\x96\x56\x5E\xBA\xBD\x8F\x76\x25\xE2\x60\xBD\xC2\x43\x2D\x50\x19\x23\x18\xEF\x58\x99\x1E\x6F\x8A\x15\xD1\x94\xDE\x60\x88\xD9\x08\x77\xE9\x15\x7B\x7B\x4B\x37\xBA\xE3\x9F\x84\xFD\x7F\x3C\x26\x40\xB1\xD7\xA5\x93\x3F\xA0\x8A\x7F\xC2\xBE\x21\x7E\xA2\xA8\x94\xE7\x6D\x64\xBD\x4F\xAC\x2B\x5C\xF9\xA3\x13\xE9\xCA\x15\x79\xBC\x29\xC3\x59\x01\xA4\xCF\x08\xF7\x94\xA7\x60\xAF\x38\x7B\x68\x89\x87\x7D\xE7\xC9\xBE\x5E\xC0\xAF\xB0\x92\xF9\xB2\xF7\xA4\x1B\x00\x98\x4E\xDA\x49\x62\x9C\xDE\x7D\x45\x3B\x47\x87\xE7\x02\x36\x29\x89\x3B\x41\x44\xC7\x44\x08\x0B\x82\x63\xD9\x1E\x8E\xD6\x4C\xDF\x9A\x98\xCF\xE9\x27\xB4\xC4\x2C\x33\xE2\x42\x5D\x60\xF0\x55\xA0\xAA\xA0\xB2\x53\x4E\xD6\xD7\xF2\x48\xDF\x97\x8C\xD4\x4F\xAB\xCF\x29\x4D\x3B\x03\xA7\xCB\x85\x90\xCE\x44\x2C\xC0\x2E\x95\xAE\x90\x0D\xC1\x34\x52\x5E\x24\xD9\x79\x63\xAD\x2C\x91\x29\xDC\x83\x96\x98\x72\x6E\xAF\x2E\x60\x7D\x48\x7A\x0F\x79\x70\xC9\x3C\xC4\x03\x91\x08\xE3\x44\x3B\x75\xA1\xC6\x10\x0B\xB3\x22\x7E\x0C\x73\x98\x8A\x1F\x50\xC5\xFF\x33\xE1\x60\x07\x73\x0B\x1F\xA8\xC2\x02\x32\x11\xB1\x96\x4F\x25\xCC\xA1\x55\x86\x9D\xD4\x88\xD6\x18\x02\x2B\xC2\xC4\xA4\xD6\xE4\x59\xB4\x26\x36\x6B\x0D\x29\x14\x4E\x57\xCD\x3B\x4B\xBF\xAC\x8C\x9C\xAA\x13\xEA\xF8\x82\xEA\x83\x8B\xCC\x2C\x46\xA2\x95\x1A\x25\x28\x12\x76\x67\x70\x13\x44\xBA\x62\x64\x03\xA1\x4D\x55\x0B\x68\x60\x13\x1A\xE8\xBD\xBA\x9E\xA5\x81\x0E\x34\x88\xED\xD5\xB1\x3D\x20\x43\xDD\xE8\xCA\x4F\x1B\x8A\x9A\x68\x8D\xD3\x3F\x36\x31\x20\x71\x9E\xDE\x00\xFD\x06\xCC\x44\x40\x2B\x72\x46\xF2\x4F\x01\x12\xED\xF1\x1A\xE4\x82\x84\x3F\xD9\x33\x41\x30\xFC\xC4\x12\xE7\x6E\xA3\x08\x8B\x26\x0F\x87\x2C\xE3\xFE\x3E\x85\x3C\xF6\x47\x10\x07\xE7\x6B\x7F\x3B\xE9\x33\x9F\x4F\x66\x91\x24\x31\xFD\x6D\x23\xAB\xA9\x13\xF5\xDB\xC2\xCC\xC3\x80\xB1\xB7\x91\x9F\x83\x8A\x91\x2A\x92\x5A\x06\xE4\x43\x9B\x25\x7B\x86\x78\x8C\xD0\
xEB\xF5\xB6\xE8\x50\xF6\x50\x18\xD5\xB2\x32\x2C\x79\x4A\x40\x50\xBC\xD9\x8E\x98\x6F\x4A\x2F\x1A\x50\x83\xC6\x61\x9A\x97\x43\x0E\x2A\x03\x07\x95\x80\xEE\x51\xCF\x41\x98\x21\xB8\xC4\xFA\xDC\xCD\x18\xB0\x53\x02\x4F\x8E\xAB\xA4\xE7\xBE\x93\x10\xF8\x54\x22\xF5\xCE\xBE\x71\x64\x4F\x6C\xB8\xAA\xDC\xD8\x55\xC9\x58\xB8\xD6\x0E\x6D\x0B\x60\xE0\x80\x71\x16\x0D\x64\x0F\xEB\xB6\xBD\x74\x89\x8C\x67\x84\x83\x0E\x56\xF1\x39\x65\x37\x8A\x69\xFD\xF9\x26\x46\xB2\xF9\xA2\x19\x79\x89\x61\x91\x88\xA7\x51\x17\x5F\x73\x23\x10\xD2\xA3\x10\x42\xA5\x62\x28\x1A\xA6\x8A\x18\x55\x6E\x44\xEF\x55\x4E\xC5\x76\x75\xD2\xB1\x5E\xD4\xB1\x5E\xD0\xB1\x3E\x7D\xC7\x3A\x76\xAC\x67\x3B\xD6\xFC\x41\x59\xDD\x58\xC8\x1C\x2B\x7A\x8E\x1B\xE5\xEB\x50\x3F\x81\xEA\xA7\x6B\x6F\x71\x8B\x64\x45\xEC\x55\x86\x63\x9F\x31\xBC\x47\xAE\x53\x9E\x31\xB3\x9E\x54\xAD\xD6\xFD\x01\xD7\x58\xA8\x50\x75\x21\x56\xEE\x42\x25\xF6\xAA\x78\x16\x32\x04\xC9\x05\x08\x03\x06\x68\x3D\x8D\x50\xBA\x50\xFD\x50\xC4\xE8\xB8\xC5\x27\xFE\x54\xA8\x2F\xAB\x92\xF3\xA1\xD8\xE0\x26\x67\xFA\x42\x97\x1C\x96\x88\x08\xB2\x7E\x8C\xE1\x23\x39\x3A\xE2\x60\x76\x80\x25\x70\x9C\x24\x07\x34\x56\x10\x43\x35\x8A\xDF\x67\xA5\x1F\x0D\x15\xAA\xA5\x8C\x9B\xB9\x39\x26\x0E\xB1\xA0\x79\x5A\x67\xFD\xF3\x3A\xCC\x7B\x0C\x63\x7F\x5E\x87\x07\x8C\xA6\x54\x17\xF9\x30\xB6\x1C\x1A\x35\xDC\xA8\x33\x5E\x5C\xC2\x1E\x3D\x01\x30\xB8\x62\x0F\x86\x5F\x16\x5D\x93\x05\x3E\xEE\x49\x2B\x87\x94\x0B\x95\xEF\x89\x72\x62\x40\x39\xB1\x19\xE5\xC4\x62\xCA\x89\xD3\x50\x4E\x24\x94\x13\xF3\x94\x13\x91\x72\x72\x8E\x72\x72\x11\xE5\xC4\x62\xCA\xC9\x4D\x28\x47\x3A\x32\xD1\x4E\x04\xDA\x49\x2F\x31\xB8\xDC\xBA\x6C\x9E\x76\x79\xA4\x9D\x98\xA3\x5D\xC6\x47\xE3\x70\x23\xDE\x9C\x8E\x76\x06\x68\x67\x22\xED\x42\xA3\x05\x37\xEA\x8A\x9E\x76\x06\x69\x97\xEF\xE1\x94\xD4\x4D\x08\x29\x66\xD8\x0F\x25\xD9\xDC\x51\x58\x17\x88\xA2\x02\xAD\xDD\x50\xB6\xDD\x00\x34\xEC\x60\x2F\x1C\x56\x31\xD3\x31\x96\x97\x76\xA8\x44\xBC\x66\x69\x97\x93\x22\x51\x0C\x67\x59\x5B\x5C\x36\x91\x21\xE0\xD7\xF2\x11\x14\x3F\x6D\x72\x57\xE0\x05\xFC\xB9\x74\
x7B\xE5\xF2\xEA\x17\x95\x96\x53\x79\x3C\x0D\x8F\x93\xB3\xEB\x78\xBF\x82\x73\xC9\xD0\xC7\xEB\xA2\xA1\xCA\xC7\x17\xEA\xE2\xED\x1C\x99\x3C\xA3\xA3\x81\x0A\x74\x1D\x89\x6C\xE1\x3F\x9F\x2E\xAD\x92\x3E\x97\x95\x7F\xD1\x7B\xE3\xED\x3E\xF4\x7C\xC6\x85\xFD\xCE\x56\xDF\xD2\x0A\x72\x6F\xFD\xFA\xAF\x2C\x5F\xCD\x96\x89\xF0\xC5\x31\xB8\xD9\x5F\xD7\x33\xD7\x3B\x8F\x85\xDF\x7A\xC5\xDD\x70\x4B\xFA\x50\xAF\x2C\xCF\xDE\xD8\x3D\x7B\xE3\x07\x6F\xB8\xE5\xEA\xEA\x0B\xA0\x50\xAA\x13\x26\x56\xC1\x2E\xA8\xBE\x36\x1F\x35\xBC\xA3\xD1\x2B\x82\x76\x39\x90\x70\x96\xF6\x3C\x7B\xF5\xBF\xD7\xEE\x6C\x45\x91\x4B\x03\xCD\xC8\xCE\x59\x83\x16\x4B\x47\x91\x35\x58\xC4\x53\xB2\x76\x10\x17\xDC\xC7\x73\x05\x6B\x30\xEB\x1A\x0E\x9C\xC3\xF8\x2C\x68\x04\xA3\xD2\x68\x61\xC8\x83\x35\x68\xC9\x1A\x6C\x32\x2C\x2C\x4F\x06\x61\x93\x71\x5D\x39\x94\x78\xC1\x26\xC4\xBB\xD4\x8E\x0C\x77\xF4\x3E\x97\x31\x76\x24\x9A\x85\x59\x38\xF2\x52\x80\x59\x58\xB0\xF6\x69\xC9\x2C\xA4\xCD\x7A\xCB\x96\x61\x86\x31\xC5\x38\xA5\x56\xE4\x71\x50\xBD\x7E\x94\xB5\x56\x2A\x6D\xEE\x54\xF5\xA7\x46\x96\x3D\x9A\x69\x7F\x39\xC4\xB7\x0F\xAC\x62\x35\x3C\x71\xE8\x89\x00\x76\xD6\x5E\x44\x93\x7F\x16\xBF\xB8\xB9\x94\xCD\xE0\x98\xE4\x54\x36\xC0\x71\x49\x38\x2E\x7B\x1C\xDB\xAE\xB1\xA8\x6D\xA0\x5E\x83\x8D\x2C\xC2\x71\xC6\x38\xB6\x80\xE3\x8C\x71\x6C\x09\xC7\xD9\x10\xC7\x70\x97\xDA\x29\xC3\x1D\x10\x46\x8C\xE3\x12\x71\x6C\x5D\x09\x38\x2E\x87\x38\xCE\x18\xC7\x18\x6A\x92\x31\x8E\x2D\x88\x21\xC9\x65\xFF\x87\x98\x30\x6D\x31\xC7\x69\x05\x9A\xB6\x84\x05\x1B\x77\x0B\x8A\x81\xB4\x9E\xC7\x02\x86\xB9\xF4\x58\x80\x46\x80\xB8\x2C\x7A\xB2\x80\x85\x82\xB1\x80\xF5\x41\x0A\xC6\x82\x21\x2C\x14\x1C\x7C\xCE\x63\xC6\x5A\x26\x45\x8A\x05\x83\x51\x29\x29\x16\x4C\xC0\x82\x05\x2C\x58\xC6\x42\xC1\x58\x40\x69\x5E\x30\x16\x0C\x9E\x35\xC1\xC0\xE4\x70\xCA\x74\x61\x7A\x8A\xD7\x59\x9C\xD9\xA7\x4F\x9B\xB7\xD0\x54\x0C\xF9\x13\x70\x15\xA5\x10\x45\x34\x91\xC6\xDF\x12\x2B\x92\x67\x90\xDD\xAB\x0B\x67\x56\xA6\x60\x51\xDA\xA1\x45\x69\x59\xE6\x88\x50\x92\x34\x40\x0B\x82\x83\xD7\xC3\x0C\x15\xEB\x0C\x68\x9C\
xA3\xD3\xDA\xA0\x04\xC1\xD3\x2C\xFB\x27\x06\xA7\x64\x1E\x9A\x88\x66\x24\x1F\xD3\xA0\x35\xED\x70\x1B\x26\x0B\x89\x86\xC4\x86\x6C\x72\x62\x6B\x8C\xE0\x09\x35\xB2\xF1\x0D\x6A\x8F\x80\xA1\xA9\x93\x3D\x85\x0D\x4E\xE0\x5B\x0C\x8A\x8B\xD9\x00\xC4\x24\x2E\xA2\xF2\xB2\x89\x46\xA0\xC8\x1A\xA5\x3D\xAA\x74\x5C\xF2\x6C\xC6\xA5\xD3\x71\xC9\x87\x3A\x2E\xB3\x78\x5C\x72\x6E\x5C\xE2\xAC\xC6\xA5\x92\x71\xCD\x58\xD9\x4F\xA4\x40\x20\x19\x58\xF0\x78\xD8\xC0\x43\xB7\x7E\xBF\xE7\xA1\xD6\xFD\xED\xEF\x65\xBF\x25\x02\x8E\x8E\xB8\xBB\x75\x54\xF5\x65\x54\x18\x95\x93\xA4\x11\xA2\xC2\xA8\x82\xC2\xC8\x87\xDB\x25\x2B\x8C\xB2\x57\x18\x75\x92\xC2\x49\x84\x6D\x45\xAE\xB7\x8D\x2E\xB8\xA0\x30\xB2\xD6\xC3\xF1\xC0\x2A\x51\x18\x51\x21\xC3\x1E\x64\xB4\x84\x9C\x4E\x55\xFD\xD3\x1C\x6E\x95\xE1\x20\xAA\xEC\x15\xC6\xD9\x48\xE4\x99\xC3\xAD\xA9\xDD\x46\x46\x58\x38\x81\x94\xA1\xAE\x4B\xEA\xA2\x64\x75\x51\xA6\xEA\x22\xE7\xF7\xA3\x44\x3D\x32\xA8\x8B\xA8\xEA\x5B\x50\xE9\x50\xD5\x87\x41\xB7\x00\x62\x54\x17\x25\xA8\x8B\x32\xA8\xFA\x66\x46\x5D\xD4\xDC\xA8\xD3\x41\x5D\x34\xAC\xEA\xA3\xBA\xC8\x27\x99\xC2\x09\xC8\xDE\x7C\xAC\x3E\x26\x63\x71\xCF\x82\xC4\xD7\x98\x23\xBD\xB0\xFE\x9D\x1F\x39\xBD\xD4\x6A\x32\xDD\x74\xA2\xE1\xA7\x6B\xA8\xEA\x53\x18\xD9\xA4\x66\x2C\xFC\x65\xC2\xB1\x8F\x31\xD1\xF4\xA3\xBA\x9A\x44\x76\x05\x13\x32\x78\x9A\x59\xE5\x27\x2B\xA3\xDB\xA3\x0A\xBF\xB3\xB7\x26\xEF\x00\xE9\xA4\xFD\x75\xEA\x09\x52\xB8\x3E\xDF\x51\x1B\xEB\xEC\x61\x95\x99\x3F\xD2\xD2\x4C\x37\xDB\x92\xB7\x16\x5D\xBD\x19\x7B\x62\xF4\xEA\xC4\xC0\xAF\x7A\xFF\x84\x2A\xE5\x12\x38\x20\x30\xE1\x39\x0B\xE2\x70\x57\xB3\x97\x11\x04\x5E\x6F\xF0\x10\x0B\x05\x0B\x5B\x24\x16\xB6\xE4\xD3\x69\xE4\x54\x4E\x2C\x6C\x66\xEA\xB9\xF5\x03\x27\x36\x4A\x10\xEA\x50\xF5\x56\x4A\x1A\x77\x3A\xF4\xE2\x4B\x5E\x51\xD8\x48\xC2\x9D\x08\x20\x0F\xE8\x72\xA1\x0A\x64\xAB\x88\x07\x8B\x90\x50\x62\x06\xED\xC3\x33\xE5\x18\xF8\x41\x10\xC8\xAD\x42\x60\xCE\x00\x81\xD9\x22\x04\x55\xA8\x5F\x54\x55\xAF\xD3\xCC\xB7\x40\x4C\x4C\x37\x41\xFB\x0E\x32\xBD\xEE\x45\x92\xCF\x28\xC6\xC5\x05\x06\xA2\xC2\
xA5\x86\xA2\xE6\x68\x0A\x53\x5D\xAF\xF0\xAB\x88\xBF\xC6\xF1\x57\x1D\x7F\xED\x88\xBF\x76\x3A\xB5\x52\xEC\x4D\x5E\x3F\xBF\xD7\xE3\x6D\xE2\xA2\x5E\x90\xC6\x02\xB7\x2E\xFB\x44\x16\x94\x28\x67\x36\x95\x05\x17\xD5\xC6\x55\xB7\xFE\xF5\xF0\x1E\xCE\xBA\xAC\x0A\xAF\xDB\xF9\xB4\x17\x62\x61\xDA\x8B\x33\xD9\xCE\x0F\x3D\xC3\xC6\xAC\x49\xED\x54\xF5\x42\x25\x75\xEF\xC7\xD7\x91\x55\x86\x0E\xBE\xC1\xC1\xC8\xC4\x8F\x6F\xB0\x6E\xD6\x76\x4A\xDB\xC0\x8C\x42\x09\x19\xBC\xEC\xEA\x77\x60\x24\xCE\x98\xC9\x69\xA9\xC6\x16\x57\xE1\xB2\x49\xBE\xFC\x99\x0E\x5B\xD4\xD9\x0D\x2D\x05\x9C\xCA\x25\x1A\xBA\x78\xFC\x51\x51\x15\x61\xD3\x06\x54\x51\x86\x80\x99\xD7\xF8\xF0\x5D\x83\xD5\x64\x0B\x3A\xC1\xEA\x2C\x76\xCE\x55\x3F\x03\xA4\x7B\xD4\xB8\xFA\x59\x2D\x6D\x2F\x64\xEF\xFA\x91\x7F\x81\xFF\x3E\xFB\xA4\xD7\x39\x79\xD7\xBF\x98\x88\xDE\x49\x16\xD8\x34\x4A\xDA\x10\xD6\xC9\x3E\xB2\x16\xCB\x8B\x69\x94\x6D\xC1\x07\x23\x99\x83\x71\xAF\xDA\x1B\xAA\x5C\x4D\x42\xC6\xF3\xCA\x61\x48\xC0\x84\x6C\x01\xD1\xCC\x3C\x63\xBE\x07\xDB\x2A\x64\x23\x72\xA5\x10\x1D\x65\xD4\xF8\x89\x7B\x19\xB1\xA8\x10\xC9\x68\x53\xE1\x17\x24\x0D\xE7\x70\x2B\xDB\x2C\xAA\x54\x0B\x5E\x0B\xB8\x05\xBB\x03\x5D\x54\x68\xD8\xE1\x18\x61\xE0\x30\x7E\x1D\x71\xEB\xFA\x4C\x0C\x32\x32\xBB\x9C\x15\x1C\xC0\xCE\x17\x2A\xB3\x17\x98\x21\x48\x90\xF3\xA5\x9C\x0E\xAB\x8E\x19\x27\x3D\x56\x96\x96\xD5\xDF\x82\x87\x3B\xFB\x87\x3F\x84\x75\x50\xAF\x33\x98\xB8\xB7\x7A\x7C\xFA\x14\x53\x15\xD4\x2E\x68\x3D\x8E\x64\x87\xBF\x2E\x83\x77\xD5\x5C\x4B\x54\x51\x95\x96\x2D\x59\x7D\x2F\x3C\x1D\x47\xC5\x89\xBE\x45\xF2\x3A\xE5\x0D\xB5\xF0\x56\x09\xD3\x88\x86\x38\x2B\xD1\xA4\x57\x51\x8B\x0A\x5B\x01\x8C\x25\xD2\x7F\x82\xE2\xA5\x03\x2F\xE0\x06\x3C\xE6\x55\xD0\xCC\x2C\x6A\x01\xB3\x84\x5D\x58\x62\x96\x98\x2B\xA8\xD7\x94\x70\x33\x90\x52\x56\x85\x6C\x3E\x32\xBC\xC4\xD3\xFF\xF6\xC5\x70\xE7\xDF\x56\xB8\x39\xCF\xC8\x3C\xB4\x7F\x25\x53\x61\xA5\x7A\xE9\x31\xB3\xED\x98\x64\x82\xEB\xF7\xB3\x54\x9F\xEE\x2D\x15\x52\xEF\x45\x96\x8E\x35\xF6\x49\x3A\xD1\x3A\xB3\xB0\x7D\x2E\x12\x39\x09\xA5\xF1\xC3\xF2\x30\x37\
x83\x74\x2B\x59\x6B\x22\xF9\x35\xF3\x5A\x98\x41\x60\x2D\x02\x5E\xD0\xAD\x87\xD1\x41\x86\xEA\x6B\x0E\xA4\xD3\x16\xC4\x53\xFE\x28\x14\x4F\xE6\x11\x11\x4F\xE6\xEC\xC4\x93\x99\x13\x4F\xE6\x61\x12\x4F\xAF\xD5\x5A\x4E\xE5\x09\x3D\x9D\xDD\xEB\x4C\x35\xD8\x82\x62\x15\x72\xD6\x16\xC5\x66\xF2\x6C\x7B\x35\xEB\x65\x55\x42\xE6\xE4\x5B\x6D\x54\x22\x9E\xD4\xDE\x54\xD0\x6D\x9F\x7F\x83\x84\x5D\xFF\x62\xB6\xD9\x8B\xF1\x0D\xD5\xBF\xA1\x4C\xA3\xA2\xB4\x83\x37\xA8\x9E\x4B\x7C\x01\xD0\xA6\xD7\x87\xE7\xB7\xB1\x86\x15\xAF\xD6\x6C\x1D\xDA\x60\x1D\x8A\x45\xD6\x61\x6F\x64\x8C\xC3\x39\x42\xC4\x7F\xCF\xC7\x12\xE6\xC2\x98\xEA\x49\x02\x88\x96\xCF\x5B\x20\xC6\x30\xD6\xE4\xDD\x31\xEA\xE4\xB7\x39\x0A\xE0\xDF\xA6\x51\x00\x9C\x73\xEF\xC9\x60\x3B\xD7\x3D\x66\xC8\xAC\xA5\xD6\xD0\xEA\x4D\xA4\x9E\x98\x33\x89\x31\x7C\x4C\xD4\xF7\x90\xB9\x59\x7D\x41\x49\x3B\x4D\xA6\x1A\xED\x93\x79\x84\x94\xB6\xCB\x82\xF2\x3A\xB3\x35\x66\x39\x01\x8E\x71\x96\xB7\xC6\x6C\xC0\xC8\xD0\xEC\x9B\xDB\x1A\xA3\x8C\x6E\xC3\xAD\x31\x40\x2C\x9B\xC0\xDA\xA9\x98\xE7\x53\xD3\x2F\x2A\x99\xCB\x92\x33\x8F\x3E\xE2\x84\x6A\x59\xD0\xFD\x07\x54\xB3\x7C\xA8\x1D\xCF\x66\xC9\x45\x30\x86\x64\x31\x09\xD5\x28\xFD\x45\x44\x89\x25\xAA\x59\x87\x25\x84\x2D\x21\xC8\xBA\x8C\x14\xBF\x3C\x82\x6A\x9D\x72\x39\x2F\x4D\x84\x5C\xB8\x66\x90\xB1\x5A\xEA\x63\x61\xD1\x95\xB1\x50\xA9\xC4\x7D\xAD\xEA\x6F\xCF\x2C\xD4\x77\x0C\x57\xEA\x99\x35\x5F\x9E\x6E\xCD\x3F\x07\x17\x18\xF8\x5E\xD0\xF7\x54\x1B\x35\x72\x3E\xB1\x3C\xD7\x5E\xA3\x24\x68\xC8\x89\x5C\x7E\x99\x62\xCE\x37\xD3\x0A\xAE\x57\xD4\x09\xAA\x0E\x75\x5A\x6E\x75\x66\x93\xCE\x89\xEA\x97\x30\x4A\xB2\x4E\x85\xC2\x0C\x8B\xC6\x90\x29\x27\xD8\x31\x23\x7A\xC7\x4C\x5A\xC2\x0B\x8B\xAA\x22\x11\x95\x13\x4C\x44\x11\x88\x28\x80\x88\x22\x21\x62\x91\xE4\x2A\x31\x9C\xBB\x31\x30\x1A\x0D\xB3\x72\xB2\x7A\x5E\x3A\x81\x04\x69\x34\x22\xDA\x68\x32\xD6\x74\x86\x11\xA1\x8D\x25\xA2\x99\x26\xA2\x99\x26\xA2\x99\x26\xA2\x99\x26\xA2\x99\x26\xA2\x99\x16\x7E\x9D\xEF\xC4\x6C\xC7\xF9\xB7\xAC\xE3\x7F\x23\x95\x3C\xAE\xA6\x4E\xDC\xD5\x48\x5F\x70\x21\x6D\xC0\
x7C\xA3\x9C\x18\xAC\xBC\x3F\x31\x11\xD5\xEC\xCA\x2B\x7A\xAD\x49\xF5\x2B\x2F\x95\xB1\x40\xA4\x06\x6A\x2A\x06\x3E\x2C\xBA\x22\x2C\xBA\xA2\x0B\x4F\xCC\x30\x83\x98\xD3\x55\x19\xBD\x26\x75\xC2\xBF\xF2\x62\x21\xAB\x7B\xA5\xCC\xA7\xA9\x43\x83\xD6\x1B\xE5\xC7\x8D\xF6\x98\x4C\x1A\x6B\x67\xE8\xF5\xAE\xB5\xEC\xF3\xB2\x9C\x6A\xE1\x9C\x93\x00\x6F\x88\x0F\x62\xC5\xCA\x92\xDB\xCB\xD0\x79\x77\xDD\x58\xF8\x49\xB9\xE0\xBA\x56\xE1\xD6\x28\xBB\xBD\xAC\x7F\x1E\x66\x3E\x85\x91\xE4\x4E\xB1\xDB\x2B\x69\x97\x37\x4A\x55\xBF\x51\xAA\x68\xA3\x9D\x24\x03\x7B\x9E\x42\x16\xB9\xC0\x99\xD5\x6F\x6A\x99\x4D\xBD\xA6\xF8\x61\x9C\x00\xB0\xA6\x5A\xCE\x8D\x40\x36\x32\x66\x1C\x57\x07\xB1\x32\xF8\xB4\xA3\xBC\x06\x06\x97\x7C\x76\xDA\xC2\x27\x66\x15\x63\xDB\x71\x2B\x3D\x6C\x1D\x54\x21\xD5\x37\x7D\x88\xBB\x25\x70\x07\x8F\xC5\x62\x40\x00\xCA\x45\x13\xB6\x86\x78\x3B\xC0\xD0\x7E\xAC\x0C\xF1\x2B\x9C\x7F\x39\x66\x38\x6E\x33\x3A\xB4\x10\x5C\x1C\x5E\x25\x2E\x8E\x8C\xA6\x4F\x16\xB9\xD8\x50\xD5\x69\xC4\x62\x46\x5C\x9C\x45\x2E\xCE\x22\x17\x67\x91\x8B\xB3\xC8\xC5\x59\xE4\xE2\x2C\x72\x71\xD6\x73\xB1\xD3\x7D\x3A\xDE\x02\x46\x5E\xB0\x0B\xC0\x65\x8D\x70\x39\xAF\xC0\x21\x2E\x06\xD1\xE8\xF2\x38\xB2\x3C\x6C\x57\x0A\x50\x9C\xD8\xC5\xD0\x48\xAF\x1A\xED\x70\x33\x0D\xB3\x55\x64\x78\x46\x34\x24\x1E\xD7\x4C\xA3\x70\x9A\x5B\x57\x5F\xC5\xE5\x52\x93\x88\x5A\x40\x3F\xCD\x81\x2F\x44\x3F\x4D\x9B\xEC\x81\x7E\x96\x13\x00\x47\xFA\xF1\x22\x64\x30\x23\x51\x38\x2F\x16\x3F\xCC\x38\x3A\x94\xCE\xFD\x84\xD4\x2A\x96\x52\x7B\x87\xDC\x49\x09\xED\x54\x1F\x90\x29\x42\xA6\xD8\x96\xE7\x65\xC1\xE7\xBB\x30\x71\x20\x63\xEA\xC0\x44\x8D\x25\xE5\x66\xA6\xA6\xA9\xC1\x2C\x36\x98\x05\x94\x99\xCA\x99\x4D\xE3\xED\x40\xB5\xA6\x7A\x81\xE4\xE5\xB0\xE8\xBB\x60\xA5\x08\x5D\x18\xC1\xB7\x52\x51\x85\xF4\x82\x1C\xA5\xA8\x76\x4D\x49\x5B\x05\x4D\x97\xD3\x71\x80\x76\xCA\x3F\x49\x72\x63\x90\xA4\xAA\xEE\x1A\x0D\x8E\x83\xEB\x10\x3C\xB6\x12\x57\x02\xE4\x39\x96\xFD\x18\xED\xFD\x0E\xDC\x83\xFF\xF9\xEB\x30\x97\xCE\xBC\x3E\x4B\xAF\xA2\x46\x4B\x0A\x6D\x6E\xB3\xA2\x8A\x21\x76\x34\x1F\x0F\
x4C\x72\x20\xC1\x51\xD2\x77\x38\xE8\x4C\x3B\xE3\x32\x8A\xC8\xA8\xFF\x38\xA8\x2A\x7C\x82\x0F\x63\x7E\xBD\x3C\xD2\x16\x4B\x1C\xE2\xE0\xE5\x31\x57\x2C\xD5\xAF\xB8\x8E\x2B\xF6\xC0\x2B\x9C\xBB\x08\x3E\xAA\x7F\xE6\x3A\x4A\x79\x43\x3B\x3A\x1C\x17\x23\x39\xAB\x75\xD1\x9A\x35\xAF\xAE\x39\x34\x11\xE3\x02\xAB\xBE\xFF\x26\x0E\xEA\x95\xA1\x31\xB9\x4B\xFC\xE0\xFE\x64\xB4\xF4\xA0\xC2\x93\x84\x3B\xBA\xFA\xC3\x29\x74\x30\x0E\x02\x30\x03\x00\x15\x03\x98\x01\x80\xEA\xCC\x00\x0A\x7F\x77\xD2\xC5\x38\xDB\x02\x34\xA0\x25\x2F\x73\xF0\x44\xD3\xC1\x6B\x2E\xC2\x20\xFD\x05\x24\x5C\x07\x60\x38\xE1\xDF\x9D\x34\x81\x2D\x7C\x3F\xB7\xB0\x0B\x5B\xB8\x28\x69\x61\xF7\xE2\x16\x06\xF0\xDA\x6F\x02\x29\x3D\x4A\x4C\x82\x92\xB1\xA1\x94\xE7\x24\xEC\x40\x5B\xD6\xE4\xD2\x75\x8A\xDB\x3B\xE6\xB0\xD1\x9C\xCE\x5E\xB6\xC6\x1F\xBE\x6C\x42\x3B\x48\x94\x8A\x07\xE5\x73\x75\x08\x8F\xEC\x9C\xD3\xF9\xC3\x47\x5A\x5B\xBF\xE0\x3A\x0E\xA2\xE2\x95\x04\x66\xEB\x51\xEC\x00\x5D\x7C\xB8\x72\xE1\xB9\x4E\xD2\xD4\xE5\x51\x10\x80\xFB\xA9\x8E\x41\x81\xFE\x7C\x97\x2F\xB9\x8C\x53\xF9\x60\xBE\x96\x90\x21\xC0\xBF\xE7\xBD\x78\x90\x30\x62\xE8\x21\xA1\x40\xA7\x28\xD0\x15\x45\x3C\x71\xCF\x2A\xB6\x62\x96\x38\x6D\x11\xB4\x62\x42\x2B\xB8\x47\xEA\x7F\x77\x86\x6B\xD2\x19\x87\x4E\x74\x4D\x9B\x93\x33\xB3\x0B\xE0\x6C\x6D\x02\xA9\x49\x21\x35\x67\xE6\xE0\x38\xA9\x60\x4A\xD1\x0E\x09\xE5\x59\x02\x4D\x42\xF8\xFF\x94\x32\x1C\xE9\x93\xA7\x9D\xE6\x79\x3A\xCD\xF3\xAD\x4D\xF3\x60\x93\xA7\x10\xB1\xD5\xE8\x7F\x2F\x65\x5B\x3C\xBF\xF2\x3B\xE9\x6C\x4A\x45\xDC\x07\x86\x93\x3E\x6E\xB1\x6E\xAF\x9C\xAE\x5F\x0C\xB7\x45\xF5\xA9\x5C\xDA\x29\x0B\x4A\x4F\x89\xC1\x8B\xE8\x04\x91\x1D\x65\xB0\x0A\xBB\xF9\x2C\xBC\x68\x4F\x00\x44\xD7\x65\x13\x1D\x96\x59\x4D\x91\x2F\x94\x72\xE1\xB2\x09\xE7\x4D\xCA\x68\xC9\xC9\xF8\xA0\x2F\xA5\xE4\x74\xC2\x7F\x54\xD4\x3F\x7B\x1D\x1A\x99\x84\x08\x52\x64\x0A\x7A\x1B\xF9\x7A\x89\x3E\xF1\x17\x6D\xF5\x83\x8B\xB7\xFA\x41\xBB\xD5\x0F\x9E\xB8\xD5\x0F\x7E\x78\xAB\x1F\xFC\x5F\x5B\xFD\xE0\x23\x62\xAB\x5F\x7C\x6C\xCB\x5F\x7C\x7C\xCB\x5F\xFC\xDD\xAD\x7E\xF0\xD1\
x2D\x77\xF1\xB5\x2D\x7F\xF1\xF5\x2D\x7F\xF1\x37\x5B\xFE\xE2\xFB\xB6\xFA\xC1\xE3\xB7\xFA\xC1\x93\x86\x1F\x80\xA0\xCD\x28\x2C\x82\x5B\xA0\x14\x92\x63\xB0\x2B\x1E\xC8\xFA\x8C\x06\xBB\xD7\x5A\xCB\xF1\x3E\xBB\x69\x35\x01\x83\xC8\xA9\xFA\xA3\x1C\x08\x80\x1B\x55\x17\x39\xEB\x5D\xE7\xE0\xEA\xA2\xAE\xFE\xF4\xC9\x18\x16\x09\x2A\xD8\x05\xCE\xC2\x12\xAD\x70\x05\xF0\xF2\xC8\x12\x1E\x7A\x75\x9D\x17\x7E\xEA\x3F\x88\x8B\x88\xA8\xFF\xF5\x75\x14\xB5\x5E\x39\x59\x7F\xEC\x24\x65\x3B\x25\x29\x87\x12\x86\x3E\x28\xBA\x56\xB2\x4D\x25\x9C\xBD\x50\x3B\x72\x99\x91\x91\x05\x2B\xA6\xC3\x08\x04\xBF\xB3\x4B\xEE\xD5\x49\xA4\x1B\x4A\xD7\x55\xEC\xBF\xEE\xFC\xEF\x27\x2A\x1D\x7D\x58\x77\xFE\x43\xC9\xCD\xE0\x84\x45\x55\x34\x3F\x40\x87\x75\x8F\xA2\xC5\xA3\xBC\x68\x4A\x96\x74\xB9\x53\x94\xB6\xB9\x1D\xB1\x8C\xCE\x3D\xC5\xA2\x8E\xB0\x5C\x10\xFC\x42\x23\x71\xE4\xE5\x31\xBC\x27\xBB\x26\xA7\xCC\x2E\xB9\x17\x6E\xE4\x6C\x9F\x15\x08\x8F\xD2\xD9\x65\x35\xA6\xD4\xBE\x04\x76\x68\xBF\x9A\x69\xBF\x82\x56\x2B\x68\xB5\xAA\xDC\xC8\x55\x07\x26\xB2\x0A\x74\x6A\xA4\x13\x2B\xBF\xB2\xB1\xB1\x71\xA7\xD9\x1B\x82\x09\x0A\x87\xCE\xCF\xEF\x85\x3F\x17\x8A\x3F\x82\xA1\xEE\x15\xE4\x75\x84\x1B\x9F\xE8\x6F\xD4\x78\xE3\x73\xFD\x8D\x9D\x78\xE3\xCF\xFA\x1B\xDF\xD3\x79\xB1\x2C\xFE\x3B\xDC\xD8\x23\x50\x7D\xB1\x2E\x67\x75\x08\x30\x89\xDA\xB9\x2B\x30\x78\x93\x56\xB7\xD1\x12\x17\x6A\x02\x4E\x18\x25\xAB\x9B\xED\x57\x37\x9B\xAC\x6E\xCA\x8F\xC1\xD2\x28\x61\x75\x2B\x53\x4A\xAC\xF9\xF1\xB3\x39\xBD\x03\x00\x7A\xF7\x60\xDD\xB2\xD0\xC6\x45\xF8\x84\x16\x5A\x98\x0A\x7D\x07\x26\xE9\xC0\x00\x13\xA1\xCA\x6F\x88\xC7\x0A\x36\xD9\x89\x71\x2F\x0A\x21\x1D\x9F\x42\x47\x5C\x58\xF7\xE7\xF9\x53\x3C\x41\x52\x9D\xCC\x5D\xA2\xEE\xC7\xB1\x33\x7C\x01\xEC\xDF\xDF\x5E\x8E\xB7\xFD\x6E\x5E\x60\xAB\x43\x6C\x7B\x8C\x1D\x46\xAA\x17\x4E\xB0\xA3\x86\xB6\x5B\x84\xFF\x1A\xA2\x59\x5D\xE0\x84\xDF\xD1\x05\x2B\xE5\xBB\xE1\x17\xBA\x54\x96\xC3\xBD\xEA\x97\x25\xEE\x83\xE0\xCE\xD1\x0E\x0A\x66\x00\xE3\xA8\x6E\xD5\x6A\x0C\x63\xE0\x38\x17\xDA\x94\x50\x74\xC2\x0E\x1D\xA9\x34\x04\x13\x60\x25\
x4D\x2A\x82\x1B\x86\xBB\xAC\x6A\x8A\x17\xDF\x41\xD9\xFB\x4D\xE2\x9C\xA0\xE3\xA5\xD7\xC4\xF8\x22\x0E\xB0\xC7\x60\x83\xA8\xFA\x57\x7F\xA5\x17\xA5\x31\x53\x31\x3B\xE1\x38\x20\xFE\x95\x74\x10\x08\xC4\x06\x6D\xC0\x18\x50\x50\x05\x68\xBE\x2A\x9E\x52\x46\x95\x57\x00\x23\x24\x2A\x6F\x86\x76\xAA\xE3\x7D\x9B\x31\x7A\x52\xB0\x36\x04\xEA\xBF\x86\x33\xA5\xA2\xE2\x6B\xE8\xD8\x66\xE6\x48\xFE\x64\x98\xE1\x35\x28\xBC\x19\xBA\x9B\x38\x8B\x4F\x0C\x63\x32\x1D\xBA\x04\x64\x8F\x2C\x39\x44\x96\x70\x26\x71\x2B\x22\x7B\xD1\xC7\x31\x00\xA5\x6E\x4D\x20\x11\xEE\x0A\x52\x4E\x65\x93\x74\xE1\xC0\xCE\xAF\x5F\x1A\x26\x09\x7D\xD3\x87\xA7\x9C\xA6\x77\x99\x92\x2A\x84\x8F\xED\x68\x33\x67\x7A\x5D\x55\xC4\x1C\xDD\x31\x4E\xCB\x74\xE9\x00\x65\x52\x9A\x6B\x07\x05\xE8\xD7\xD1\x7E\xAC\x68\x01\x39\xDE\xED\x51\x3B\xE8\x54\xD7\x27\xE2\x9E\xE4\xC7\x4F\xCE\x54\x18\xDE\x0E\x16\x3F\xEB\x8F\xF7\xFF\x6D\x59\x04\xD2\xDF\x2D\x66\x4A\x17\xEE\x12\x8E\x0C\x3E\x71\xB1\x70\x8E\x43\xA2\x04\x19\x7B\xAD\xEA\x5A\xDA\xF7\xF3\x17\x91\x6F\x02\x1E\x5F\x80\xB1\xBD\x2A\xC9\x59\x4C\x1B\x80\x9C\xBC\x4D\x38\xDD\xA1\xB1\xAD\xA2\x1F\x9F\x6F\xE3\x3E\x2A\xA5\xC6\x51\x4E\x79\x71\xB0\x55\x4B\x8D\xF1\xB4\x51\xE6\xD4\x12\x71\xB8\xE0\x56\x3B\xCC\x61\xE9\x2C\xCD\xC0\x8B\x3A\xDA\x88\x04\xC8\x68\xD7\x0C\x83\x4B\x08\xBE\x85\xBE\x81\x33\xFD\x9F\x33\xFE\xC3\x02\x93\x73\xF9\x1F\xEF\xB6\x3D\xCE\xE6\x32\x9F\xFB\x67\x32\x5C\xD4\xFD\x0B\x30\x42\x36\xC7\x03\x06\x00\xDE\xF9\xB8\x4B\x77\xBE\xAF\x79\x79\x36\x7E\x1B\x31\xFD\xE1\xCB\x26\x1C\xE3\xEC\x14\x4D\x12\xC5\x93\x24\xE3\x49\xA2\x29\x40\xA5\xC0\x60\x3F\x9C\x17\xB4\xB9\x3A\xA6\x44\xF9\x2A\x4E\x0F\x0B\x53\x5C\xE1\xF4\xC0\xF4\x80\x71\x7A\xE8\x90\xD0\x27\x11\x38\x30\xAF\x76\xD2\x3E\xF3\x65\xA4\xF9\x33\x8A\xBC\x41\x76\x39\x97\xA6\x5E\xDD\xB5\x39\x0A\x9E\xB5\x36\xF3\xEA\x9A\x16\xC3\x41\x95\x07\x76\x30\x97\x4D\x28\xF4\x05\xB3\xAC\x62\xB2\xB1\x6B\xDA\x1C\xD6\xD9\x43\x4B\xFE\xD4\xA9\x53\xA7\xB4\xD3\xC0\xC8\xF0\x53\x1E\x5A\xE2\xDD\x66\xCA\x50\x16\x1A\xF8\x6E\x0C\xE7\xE2\x81\xE2\xE6\x31\xC6\xAB\xB6\x05\x81\xA3\x28\xDA\
xAB\x31\xAE\x40\x01\x86\xC9\x4C\xD0\x5D\x46\x95\xBA\xC2\x00\x31\x7E\x6F\xA7\xC3\x88\xBC\x1D\x24\xCA\x6A\x67\xF7\x4F\x1E\x17\xE4\xE1\xF8\x71\x15\x53\xE1\xE0\xC4\x26\x44\x28\x22\x11\x8A\x47\x23\x11\x1E\x77\x36\x44\x28\xBF\x59\x22\xEC\x7C\x64\x89\xF0\xD8\x48\x84\xC7\xF6\x44\x30\x09\x11\xB6\x47\x22\x94\xDF\x42\x22\x6C\x81\x0C\xF2\x6C\xC8\x50\x3D\xCA\xC9\x20\x02\x19\xA2\x44\x9A\x82\x44\x1A\xEF\xA8\x9C\x41\x4B\x53\x27\x24\x29\x23\x49\x46\x8F\xC2\x79\xE1\x44\x7D\x3F\x46\x2C\xEE\xF8\x96\x10\xE6\x71\x8F\x1C\x61\x62\xB5\xB3\x48\x1A\x1E\xD9\x77\x55\x09\x35\xB2\x48\x8D\xEA\x51\x48\x8D\xF1\x77\x9D\x0D\x15\xC6\xDF\x2C\x15\x1E\xFB\xC8\x4E\x8F\xC7\x44\x29\xF5\x98\x24\x84\x65\xA8\xE9\x00\x72\x70\x6B\x1C\xBD\x62\x64\xF8\x91\x2A\x63\x52\x55\xA6\xE0\x7D\x83\xA1\x2A\x63\xA2\x2A\x83\x9B\x65\x07\xB1\x9E\x4E\x50\x65\xF4\x12\x9D\xDB\xE8\x55\x99\x02\x40\x2D\x16\xA8\x32\x19\x21\x3F\x73\xBA\xA3\x7D\xB7\xEF\x5B\x05\x7B\xB2\xE7\x16\x0C\x0E\xCD\xC0\x1A\x45\x96\x99\x7C\xC7\xB2\xCC\xB6\xEF\x30\x96\x49\x18\xE6\x6F\xC8\x12\x1C\x93\x2F\x57\x32\xFB\x80\xE2\x99\x10\xCA\xC4\x69\xBD\xED\xD1\x48\xA3\xC7\x9C\x0D\x8D\xB6\x7F\xB3\x34\xDA\xF1\xC8\xD2\xE8\x9C\x48\xA3\x73\x30\xF1\x3C\xA6\x14\x74\x1D\x7A\x4E\x96\xD5\x9D\xBF\x83\x64\x42\xF7\xD4\x85\xFA\x56\xB8\xDA\xAB\x97\xE9\xEA\x66\xBA\x72\xEC\x55\x59\x56\xF7\xF5\x2F\xA3\xDF\x45\xDF\x4B\x6F\xF0\xD7\xF7\xD0\x55\x41\x57\x77\xF3\xB3\xB3\xE2\x0B\xA7\xD6\xF8\xF8\x89\x44\xE8\x68\x79\x16\x97\x2C\xD5\x9F\xC4\xC5\x60\x7B\x45\x56\x26\x5B\x67\x2E\xAF\x5F\xCD\xBF\x1E\x35\xB7\x23\x5B\x2B\x2F\x9E\x20\x0D\xC7\x85\x24\xDC\x66\x86\xDC\x66\x82\x49\x66\x7A\x1D\xCB\x6C\xC6\x6D\x26\x30\x0B\x85\x5C\x9A\x19\x6E\x33\xA7\xE3\x36\x33\xE0\x36\x9D\x72\x1B\x26\x8D\x0F\xDC\xA6\x38\x68\x15\xB9\x4D\xCD\x71\x9B\x99\xE1\x36\x35\xE0\x36\x33\xAF\x63\x49\x7F\xB7\xE8\x2D\xEB\x88\xA7\xFA\x85\xD7\xE2\xDF\x5F\xFD\x2E\x39\x99\xCF\x47\x8B\x08\x40\x33\x58\x2F\x3E\x09\xAC\x37\x59\x99\xC8\x06\x5F\x56\xCB\x6D\xD6\xB5\x39\x47\x0F\x5D\x40\x21\x2B\x17\x35\xE5\xC0\xFA\xCE\x69\xC9\xCA\xE2\x92\x95\xF7\x4B\x56\x99\
x2C\x59\xB9\xCB\xE8\xE0\x3B\xAD\x5A\x99\xCB\x60\xD5\xCA\x96\x9A\xD2\x73\xBA\xF3\x6C\xB8\x6A\xE5\xBC\x6A\x95\x7B\xD4\x45\xB0\x42\xED\x51\xCB\x71\x0D\x73\x19\x16\x5D\xE0\x73\x72\x25\xB9\xB9\x7F\x87\x27\x43\x51\x85\xB8\x95\x23\xE4\x34\x29\xFD\xC7\x04\xA5\x6F\x7B\x30\xBC\x33\xEA\x59\xAC\xF4\x23\xCA\x74\x79\x98\x39\x02\xC9\xC4\x7B\xAA\x24\x39\x75\xDC\x53\x2D\x01\x8E\xC2\x95\xB8\x95\xB8\x07\x13\xB3\x18\x9E\x80\x25\xEF\xA9\x1A\x57\x82\xE4\x4C\xF7\x54\x17\xEC\xA5\xF6\xFE\x19\xB1\xAC\x76\xB6\xC5\x65\x18\x3B\x52\xF2\x6A\xCC\x5C\x3C\xC2\x10\x5D\xE4\x77\x60\x50\x75\x4D\x8B\x9B\xFB\x59\xE4\x3F\x02\xD6\x15\xCE\xAC\x01\x6F\x5F\xD3\x5A\xCC\xC7\x15\x18\xB8\x48\x19\x98\xCE\xBC\xE1\x06\x5C\x11\x1A\xB0\xC8\xB0\xC1\x7D\x06\xBD\xE5\xC8\xC0\x39\x41\x92\x11\x3F\x37\x25\x3A\x9F\x43\x71\x3C\xAA\xB5\x65\x42\xEE\xEF\xF3\x98\x48\x3B\x89\x3A\x3B\x88\x56\x35\x88\xC7\xD2\x19\x16\x97\x58\x87\x46\x13\x45\x12\xCC\x8F\x23\xE6\xED\xB7\x0B\xF3\x66\x11\xE6\xB1\x62\x9C\xA4\xF3\xD7\x4C\x81\xE2\xB4\x14\x30\xAE\xC0\x72\x11\xD7\xB4\x23\x37\x4A\x28\x60\x52\x0A\x98\x9E\x02\xA6\xA7\x80\x39\x3D\x05\x4C\x4A\x01\x93\x52\xA0\x58\x44\x01\xD3\x53\x20\x0C\xC2\x15\xFB\x27\xCA\xB1\x03\x1C\x47\x85\x47\xF2\x4A\xDF\xD0\xAE\x88\xBC\x7A\x8E\x2E\x79\xA4\x4B\xF6\x68\xA3\x4B\xC8\x05\xF8\x50\x68\x33\x9C\x1D\xDF\x66\xDA\xC4\x81\x0C\xE9\x23\x02\x7D\x58\xB0\x96\x07\xA2\x60\x75\x65\x92\xDD\xBB\x29\x29\x80\x30\x77\xA5\xDF\x15\x84\x6F\x33\x72\x5C\x7E\x70\xDC\x96\xA0\xD0\x27\xFA\x61\x15\x69\x9A\x2F\xA6\x69\x95\xE8\x87\x19\xE9\x87\xD5\x02\xFD\x30\x1B\xEA\x87\x19\xEB\x87\x29\x4D\x75\x4F\xD3\x19\xAA\x6A\xA2\xAA\x19\xCA\x39\x99\x52\xB2\x02\x4A\x8E\x07\x94\x34\x4C\x49\xED\x0A\x5E\xA8\x27\x6E\x92\x50\x72\xC1\x42\x9D\x0D\x16\xEA\x2C\x5D\xA8\x91\x92\x63\xA4\xE4\x98\x20\xC9\xA2\x5A\x38\x0E\x0B\x75\xD5\x53\xB2\xDA\x54\x2D\x44\x39\x87\xC7\x67\xC3\xF6\x52\xC4\x77\xE6\x27\xD0\x72\x99\x3A\x03\x4B\xC2\x77\xC9\xF8\x2E\xE2\x1C\x42\x36\x2F\x50\xDC\x12\xBE\xB3\x38\x87\x32\x57\x32\xBE\x33\x9A\x43\x25\xCF\xA1\x91\xCB\x3B\x57\x26\xD3\xA8\x1C\x4E\xA3\x32\x99\
x46\x65\x30\xF3\xCA\x44\xBC\xF1\x14\x2A\x17\x4C\xA1\x72\x38\x85\xCA\x19\xF1\x56\x9E\x6E\x0A\x95\x9B\x4F\xA1\x12\x7A\x0B\x53\x08\x21\x2A\x79\x0A\x65\x73\x53\xA8\x9C\x99\x42\xD9\x60\x0A\x95\x30\x85\x04\x55\xF6\x1C\x4C\x9F\x50\x07\xF4\x0C\x3A\x32\xCE\xAD\x65\x56\x93\xFD\xAB\xDE\xB7\xB1\x21\x48\x39\x06\xF1\xC8\x87\x98\x2A\xC4\x56\xD4\x4D\xD5\xAC\xCE\x35\x7C\x3C\xB8\x18\x28\xB5\x59\x7A\xD1\x67\xEF\xA9\x7E\x65\x9B\xAC\xE6\x37\xC4\x48\xD5\x2D\xF0\xAC\xC5\x9C\xBE\xBF\x68\x8B\x44\x77\x14\x31\x2B\xFC\x05\x5D\x63\xE3\x4E\x49\xB6\x48\x49\xB3\xB3\x4A\x9A\xE6\x99\xD2\x2B\x69\x69\x1D\x20\xCC\xFB\x24\x0E\xB6\x76\x09\xB4\x39\xAA\x75\x65\x97\x28\xE7\xE6\x50\x49\xE3\xBE\x13\x0F\x43\x16\x3D\x0C\x7C\xAC\x5D\x83\xC6\x46\xE7\xE2\x2D\x00\x58\x0C\xF6\x42\x32\x2C\x85\x87\xBC\x83\x1B\x51\x23\xCC\x46\x53\xC4\xED\x76\xCB\x4B\x94\xE8\xF7\x7E\x32\xCA\xF5\xEE\x46\x54\x72\x4C\xB4\x16\x6B\xEF\x58\x92\x9E\x85\x2B\xD7\xC2\xFE\x26\x65\xBD\xA2\xA3\xE3\x28\x80\x72\x67\xEF\x58\x71\x3F\xE3\xCA\x3B\xAE\xC7\x7C\x5E\xB8\x63\xCF\x3B\x84\x39\xCD\x87\x0C\xF8\x5D\x5F\xD3\x56\x18\x4D\x89\xEC\xAC\x58\x4B\xC1\x4A\xB6\x19\xCF\x97\xB1\x1B\xBB\x9C\xE7\x83\x74\xDA\x6B\x9A\x0F\x22\x0A\xA2\xBC\x17\x44\xEA\x32\x8C\x05\xD5\x61\x3E\xE4\xD0\x5B\x85\xD1\xE7\xD5\x10\x22\x7A\x0D\x70\x5A\x61\x1C\xB8\xDA\x17\x02\xB3\x73\xAC\x38\x1A\xE6\x85\x72\xB6\x3F\x7A\x08\xE3\x40\x69\x86\x99\x37\x82\x40\x92\x98\x8B\x4B\x76\xAC\xB6\x53\x52\xB0\x03\x13\x49\xB9\x3E\x2C\x4D\xBC\x1C\xCF\x7B\x45\xEC\x2E\x9A\x3E\x3A\x99\x3E\xC6\x69\x98\x3B\xF3\x93\x6D\xCE\x7E\x88\xDC\x99\xD8\x0F\x0B\x59\x73\xCE\x7E\x98\x67\xCD\xB4\x90\x12\x58\x6A\xC0\x9A\x79\xC2\x9A\xF9\x42\xD6\x04\x4E\xBC\x88\x44\x76\x62\x3F\x24\xDC\x98\xF1\xBE\xF5\x66\x1C\x53\x9C\x3D\xC7\x00\xF9\xB1\x68\x58\xCF\x31\xA1\x24\x59\x01\x1C\x03\xFA\x78\x2B\x9D\x4C\x38\xA6\x48\x39\xA6\xE8\x39\xA6\x08\x0D\x80\x16\x3F\xE4\x18\x3A\xAF\x60\x66\x39\xA6\x60\x8E\xC1\xCC\x66\x89\xAA\x0E\x88\xD3\xF3\x1C\x53\x6C\xC6\x31\x34\x7C\x41\xC3\x37\x14\x4E\x89\x4C\x62\x41\x01\x5A\xF3\xFA\x9A\x68\xDF\xE5\x1C\x8F\x0E\x34\xCB\
x67\x4E\x6D\xE1\x79\x05\x97\x27\x47\xBF\x28\xB0\x15\x51\x75\x80\x32\xBC\x79\x13\x4F\x58\x19\x57\x74\x78\xCC\xAA\xCD\xF8\x20\x46\xB6\xAC\x44\x52\x4E\x52\x60\x81\x2C\x4C\x13\xD8\x61\xDE\xFC\xF4\xF8\x15\x05\x8A\x02\x6C\x94\xCD\x2D\xAC\xC2\x96\xAA\x18\xE4\xB0\x0A\x73\xCE\x21\x97\xD3\x2A\x9C\xCF\xAC\xC2\x99\xB3\x80\x8C\xC2\xD9\xB8\x0A\xE3\x21\xAD\x31\x95\x2C\xCB\x79\x15\xB6\xB0\x0A\x67\x2E\xE7\xB0\x01\xE3\xF2\x24\x6C\x20\x5F\xB0\x1B\x94\x47\x4F\x45\x60\x24\x92\x87\x79\xAF\xFB\xE4\xE4\xA9\x90\x91\x8B\xD4\x35\x6D\xD9\x73\x91\xE9\xE5\x0E\x73\x91\x1C\x72\xD1\x42\x43\x2F\x1F\x18\x7A\x26\x35\xF4\x90\x8B\x4A\x5C\x87\x4B\x02\x27\x67\x43\xCF\xE2\x3A\xB6\x39\xF7\xE0\x76\x78\x62\xE8\xE5\x78\xEA\x22\xE5\x1E\x17\x8E\x86\x2A\x8C\x80\x42\x45\x96\x12\xE8\xA0\x1F\x21\xEF\xE3\x7C\x92\x48\x81\xF9\xB5\xD5\x0E\x16\xCD\xF0\xF8\x63\x92\x0F\xFC\x52\xE8\x43\xCB\xC9\x7D\x15\xB1\xD3\xE2\xF8\x16\x67\xD3\x50\x1E\x4E\xD5\xE1\x28\xF5\xCF\x19\x22\x59\xC4\x2E\x71\x51\xA8\x11\x4C\xB1\xE3\xFD\x14\x11\x7D\x7C\x0F\xAF\xC4\x83\x67\x3B\xE2\xB3\xF3\x67\x9E\x85\x43\x1E\x24\x92\xC6\x4E\xF7\x21\x32\xB7\x2A\x99\xA5\xC3\x23\x7F\x29\x2E\xF2\x07\x26\x7C\x4C\xB7\x77\xA9\xF2\x41\x50\xE3\x62\xC1\x30\x8C\x3B\xA1\x70\x91\xE0\xF0\xCA\x07\xEE\x55\x31\xE7\x5E\x05\xD5\x7E\x0B\xEE\x55\x35\xE7\x5E\x0D\xEA\x9C\x4C\xDD\xAB\x9A\x57\x95\xCD\xDD\xAB\x1A\x0F\x9D\x01\x4F\x84\xF0\xA7\xB9\xBD\xAB\x7E\x74\xCE\xCC\xF2\xC7\xFB\x94\x34\xD3\xF9\xE2\x96\x3F\x1C\x8D\x1D\x71\xB1\xF8\x41\x67\xBC\x78\x82\xBC\x88\xFE\x38\xA0\x31\x3A\xE5\xE8\x8C\xA1\xF1\x63\x8E\x9F\x32\x7E\x47\x87\xD1\x5C\x58\xE8\x32\x09\xC5\xAF\xFD\x0B\x36\xE8\xC4\x8A\xA6\x5F\x74\x6C\x0D\xD3\x64\x68\x5F\x77\x21\x65\x21\x96\xBC\x2F\x30\x12\xF2\x9C\x6B\xF1\x70\x97\xD3\xCB\x6A\x87\xD3\xBB\x44\xDD\x5A\x2F\x8F\xA1\xF6\xB2\x84\x99\xAD\x84\x9F\xFA\x5F\x7C\x5F\x12\xFE\x38\xC1\xC3\xDE\x63\x60\x55\xCA\x76\x65\x88\x5D\xCF\x0B\xAE\x13\x58\xC1\xEA\x97\x5C\x17\x22\xE8\x29\xD6\x8C\x8A\x72\x46\xC6\xE2\x5C\x2B\x7D\x24\xDA\xE0\x69\xC5\xD1\x6A\xEC\x93\x4F\x8B\x54\x56\x2F\xC8\xE5\x82\x34\x6F\x6E\xE0\
x76\xC5\xD0\xCF\x8F\xD3\x89\x54\x6F\x8E\xCE\x17\x0B\x8A\x5A\x1C\xCC\xF9\x6D\xB5\x90\x6A\xA4\x8D\x1D\x15\xD9\x68\x34\x1A\xE5\xE5\xA8\xE2\xD0\xB6\x71\xFC\x45\x11\x52\x92\x03\x9D\xC6\x65\x48\xF2\xD5\x53\x30\x50\x89\x4E\xFF\x0F\x88\xE5\x64\xC0\xC7\x42\x3A\x6B\x67\xEA\x3F\x39\xC9\x07\x38\x98\x6E\x66\x5C\x70\x17\x26\x76\x21\xE9\x63\x49\x1F\x4B\xFA\x58\xD2\x7E\x97\xE4\x93\xCE\xB1\x5F\x19\x99\xC4\x38\xB9\xA0\xF9\xFC\x6C\x9B\xCF\x1E\x52\xF3\xD9\xD9\x36\x9F\x3F\xA4\xE6\x2D\xD0\xE5\x67\xC2\x95\xA9\xCE\xD4\x4B\x71\x86\x5E\x38\x69\x3A\x75\xF4\xCD\xCE\x4A\xB9\x88\x9E\x64\x0A\x8A\xB1\x1E\xC0\x9A\x02\xB9\x39\x74\x92\xBA\xE4\xB1\x01\xAC\x78\x1C\x7F\x61\x27\xF0\x26\x1D\x39\x49\x6E\xF2\x27\xCB\xCA\x50\x70\x5F\xCA\xD4\xFE\x97\xDE\x37\x38\x33\xE6\x54\x1F\xB1\xA7\x86\x11\x7B\xFD\x12\x58\xFD\x52\x29\xB7\x85\x69\x78\xAF\x48\xE7\xE1\xDD\xA2\x8B\x87\x2D\xD0\x26\xDC\xE7\x94\xDF\x3D\x73\x6F\x37\x66\x43\x1F\xDE\x73\x74\xB8\x59\x73\x1B\x78\xCA\x07\xBF\x94\x47\x1B\x4B\x7B\x82\xF6\x28\x86\x4C\xD6\x1D\xCC\xEA\x9C\x33\x7E\x96\x5E\x34\x23\xAA\xA5\xFC\x04\xB9\x83\x44\x40\x1D\xCA\xCA\x7F\x9C\x6D\xE4\x05\x01\x6F\x30\xF9\xEB\x6D\x13\x21\x6D\x51\x14\x85\x2A\x8A\x4C\x9B\x02\x64\xCD\x9B\x12\x84\x78\xD1\x44\x61\x90\x57\xC9\x15\x28\xE4\x41\x1A\x64\x60\xB4\xC0\xFF\x46\x63\x1B\xEB\xA3\x9E\x0F\xEA\xFF\x2E\xB1\x03\xC3\xA1\xDB\x72\x89\x94\xFE\x9D\x18\x08\xED\xCA\xA5\xD4\x34\x28\x29\x9E\x1A\x6D\x02\x7C\x27\x7C\x77\x0C\xDE\x3E\xB2\x84\x01\xD3\x69\xD7\xFE\x9D\x09\x88\x30\x19\x10\x65\x11\x20\x80\x66\x6C\x36\x81\xA4\x18\x42\x52\x0C\x20\x29\xB6\x0C\xC9\x27\x52\x48\x4C\x85\x14\x8B\x80\xF4\x88\xFE\x11\x68\x69\x5F\xDF\x52\x3B\x5E\x6A\x31\x37\xC8\xF7\xC3\x83\xDD\xED\x04\x1E\x4C\xE0\xC1\xB6\xA5\x76\xE2\x2A\x37\x39\xB4\xD4\x6E\xDF\x4F\xB0\xFC\xA0\xB3\x6E\x1B\x1E\xAE\x3A\xE8\x8C\x1B\x2F\xB9\xED\xF5\xCB\xB1\x7A\xE5\x18\x74\xDA\x89\xAB\x0E\x4D\x24\xF4\x3D\xF6\xD3\x4B\x26\x7A\x06\xC0\x93\xF7\x24\x00\x6A\x4A\xC3\x3F\xC4\x94\x02\xB5\x53\xAC\xBA\xD2\x8B\xD5\x23\xCD\x64\xC8\x21\x17\x00\x7C\x2E\x41\x01\xC0\x34\xA1\x14\x95\xA2\x19\x87\
xC9\xDC\x54\xD0\xF6\x84\xD8\x7A\x6E\xCD\xF4\x62\xF5\xEA\x39\xCC\xFD\xEA\x3D\x43\xCC\xCD\x36\xE8\x26\xD0\x55\x45\x62\xA1\xA2\xCA\x5D\x55\x94\x0E\x55\x94\x0E\x15\x0C\x08\x19\x20\xAC\x32\x95\xBF\xA8\x43\x42\xC4\x1B\x6E\xE4\xE5\x11\x10\x1E\x63\x57\xB1\xD8\xA0\xA0\x7C\x35\xD3\x6A\xE8\x6D\x47\x87\x43\x99\x6F\xA0\x22\xA1\x55\x91\xD0\xEA\x9B\x4B\x25\x8E\xA7\x98\x7F\x40\x2E\x0B\x94\xA0\x5D\x02\x37\x06\x69\x52\x51\xDA\x4D\xD5\xEB\xA5\xAA\x8F\x3B\xA7\x07\xBB\xFB\x07\x3F\x38\x78\xB0\xAF\x7F\x70\xB0\x97\x4E\xF7\x86\x1D\xC7\xEA\xAE\x71\x1F\xC0\xBB\x21\xCF\xAA\x98\xB8\x4D\x8B\x89\xE3\x13\x3C\xF7\x7F\x98\x54\xAB\x9F\x54\x73\xC7\x68\x0B\xD2\x5B\x0A\x12\x50\xDE\xA0\x1F\x2E\xEB\x53\xBD\x51\x62\x6F\x6E\xEF\x00\x6F\xC6\x16\x55\xF2\xB0\xCD\x31\x9B\x2F\x06\x8A\xBB\x1C\xB3\xE6\x72\xF3\x05\x99\x7C\x05\x9B\x7C\xA3\x60\x07\x39\x0D\x86\x0C\x28\x75\x23\x34\xF9\x0A\x10\x43\x7B\x54\x4D\x47\x24\x0B\x34\xF9\x0A\x67\x9B\xDC\x15\x68\xEF\xE5\xAE\x74\x45\xD4\x62\x73\x57\x24\xF6\x9E\x5E\x56\x57\x51\xA5\x81\xC3\x74\x00\xB1\xA0\xAA\xB2\x48\x72\x3F\xEE\xF6\xA8\xAB\x48\x95\xFE\xBC\xE8\xE8\xC9\xE7\x4F\x86\x8A\x9C\xBB\x44\xD1\x1F\xF4\xAB\xA3\x25\x45\x29\xCA\x43\x3A\xD4\x3E\x39\x85\x97\xF5\x07\x37\xF8\x5C\xBD\xDF\x11\x1C\xCE\xC4\x72\xE8\x33\xFF\x1B\x54\xAF\x33\x5F\x10\x1F\x7A\xE9\xB2\xFA\xF7\x39\x11\x61\xC4\x11\xE7\x98\x36\x8B\x71\x94\xC7\x03\x25\x11\x47\x79\xC4\x91\x9C\xC3\x51\x20\x9C\x64\x4C\x49\x30\xE8\x23\xA6\x64\xC0\x94\xD3\xC0\x55\x09\x62\xCE\x73\xDA\x7F\x5A\x24\x5A\xB7\xFA\xA7\x84\x49\x3C\xF6\xF1\x93\xAD\x04\x73\x77\xFC\x6C\x34\x25\x0A\x3A\x05\xF3\x87\xB7\xFD\xC1\x6D\x12\xF7\xDE\x1D\xDB\xEE\x6D\xB1\x07\x3E\x03\x38\x2F\x47\xC4\xE7\x7E\x7C\x55\xB7\x47\x3D\x13\x13\x15\x02\xB3\xE8\x7D\xAE\x08\x6A\x23\x50\xD3\x8F\xBB\xA6\x70\xA4\x1E\xB4\x72\x15\x3D\x15\x78\xAF\xE2\x66\x4E\x43\x96\x8C\x06\x51\x7F\x81\xF3\xDD\x6B\x7F\x4F\x3A\x00\x4A\x15\x4D\xCC\x0C\xAB\x48\x01\xF3\xFE\xA5\x51\x2C\x69\x10\xE8\x94\x0D\x9E\xD2\x4F\x12\x3B\x80\xB8\xF9\xC8\xC9\x24\xB3\x3C\xF4\xBE\x1B\x4C\xFD\x23\x48\x14\x05\x32\x59\x63\xF2\xC2\xEF\
xC7\xC2\x7E\x91\x07\x8A\x84\x07\x0A\xE2\x81\xA2\xE7\x81\x22\xE1\x81\x22\xF0\x80\xF6\x5F\x05\xC0\xC8\x6A\x09\x14\x01\x08\xFE\x6B\x70\xAD\xD5\xFF\x8D\x0F\xD1\xE4\xBB\xC4\xCE\x78\xFE\xC8\x0D\x4D\xD9\x1C\x73\xDE\x84\xB4\x99\x39\x9B\xAA\xD2\xE1\x6C\x37\x98\xBF\x9F\xA6\xAF\x5F\xEF\x28\xC9\x76\x38\xB8\x70\x3C\x55\x83\x0A\x2C\x17\x56\x10\x11\xC0\x2C\x2F\xD0\xC9\x85\xD0\xF7\x9D\x25\xC4\xC8\x93\xD3\x3C\x3A\x48\x35\xCC\xED\xCF\x52\x0D\x4F\x6A\xF7\x32\x0E\x25\x05\x9F\xB4\x8E\x3E\x85\x99\x8B\x7B\x45\x07\xCB\x76\x88\x4A\xD1\xB0\x36\x6A\x56\x9C\xA0\x93\x7D\x48\x8A\x56\xE2\x59\xDD\x1F\x01\xB6\x3F\xE6\xE4\x12\x95\x2A\x75\xAA\xFE\x73\xCE\xAF\x4D\x80\xEE\xEB\xB9\xE6\x60\x32\x82\x65\xF5\x4F\x31\xF7\x50\x10\x17\x97\xB7\xD2\xE5\xAB\x58\x30\x63\x2B\xB8\xCA\x19\x57\xD0\x1E\x31\x6C\x0E\x7C\x9F\xA2\xE4\x69\x3D\x00\xFF\x77\xBC\x0D\xD3\x29\xED\xFE\xAA\x6F\xBA\xFB\x9F\x8C\xDD\x5F\x95\x74\xEF\x37\x64\x48\xD3\x6C\xAA\x07\x15\x1F\x4D\x8A\x0E\x5F\x5C\x98\x5B\xB5\xE6\xC7\xCF\xC6\x82\xE2\xAD\x09\x33\x5A\x70\x38\x0E\x15\xFD\xC5\x17\x30\x39\x9B\x75\x16\x83\x72\xFE\x00\x5E\x73\xCA\xDF\xF6\x07\xB7\xFD\x81\x88\xFB\x4D\x8A\xF6\x9B\xB8\x0D\x05\x26\xC4\x55\xA9\x83\x02\xAE\x3B\xD0\x78\x9D\x86\x5F\x41\x0A\xB4\x30\xF5\x39\xBB\x75\x1B\x80\x3A\x30\xC1\x72\x79\x80\x04\xE9\x0C\xFC\x31\x49\x8E\x2E\x14\xC8\x98\xA2\x22\x5A\x2C\x31\xCF\x9F\x42\xAE\xAF\x42\x4E\x17\x11\xB3\x9E\x8B\x39\xD7\x96\x6A\x34\x3B\xD2\x0A\xF2\x4E\x07\x3B\x83\x5C\x33\x07\x38\xAB\xA8\xC2\xEE\x7B\x52\xA8\x21\x29\x0C\xD6\x77\xC4\x4E\x91\xB9\xE8\xEC\x14\x3D\xAC\x5E\xAD\x30\xBD\xB5\x1F\xC3\x70\xBF\x48\x89\x01\x93\x22\xCB\xFD\xA4\x35\xCF\x6A\xAD\xC7\x8A\x9F\xC6\x4F\x3B\xA7\x8E\xF0\x01\x6D\xB0\x0B\x30\x1F\x2A\xFC\x39\xDC\xB0\xF9\xD9\xA7\xE9\xCE\x79\x1E\xF2\x2F\x8C\x33\x2A\x78\xA3\x46\x1D\x8A\x59\x62\x73\xAA\x21\xA7\x0E\x70\xD1\x5A\x43\x09\x9A\xCD\xE1\x86\x0E\xA3\xAE\x4E\x30\x4B\x7F\x81\xDE\x74\x27\xEB\x3F\xA5\x84\xEE\x15\x37\xCD\xB9\xEC\xA9\xD2\xC8\x7C\x93\xD0\xD2\x11\x68\xC4\x2C\x68\x44\x25\x8D\xA8\xAA\x72\x39\xD5\xBA\xFC\x00\xE5\x76\xEC\x5D\x08\xE1\x08\
x1E\xEA\x7F\x21\x1C\x8F\x73\x09\x8C\xD7\x5A\x0D\x8B\x0F\x6D\x3F\x23\x8A\x23\xA3\x49\x4C\x6F\x43\x62\x97\x9D\x59\x9A\x78\x8D\x3C\xA3\x9A\x6A\xDA\x53\x5E\x30\xF8\x53\xB7\xF2\x00\x57\xE6\x31\xBC\x10\x59\x2C\x76\xDF\x68\x58\xA4\x60\x21\x22\x16\xB2\x78\x2F\x38\xC9\x2A\x74\x1F\x3B\x15\xE1\xDD\x5E\x21\x61\x03\x5B\xBD\x24\xD3\xF9\xD4\x9C\x50\xCF\xC7\xB5\xCC\x29\x26\xB7\xE9\x15\x2B\x2C\x28\x10\xB6\x5C\xEC\xB3\xDA\x8C\x08\x6E\x81\xE0\xE6\x08\xF9\x89\x4D\x93\x3B\xCA\xB5\x0B\x7F\x0E\x37\x79\x45\x81\xC4\x79\x20\x38\xE5\x88\x06\x32\x97\xE8\xD4\x23\x22\xBB\x82\x9F\xA9\x50\xBB\x20\xC3\xE5\x16\x37\xEB\xB1\xB5\x0C\x5A\xA3\xD0\x89\xD5\x09\xEE\x36\xC3\x8A\xE4\x14\x51\x69\x7F\xD8\x74\x52\x15\xC2\x83\x86\x67\x68\x53\x0E\xDB\x74\x99\x1B\x1D\x81\x56\xD4\xA6\xAD\xC8\xCA\x5F\xC4\x53\xBF\x70\xFA\x42\x05\x5A\xBF\x5E\x89\x89\xBC\x5B\x05\xB7\xC6\x94\xC6\x4E\xA0\x0B\x8F\x6E\x4D\xFA\x5B\x8E\x72\x59\xA2\x5D\x4C\x4B\xFA\x85\x6A\x67\xB3\x8D\x9A\xD9\xE9\x0A\xCA\x35\x57\xD0\x33\xD0\x95\xDC\x18\x57\x5C\x57\xED\xC5\x83\xAF\x13\x7A\x6B\xDB\x5E\xE5\x5C\x11\xE3\xAB\xE5\xF7\xA9\xBA\xD9\x4E\x19\xE7\x64\xF7\xE6\xA6\x26\x5A\x00\x4E\xB7\x3B\xFB\xE6\x57\xB9\xFA\x9F\x79\x09\x26\x8D\xB3\x5E\xAE\x3B\xEB\xF5\x21\x67\x11\x23\x47\xBC\x58\x3D\x7A\x94\x6B\xF4\xD4\x6E\xFB\xCF\xBD\xAC\xDD\xFE\x0F\x84\xD8\xB8\xEC\x9F\x39\xF8\x2B\xC4\xE5\x28\x1C\xB6\xBF\x21\x64\xF7\xD1\x98\x7C\x13\xBE\xC6\x9A\x8E\x5F\xE2\x75\x9C\xF8\x80\x9A\x9C\x21\x3E\x25\x70\xBA\xAC\x4F\x6B\xC5\x1C\xD0\x73\x10\xF3\x41\xCC\x41\xEC\x28\x6F\x12\xB9\x06\x0B\x56\x09\x71\x3F\x4C\x10\xF5\xCC\x7E\x10\xC4\x7D\xB6\x78\xEA\xB8\xD5\x47\x90\xBF\xF4\x11\x0A\xA3\x4C\x19\xCE\x30\xC3\x51\x38\x00\x4F\x68\x3E\x18\x89\x11\x43\xF0\x17\xF7\x32\x65\x40\xE3\x1E\x35\x46\x39\x7A\xB1\x30\xB4\x21\x21\xAA\x6B\x0D\xC8\x3D\xAE\x73\x27\x3B\x4A\x47\xA1\x60\xE1\xD6\x4B\x21\x71\x3D\x65\x5C\x97\xC7\x30\x3F\xBC\xC7\xF0\x08\x2E\x3C\xAB\x31\xAD\x99\x59\x16\xC2\xFF\xDC\x7F\x79\xDF\xC7\xED\x55\xAD\xF5\x3B\x8F\x39\x7B\x35\x5F\x3A\xC5\x3F\xAE\xC6\xF2\x47\x86\xAA\x7E\x1E\xEF\x5A\x8D\x45\x07\xAA\xBE\
x2C\x9F\xA6\x54\x95\x52\xE8\xCA\x99\x5D\x42\xF9\x7A\xDD\xA9\xAB\xB1\xA0\xE5\x2E\x21\x7D\x11\xAE\x14\x15\x11\xE5\x66\xF1\x8E\x9F\x1C\x73\x2A\xDC\x69\x8D\xDF\x7E\xCC\x99\xAB\x9B\x0C\xD7\xE6\x9A\x43\x5F\x51\xA8\xE0\xD1\x74\xE3\xF0\x5C\x34\x3A\x6E\x24\x56\x52\x90\x20\x82\xDA\x99\x7C\x2A\x1A\x33\x72\x04\x14\x28\x52\x3C\xE5\x31\x67\x1F\x8D\x28\xC8\xA8\x96\xB6\x3A\xD6\x6D\x82\x8D\xCE\xBF\xFD\x2B\x1F\xFB\xF3\x9F\xEE\x5C\x16\xEA\xEC\xB4\xD2\x71\xF2\xF9\xAC\x7A\x33\x27\x85\x45\x42\x4B\x1E\xFD\xB1\x16\x47\x0C\x48\xA0\x3A\x02\x26\x22\xE6\x18\x3D\xD0\x4B\xA1\xB0\x6F\x8F\x3B\xBD\x04\xB2\xD9\x87\xBA\x31\xCE\x4E\x70\xAB\x94\xEA\xED\xE4\xBB\xC0\x9C\xD9\x25\x44\xF8\x4E\x63\x2A\x14\x3E\xE8\x1F\x53\x72\xE1\xE2\x93\x71\x86\xFD\xC2\xE5\xCE\x2E\x61\x1A\x32\xF4\xB8\xEC\xA7\x4F\x13\xE2\x66\x4C\xDC\x35\xF4\xB7\x71\xC9\xAE\x71\x6B\xD6\x0E\xC4\x57\xA9\x74\x9F\x97\xDB\x49\x65\xCB\xF1\x74\x7D\x1C\xA7\x0C\xC3\x69\x2D\xCE\x40\x44\x6F\x9F\x87\xA1\x55\x71\x9C\x06\xE0\x31\xC8\x23\x4B\xA1\xA4\xAC\xD3\x20\x1C\xF6\x53\x20\x05\xEF\xCC\xE6\x4E\x77\x54\xAE\x1C\xF7\xD5\x76\x09\xB1\x3A\x81\x25\x8B\xB6\x5E\x57\x79\x88\x36\x68\xE6\x88\x00\xA7\x97\x9C\x72\x76\x30\x4A\x5C\xE3\x30\x64\xC7\xA2\xB1\x45\x35\x08\x25\x7C\x5B\x7D\x42\x6A\x35\x95\xCF\x97\x53\xAF\x28\x33\x1A\xA6\x4D\x46\xF3\x4D\xB2\xC8\x92\x5C\x57\xAA\xBE\x9E\x75\x3B\xDA\xF1\xC2\x8C\xB7\x4F\xA1\x13\xE7\xF5\x9F\x9D\x0C\x65\x7C\x95\x03\x85\xD2\x49\xAF\x0F\x25\x09\x59\xDF\xEC\xC4\xF7\xA9\xFA\xE7\x5E\xD6\x1A\x96\xA1\x26\x91\xA1\xE6\x0D\xB4\x84\x88\x86\xBD\x8E\x80\x0B\xF9\xAC\xB0\x5A\x4A\xEF\x50\x8A\xFD\xCB\xB5\x75\x54\x72\x96\xA8\xC2\x90\x65\x78\xB0\xC2\x90\x82\x41\x1F\x5A\x6A\xB9\xF7\x08\x52\x55\xFD\xA1\x95\x45\xAF\x07\x53\xE2\xB7\x18\x86\x4E\x59\xA5\x59\x67\x65\xDD\x91\x76\xC5\xC4\xAC\x6A\xC7\x99\x0E\x79\xB7\x53\x85\x02\xA5\x3A\x18\xA0\xB4\x9D\xC9\x35\x19\x30\x5F\xAF\x0B\x85\x0F\x74\x74\x08\x27\x65\x3C\x7A\xFD\x8F\x10\x1D\x24\xB3\x24\xC9\x2C\x51\x32\x8B\x28\x99\x29\x98\x47\x71\x3E\x69\x4D\x8A\x5F\x46\x4A\x9F\xD2\xFB\x62\x82\x56\xFA\xDE\xC2\xF7\x8C\x4C\x4B\
x66\x21\xC9\x99\xDE\x1C\x10\x98\xC2\xBF\xCD\xBB\x16\x16\x0E\x1C\x69\x63\x29\xE3\x03\x86\x75\x69\x8C\x3A\xC0\xD5\x9B\x16\xFD\xB0\xE4\xE3\x23\x5A\x26\xD0\xE9\xA6\x1B\xE5\x2C\x86\x4E\x72\x4C\x42\x6B\x83\x1E\xE1\x32\x97\x7E\xCB\xB9\x16\x6D\x48\xD4\x18\xA2\x4E\xD0\xA9\x4F\x79\x46\xE2\x0D\x1C\x6F\xE5\x42\x7A\xEB\xB1\x89\xE5\x68\xC8\xF1\xB8\x80\x3E\xE4\xDC\x56\xBD\xBE\x8F\xB9\xBF\x73\xDD\x17\x9C\x72\xF9\x91\x60\x25\xCF\xA3\x03\x58\xA1\x2D\x3B\x84\x9E\xD0\x41\xB9\xB4\x71\x83\x10\xAF\x18\x1D\x14\x02\x91\xA2\x43\x52\xA4\x42\x44\x87\x0C\xE8\xC0\xC0\x02\x19\x53\xEB\x65\x2E\xFD\x96\xD1\x11\x72\xAC\x90\x27\xD6\x95\xB3\xE8\x28\x17\xA0\xA3\xAA\xAA\x97\xE9\x7E\x23\xF6\x4E\x39\x53\x1F\xCF\x0D\xFD\x75\xF5\x3B\x51\x35\x15\x5C\x0E\xC8\xD0\x81\x32\x76\x43\x85\xE8\x8C\x99\x33\x4B\x86\xA7\x85\xA0\xEC\xAB\xAF\x97\xA8\x2B\xCB\xA3\x7B\xD4\xCD\x92\x76\x88\x6F\x85\xE9\x73\xB3\xEC\x42\x59\x04\x3A\xBE\x74\x33\xDF\xF5\xF2\x28\x7C\x4B\x2F\xF0\x59\x26\xCA\x10\x9C\x1C\xBE\x08\xC9\x1E\x56\x00\xF6\x7A\xAF\x7E\xB5\xA4\xDF\x66\xAF\xBE\x09\xDA\x91\x17\x8B\x97\x4B\xDA\x58\x7A\xA9\xA4\xED\xA3\x1B\x24\x29\x22\xB7\x52\x55\x1F\x58\x68\x6E\x96\xAD\x20\x09\x4B\x39\x71\xE5\x32\x80\x2B\x28\x07\x1A\xE5\xAF\xAD\x3B\x32\x24\xA5\xBF\x41\x76\xF5\x7F\xB9\x1E\x4F\xB8\x2C\xAB\xDD\xFB\x03\x6A\x9C\xAA\xFF\xE0\x7A\xD4\xF3\xB1\xC1\x60\x85\x43\x3F\xFD\x49\x90\x3B\xA3\x7D\x8D\x75\xE8\x61\x6C\xC1\x93\xF1\x7A\xD3\x93\x63\xF7\x4C\xD5\xAE\x21\x45\x52\x5F\x6D\x38\x6E\x68\x28\xBF\xDC\x0C\x45\x38\xBF\x9C\x99\x49\xB6\xC1\xDB\xC9\xE7\xC7\xFC\x1A\x3B\x29\x9F\xB0\xC3\x52\xC5\x34\x37\x31\x01\xD8\xFA\xC5\x62\x27\xDE\x23\x5A\xD0\x53\x67\x22\x29\x62\x34\x9B\x27\xB1\x67\x62\x1C\xAB\x5A\x56\x0E\x53\x4D\xC2\x62\xBD\xD3\xCB\x23\x4B\xF5\x97\xE9\xC8\x23\x63\xC5\xB4\x98\x16\x84\xF3\x8B\x85\x23\x04\x33\xD9\x41\x64\x0C\x6B\x56\x2E\xE4\xEB\x67\x4F\xA6\xA2\x38\x04\xCC\x6E\x29\xD3\xF4\x20\x05\xDE\x03\x80\x35\x3F\xE5\x44\x21\xA0\x59\x8B\x24\xCE\x39\x24\x0A\x51\xCB\xAA\x46\x94\xD0\x5E\xCF\x2E\x51\x0C\x74\x8B\x3D\xEA\x22\x68\x9B\xB4\x52\xB8\x5C\x76\x0A\xAC\
x03\xE3\x61\x5E\x7C\xF8\x7A\x42\xA8\xA9\xBF\x42\x09\x35\x98\x1D\x34\x36\xD4\xFB\xE6\xA3\x0B\x52\xF5\x5E\x39\xC0\x51\xEF\xB2\xDF\x1D\x0B\x64\x81\x22\xE3\x77\x26\xE1\xAB\x89\xBF\xEB\x8E\x24\x15\x8B\x4B\xE6\xAC\x17\xEC\x53\x54\x89\x4F\x91\x4D\x23\xD5\xFB\x14\x55\xF4\x29\x02\xBF\x92\x4F\x11\x59\x0B\x33\xB7\xB4\x1A\x48\x22\xA2\x1A\x88\xB4\xD0\x33\xD3\xD9\x52\xDA\xF2\x71\x9C\xCD\x30\x61\x2C\x7B\x95\x51\xB5\x40\x22\xC8\xB0\x1D\x00\x42\x16\x85\x9B\x48\x82\x91\x2C\x2E\x7B\x88\x7D\x3A\x59\xF4\x60\xAC\xD7\x22\x7B\xBC\xC9\x1E\x6F\xBD\x19\xAF\x97\xD5\x0E\xAE\x54\x10\x26\xFD\xCE\x38\xE7\x6B\x4A\xDC\x0A\xD3\x7D\x4C\xB3\x9D\x53\x28\x6D\xF3\x5F\x15\x71\x99\x46\xE1\x23\x42\xE2\x62\x1E\x3C\x9B\x5F\x14\x5A\x27\x09\x2A\x9C\xE6\xB4\xEC\xEE\xA6\xA5\x82\xA7\xB8\x8A\x11\xFF\x9A\x8E\x11\x26\xB4\x93\x09\xB9\x6E\x96\x7D\x8C\xC6\xC3\x44\x2E\x12\x43\x82\xFD\x10\x9A\x06\xB3\x03\xCF\x26\x62\x78\x10\x76\xFF\x17\xC1\x76\x5C\x88\xCC\x04\xE2\xEA\x4D\x96\xED\x2E\x13\x4D\x2E\x56\x37\xF5\x52\x6B\x38\x71\x6C\x60\xFE\xD6\x36\xD2\x19\x8E\x69\x45\x8F\x90\x69\xB4\xA3\x9B\xE8\x7E\x4B\xAC\x8F\x0C\xAC\x8F\x6C\xA1\xF5\x11\xF3\xD5\x9C\xC9\xFA\x90\x03\xEB\x43\xCE\x58\x1F\x72\x68\x7D\x78\x81\x5B\x0D\x28\x0C\x33\x72\xBB\x0D\x4C\x10\x09\x26\x88\xBC\x9A\x75\xCA\xEC\x59\x30\xD4\x4B\x29\xDF\x24\xAA\x4F\x94\x5D\xB3\x01\x8B\x38\x3B\x44\xF7\x0F\x37\x1C\x7B\x32\xAC\x52\xCA\xAB\x72\xF0\xC4\xA9\xD4\x6F\x86\xA2\x8D\x7C\x81\xA8\xCB\x66\xE4\x89\xCB\x0E\xF3\x91\x56\xF6\x9B\x51\xEE\x47\x90\x35\x80\xF0\x56\x04\x0D\x1F\x75\x0B\xF8\x10\x70\xBB\x7F\xC2\x35\x58\x61\x6D\xEC\x7C\x89\x8A\xBC\xA5\x0C\x71\xAB\x13\xC5\x45\xCA\x30\x7B\xCE\xD8\x56\x78\xAA\x15\xE5\x69\xCD\xB3\x0F\xCC\x03\xD0\xDE\x82\x2E\x4F\x85\xE4\xB1\x26\x05\xD5\xDB\xC4\xC2\x6E\xF3\x80\x83\xBC\x3B\xE2\x8A\x03\xB4\x97\x3C\x03\x6A\x36\x07\xEA\x7E\x82\xF5\x34\x5D\xD3\x06\x88\xD9\x8F\x05\x24\x37\x1B\x8F\xC4\x28\xE1\x30\x1E\x5D\x25\xC0\x82\x95\x22\x31\xF6\xB1\xFA\x5A\xA1\xCB\xA9\x3A\xA1\x9E\x9F\x98\x82\x98\x65\xD7\x04\x1B\xB1\x31\x89\x1D\x68\x97\xDA\x1C\x0B\x22\x5B\xF4\x7D\x21\xCF\
x66\x60\xC5\xC4\x2A\xC9\x2A\xE1\xD9\x02\x78\xB6\x88\x3C\x6B\x22\xCF\x62\x69\x1C\x2A\x1C\x7D\xBC\x6B\xFF\x7F\xF6\xDE\x05\x5A\xB3\xAB\x2C\x10\x3C\x7B\x9F\xF7\xD9\xE7\xFC\xFF\xB9\x55\x37\x21\xE6\xCF\x98\xEF\x9C\x85\x6B\x8A\x21\xA5\xB5\xD6\x48\xC5\x4E\x6B\xA7\x76\x99\x7A\xDC\xAA\x40\xB2\x7A\xB5\x6B\xD9\x33\xB3\xD6\xD8\x83\xED\x5A\x9E\x5B\x84\xDC\xAA\x32\x44\x29\xEA\x5E\x2A\x01\x32\x8A\x4A\x37\x0C\x82\x0D\x43\x51\xA1\xE7\xA6\xD1\x2B\x74\x0B\x42\x37\xAF\x12\x45\x79\x49\xA2\x51\x01\x65\x46\xC0\xA8\xD8\xA0\xC6\x77\x54\xA4\x66\x7D\x8F\x7D\x1E\xFF\xE3\xD6\xCD\x53\x83\xC9\x52\xEA\xFE\xE7\xEC\xBD\xCF\xDE\xDF\xFE\xF6\xB7\xBF\xF7\x17\x4E\xE3\x6C\xD8\xC3\x59\x2D\x38\x1B\x9C\xA2\xBA\x96\x82\xB3\xFC\x8B\xE7\xDB\xE2\x2C\x3E\x41\x1C\x0D\x5A\x1C\xD5\x88\xA3\xFA\x54\x9B\x20\x76\x8E\x0E\x30\x60\xAC\x4D\x58\x21\xC4\x7E\xF7\x49\x95\xE2\x03\xAE\x80\x1C\xDE\x5E\xA5\xA2\x92\x49\xFB\xF5\xA9\x99\xF7\xD3\x6D\x92\xF1\x15\xA7\xAD\x66\x35\x7A\xC2\x18\x3B\xA5\xD9\x09\x20\x3D\x52\xF8\xE2\x58\x2C\xB7\x23\x82\xBB\xCE\x18\xE0\x66\x42\x22\x2D\x4D\xCE\x20\x6C\x49\xFE\x0C\x72\xAA\x49\x4B\x51\x63\x54\x29\xA5\x94\x4B\xB9\x01\x33\x41\x29\xD8\x61\xA4\x48\x6D\x92\xCC\xDD\xA1\x44\x0C\x81\x43\x89\x4C\xFC\x3B\x8F\x14\x9D\x31\xC8\xAD\x83\x9D\xEE\x48\xF9\x98\x4F\xAF\x04\x02\xC8\xD7\x70\xEE\x7A\x07\x73\x07\x23\x65\x5F\x70\xBE\xC1\x76\xF3\x0D\x58\xE9\x16\x89\x0D\x3B\x78\x94\x93\xF7\xDD\xE4\xC9\xD3\xF9\xAA\xF6\x42\xF2\x59\x95\xD9\x57\x78\x06\xF8\x68\xE4\x92\xA9\x3B\x9F\x6F\x57\xD8\xA6\x6C\x60\xC4\xCE\xEC\xC5\xF5\x48\x9E\x1F\xAF\x92\x32\xB8\xBC\x92\xB2\x15\xB0\xF1\xBA\x87\x80\x10\x65\x52\xFE\xF5\x36\x4A\xCA\x64\x4D\x02\x17\x10\x29\x6F\xE9\xA2\x44\x04\x45\xFB\x4A\x4A\xF6\x68\x4E\x9C\x92\x52\xF7\x94\x94\x94\xEE\x20\x69\x95\x94\x7A\x1B\x25\x65\xC2\x4A\xCA\x64\x67\x4A\x4A\xDD\x57\x52\x6A\x51\x52\x06\x33\x4A\xCA\x80\x95\x94\xD4\xE0\x17\x92\x2E\xEE\x07\x56\x6B\xBF\x0A\xA0\xF3\xA7\x08\xB7\xE5\xD1\x9D\x3F\x85\x7F\x84\x43\x61\x88\xC1\xBE\xAA\x29\xB7\x58\x90\x1B\xD8\xD9\x58\xBA\x12\x87\xE7\x70\xBE\x74\x15\xB5\xBC\x7C\xC0\xC5\xF2\
x5D\x46\x88\x04\x02\x20\xBF\x2F\xF2\x41\x88\xFA\x89\x21\x12\x7A\xC6\xBC\x3C\xBF\x6D\xC5\xAA\x5E\x8A\x08\x9A\x5D\x27\x5A\x05\x7B\xC8\x1F\x41\x35\x10\x30\x33\xE1\x38\xF9\xF8\x50\xE1\x97\xF7\x4B\x62\x78\xBE\x37\xE3\x3D\xFA\xBB\x39\xDB\x75\x6A\xFD\xD3\x76\x8C\x33\xED\xE5\x0B\x84\x54\xAE\x43\xEB\x51\x7E\x79\xE4\x54\xFE\x9D\xF3\x33\xA0\x6C\x27\x54\xEC\x88\x53\x0B\x53\x02\x95\x8C\x42\xCD\xA5\xB0\x6C\x85\x53\xCB\xFD\x03\xC0\x71\x8E\xDC\x8A\x18\x39\x84\x5B\x17\x3A\xE8\x18\xBA\xC8\x70\x34\x64\x80\x67\x12\x19\xFE\x55\xCE\xC6\x8F\xF8\x95\x96\x9F\xBD\x9B\xEF\x44\xF1\x7D\x85\xFC\x88\x94\x55\x75\x46\xF1\x3D\xBA\xE4\xF2\x09\x3E\x64\xE0\xAF\xD6\x9E\x0D\xC0\xB3\xC1\xD1\x49\xF9\xB6\x57\xB0\x6A\x8C\x02\xA9\x2F\xBC\x42\x6A\xD0\x70\x46\xC0\xC2\x6E\x6C\xFC\xA2\x77\xB4\xF0\xEC\xDF\x5F\xF2\xAB\x02\x02\xFC\x77\xBF\xF6\xAC\xC2\xF9\xF8\x10\x35\x15\x25\x2B\x47\xFA\x50\xE0\xBB\x23\x24\x9B\xE0\xF2\x48\x45\x94\x3D\x9F\x6F\xDD\xB4\x7C\x80\x40\x5B\x80\xAE\x7C\x30\x54\xFF\xC1\x97\x34\xA2\x94\x41\xDF\xAA\x86\x03\x34\x38\xE5\xA8\x48\x36\x75\x2E\xDB\x56\x40\x0E\x5E\xF9\x29\x72\x9F\x97\xDA\x3A\xAC\x42\xC2\x4B\xBF\x8E\xC8\x5A\x05\x85\xBC\xD2\xA2\xAD\x0C\x9A\x3A\xAF\x3C\xC8\x71\x16\xCE\x80\xD7\x31\xB3\x4F\x02\xF4\x55\xEB\x7B\xDC\xC1\x5F\xC4\x05\x01\xC0\x36\x33\x54\x2E\x88\xE9\x1F\x6E\xAB\xA8\xF0\xB2\x88\x7E\xC9\xD5\xC6\x1D\x11\x49\x5C\xDA\x73\x07\x6A\x25\x84\x27\x4C\x61\xFA\xC8\x3F\x3E\x85\xE9\x23\xAC\x30\xFD\xD5\x48\x8D\xD7\xE7\xAA\x4B\xF5\x53\xA0\x2E\xA5\xF2\x99\xA2\x2E\xA5\x2B\x22\x74\xEA\x52\xE1\xF7\x43\x51\x97\xF2\x92\x06\xEA\xCE\x4E\xAD\x29\xEA\x52\xC2\x93\xE8\x50\xA1\x44\x15\x88\xDF\x3B\x5E\x25\x5C\x8B\x25\x10\x68\x27\x84\x78\xF1\x1A\x4B\xA5\x3E\x8F\xE8\xE3\x88\xBE\x4B\x0D\x45\x2E\x75\x51\x15\x4C\x69\x0C\x7D\xD1\x18\x6A\xD1\x18\x46\x48\xF0\x44\x43\xAB\xF1\xAF\x1C\xA2\x96\xF1\x36\x8E\xF1\xAE\x72\x11\x10\x22\x9B\x36\x55\x01\x51\x35\x22\xF6\x27\x07\xBF\xCE\xB0\x4F\x36\xAD\xC3\x40\x86\x68\x24\x3E\xB9\x90\xE3\x97\x32\xA4\xEA\x99\xF0\x38\x64\x2C\x29\x2A\x0D\xA6\x1A\x33\x23\xD5\x33\x90\x50\xC7\xB8\xB5\x91\
x70\xDE\x69\x18\x23\x15\x19\x13\xFE\x67\xD5\x08\x87\xEB\xB4\xAC\x3E\xCE\xC1\x77\xF3\xE6\x59\x8F\xC8\x4E\xC1\xDF\xC6\x55\xE6\xA0\x8F\x50\x0E\x96\xA4\xA9\xC6\x30\x3A\x54\x8D\x70\xB9\x65\x55\xE2\xDB\x5C\x1B\x6A\xA4\xC9\xA7\x83\x16\x25\x4A\xEB\x39\x43\x4F\xA4\xFE\xB9\x64\x2D\x8A\x28\x57\x7D\xC8\xDA\xD3\x12\xC6\xB4\xBE\x9A\x58\xBA\x11\xB6\x86\xDC\x89\x24\x2E\x58\xCE\x77\xDE\x8B\xD5\x98\x87\x26\xAF\xDC\x02\xA7\xA4\x69\xA6\xF4\x51\x72\x7B\x2D\x26\x47\xB8\x6E\x0A\xBE\xA1\x48\x6E\xD1\xB3\x66\xDB\xAA\x5E\x23\x66\x32\x08\x97\x44\x9A\xD9\x89\x26\xDA\x8C\x5B\x4D\xF4\x08\x09\xFE\xFF\xBE\x66\x3F\xB4\xDE\xD8\xAB\x57\x8C\xC1\x9F\xF2\xF7\x3E\xA5\x5D\x02\xE1\x97\x34\xB5\xB2\xCF\x3A\x4E\x15\x63\xB4\x55\xA0\x4E\xDB\xFF\xB0\xB1\x11\xAC\xD1\x15\x6F\xCD\x61\x12\x41\xCD\x5F\xF8\xBE\x3A\xEB\xAF\x47\x67\xC0\xFB\x40\xAD\x36\x5B\xF7\x12\x75\x10\xEE\xD9\xAC\x03\x7B\xE9\xD2\x8D\xC7\x44\x46\xB8\x74\xE9\xD2\xA5\x78\x0D\xF4\xC9\xC2\xBB\xC9\xA3\xFF\x6E\xFD\x05\xEB\x3D\xD7\xDF\x78\xEF\x5D\x1B\xDE\xBD\x63\x4E\x70\xBA\x7E\x8C\x8E\xBF\xF7\x56\x69\xE2\xE1\x1B\x68\x3B\x7C\xE7\x85\xDA\xFB\x00\x0E\x5D\x05\xF6\x18\x57\x17\x0A\xEC\xC6\xC6\xC5\x4B\xF1\x51\xEE\x77\xDE\xC5\x40\xB5\x1F\xA1\xD2\x4D\xDC\xC8\x3F\x4C\x21\xF3\x76\xF9\x07\xC0\x6F\x68\x46\x37\xAE\xD5\x81\x7D\xE8\x83\xDF\xD4\xD4\xDA\x2E\xDF\xD1\xD8\x97\xDF\xD9\xBC\xA7\x8E\x6E\xF2\xBE\xE7\x2D\xB7\xEE\xBE\xE9\xC1\x1B\x2F\xD4\x31\x69\x50\x2E\x7A\x6B\x10\x9C\xE4\x31\x4E\x51\xB4\xA5\xF7\x81\x83\xB8\x9E\xF1\x5D\x77\x7F\x48\xBE\xF3\xF0\x87\xCE\xD7\x34\x05\xF9\xFD\xF9\x1B\x2F\x5C\xA8\xC3\xB7\x7E\x80\xF4\x4A\xB7\xDF\xF5\xA1\x3A\xB9\xC9\xF3\xE0\x8A\x97\x7E\xE2\xB7\x6F\xBC\x50\xA7\xE7\xEB\x0C\x52\x88\x21\x7B\xEB\x79\xF0\x20\x79\x2B\x84\x6F\xED\xF5\xF5\x0E\x9C\xBF\xB7\x26\x93\x04\x78\x38\x87\xF8\x82\x0C\xFD\x96\x5F\xBA\xF3\x81\xF4\x27\x7F\xE1\xC6\x0B\x37\xBD\xE3\xCE\x1F\xFB\x1F\x3E\xB2\xF6\xC9\x1B\xCF\x5F\xB8\x29\xF8\xDB\x9F\x78\xE3\x1B\xDF\xF8\xD9\x1B\xCF\x5F\x80\x18\xB8\xDD\x4D\xFF\xF2\x5F\xFC\xCE\xAE\x3F\xFD\xF0\x8D\x17\x6E\xFA\xFF\xFC\x4F\xBC\xFE\x7B\x0F\xFF\x32\
xB6\xFB\xD7\xAF\xAD\x5F\xF7\xEC\x95\x5F\xC3\x3F\x5F\xFB\x5D\xF8\xDF\xEF\xDD\x78\xFE\xC2\xF9\xF3\x17\xCE\xD7\x5E\x37\x35\x88\x6E\xDA\xFF\xDC\x3F\x5D\xFA\xF3\xAF\xFD\xEB\xEF\xB8\x80\x33\x3B\x7F\xD3\x6F\x6C\xFD\xE4\xC7\xBF\xED\x75\xEF\xFB\x8E\x0B\xE7\xCF\x9F\x3F\x4F\xE4\xDA\xFC\x33\xA5\x7A\xC1\x42\x81\xF3\xF2\x62\x55\x1E\xF1\xD9\xE0\x97\x5F\x13\xDD\x65\xD0\x2F\xD7\xFF\x53\x57\x87\x66\x5D\x9D\x55\x28\x75\xF7\x98\x5F\x19\xC2\xBE\x8A\xFE\xF7\x1E\x56\x6A\x4F\xA4\x34\xE0\x3A\x68\xAB\x9B\x2A\xE6\x52\x92\x2C\xA5\x73\xB2\x05\xFB\x4D\x47\x48\x44\x4C\xF5\x01\xFF\x00\xE2\x69\xCF\x95\x38\x46\xA2\x92\x5D\x87\x6D\xED\xF7\x34\xA3\x25\x1D\x45\x7E\xC4\xFF\xA9\xC8\x8B\xA2\x20\x32\x78\x67\xE6\xBE\xB1\x41\xA5\x73\x6D\x6C\xC2\x09\xB2\x4B\xC4\x6C\xB1\xDE\xA6\xA0\x4F\x56\x69\x2E\xE2\x39\x24\xF6\xD6\xC6\x5E\x52\x6B\x36\x3D\xCE\x3E\x67\x14\x1F\x91\x9D\x80\x04\x9F\x36\xF6\x56\xA4\x66\xD9\x5E\x4F\x55\x09\x64\x14\xB8\x59\x65\xBD\x4E\xCF\xE7\xF3\x6A\xB9\x30\x5D\x62\xFF\x27\x09\x5B\xA7\x1C\x29\x56\x9F\x84\x94\x4D\x3C\xDA\x7A\x47\x27\x55\x8A\xB7\xA2\xBD\xF6\x07\xEA\xA4\x81\xE4\x14\x15\x4C\xA3\x5B\x2A\xDE\xEB\x79\x55\x42\x5E\xEF\x32\x29\x1C\xFC\x9B\x29\x3C\xB7\xA2\x28\x55\x9A\x7C\x06\xA9\xDD\x48\x4E\x92\x9F\x4E\xBC\xD7\x53\xF5\xF4\xE4\x91\xA6\x99\xE1\xE4\xCD\x60\xF2\xF9\xBC\xC9\xB7\x1F\x94\xC9\xD3\x07\xD9\x4E\x67\x3D\xAA\x61\x36\xA9\x70\x06\xBA\xA9\xF2\xDE\x8C\x75\x95\x0C\x62\x45\x20\xE1\x02\x04\xF6\x0D\xEB\x8D\xBD\xF6\x25\xA3\x4C\x69\x2F\x08\x02\x1F\xFF\x8F\xEE\x15\x9E\xF1\x57\xE8\xD2\x40\x01\xAA\xCA\x71\x3D\xB4\x1B\xA1\x81\x9C\xC7\x4F\xED\x46\x84\x4F\xFC\x5E\x97\x2F\x4D\x75\x51\xD8\x20\xE8\x75\xB9\x48\x4F\xB4\xE9\xB5\x09\x4E\xB2\x30\x39\xFC\x90\xC1\x21\xBD\xA9\x89\x4F\x2F\xA0\x8E\xED\xFF\xDA\x8C\xFE\x55\x34\x9B\xB3\x79\xF0\x9F\x56\x7A\xFA\x91\xD7\xFD\xE5\x7B\xF8\x9E\x1F\x78\x61\x1C\x07\xF8\xB7\x17\x23\x4A\x16\xB6\xAC\xE2\xC1\x67\x2F\xA9\xB5\x3A\xB3\xF7\xAD\x37\xA3\x4A\x39\x12\xE7\x69\xAF\xF7\x9F\xEF\xFE\x50\x94\x90\xE2\xC5\x6B\x55\x6A\xB3\x2A\xC6\xBF\x01\x91\x2C\xB3\x9F\xF1\x0E\x23\x82\xC5\x55\x01\
x99\x7D\xD3\x7A\x33\x8A\xB4\x52\x4A\x69\x85\x67\xA2\xC0\x86\xA7\xD6\xAA\x14\x0A\xEE\x74\x76\x0D\x52\xC2\xA6\xB5\x89\x78\xF7\x49\x27\x0F\x3B\x79\x2C\x95\xDB\x8D\x80\xB8\xC1\x04\x12\xFB\xE2\x35\xF0\x6D\xDC\xD8\x3B\xD7\xEA\xD1\x73\x7C\xAF\x1E\x1F\xF4\xFE\x65\x9D\x4E\xAA\x02\xC6\x07\x6F\xBC\x67\x53\x54\x0A\x29\xE7\x09\x81\x02\x47\x26\xB7\xFA\xAD\x6A\x09\xC6\xD8\xE6\x55\xF5\xAE\x33\xB0\xEB\x15\xD5\x18\x0A\x0B\x6B\xF6\x0B\xDE\x29\xFB\x57\xC8\x31\x58\x8F\xF8\xA5\x00\x92\xC6\xEE\x03\x1F\x1B\xC3\xD2\x46\xBD\x0B\x96\xCE\xBE\x6C\xB3\x4E\xED\xD7\xD4\x9A\xCD\x56\x26\x90\x36\x28\xB6\x70\xBA\x03\x48\xEC\x37\x1E\x2F\x14\xB6\xFC\x5F\xF0\x54\x54\x09\xEC\xAA\xC6\x90\x11\xBF\x31\x62\xDE\x2E\xA4\x54\x62\x0A\x02\x48\xA1\x84\x18\x90\x03\x28\xCA\x97\xB3\xEC\xC1\x68\xA1\xF3\x91\xAC\x53\x3B\x9F\x5A\xAF\x4E\xED\xB5\x77\x54\xBB\xDD\x9A\xD3\x49\xB5\xDC\x5B\x59\x06\xCB\x6E\x65\x08\xBE\xF6\x24\xA7\xB0\xDC\x5F\x54\x39\x5C\x54\x89\x8D\x21\xBE\xAD\x2E\x20\x3E\xB1\x5A\x67\xED\x9A\xB2\x6E\x4D\x99\x5B\x53\x0A\xF1\x2D\xC8\x31\x55\x09\x14\x55\x0A\x23\x29\xFD\x16\x0C\xD7\x94\xC1\x6E\x59\xD3\xF2\xBC\x35\xD1\xAA\x2E\x7A\xBC\xAA\xEB\x3C\x49\xDD\x22\xEB\x2B\x4F\xDB\xF2\x07\xC8\x6E\xBE\xA1\xD6\x26\x94\xBA\xE5\xE9\xBE\xE6\x62\xE7\xF8\xBA\x0D\x9E\x3E\xE5\x88\x69\xBD\x45\x88\x59\x0C\x10\xD3\x2D\x88\xF6\x6F\xE1\xEE\x3C\xD5\xDB\xD1\xCE\x7E\xEE\x76\x0C\x50\x70\xEF\x14\x0A\x5E\xBA\xE4\xAF\x0D\x30\xF0\xB2\xA8\xF7\x8F\x67\x71\xB9\x69\x31\x2B\x1E\xEE\x68\xFC\x5C\xDF\xA3\x3E\x42\x94\xF9\x56\xFD\x25\xEF\xF0\xA4\x3C\x37\x1C\x43\x2E\x5D\x38\x49\x2D\xFE\x80\x0A\x0C\xA5\x76\xA3\x3C\x29\x7A\x4F\xE9\xFA\x3E\xAA\x0D\x80\xE4\x98\x86\x44\x40\x5D\xF6\xD3\xE5\x5D\xC3\x4F\x65\x14\x6E\x49\x29\xAA\xF4\x5A\x8D\x70\x45\xA9\x85\x6D\x52\x5C\x02\x9D\x4D\x21\x94\x97\x27\xF6\x0F\x58\x90\x1C\x8C\x14\xF4\x7E\x17\x07\xBD\x27\x52\x7A\x90\xBC\x98\xF1\x25\x69\x40\xAE\xF3\x3C\xF9\x7C\xDB\x30\xE2\xD2\x1B\x02\xCF\x0C\xF4\x2D\x45\x08\x29\x69\xF9\x21\x5B\xED\x8D\x3F\xDB\x87\xB8\xBB\xB8\xF0\xF2\xC8\x08\x9E\x94\x78\x5A\xA1\xA4\x4A\x3C\x14\xE1\x67\xD7\xC1\
x4C\x58\xD0\x2E\x09\x01\x68\x6B\x0B\x4A\xD9\xE1\xEA\xB1\x26\xC8\xEE\x8C\xA0\xA8\x12\x18\x21\x10\x69\x7F\x0B\x28\x57\x6B\xDC\xCF\x18\xCC\xCA\x84\x52\x64\x6C\x24\x24\x75\x16\x3D\xC8\xE8\x03\x50\x00\xC1\xA5\xA0\x71\x59\x0B\x5C\xAC\x3A\x08\x15\x8F\x0E\x42\x34\xF3\x2A\x33\x74\xB9\x5E\x72\x19\x2B\x47\xB8\x92\x92\x78\x51\x9F\x2C\x0B\x1A\x32\xE6\x3B\x16\xC0\xC5\xE0\xBA\x0C\xF3\x88\xAE\xD8\x10\x52\x14\x92\x13\xDB\x81\xB6\x1F\x63\xA6\x73\x87\xF3\x05\xEE\x11\xE5\x1E\x0B\x78\xB1\x3B\xDA\xA3\x90\x72\x68\x55\x27\x29\x59\x8F\x23\x4D\xF1\x44\x8A\x1C\xC5\x9C\x03\x06\x32\x39\xBD\xC2\x57\x07\xA0\x1B\xBB\xCF\x5E\x0F\xB1\x1D\xF1\x01\x8D\xED\x78\xAD\xE1\x02\xFE\x8A\x11\x91\xCE\x68\x6C\xC7\xC7\x91\x98\x56\x1A\x62\x1B\xDC\x81\x5C\xFB\xFC\x63\x9A\x58\xCF\x96\x60\x6C\x02\xD9\xF4\x31\x8D\x8D\xFD\xA6\xA9\x55\x48\x42\x96\xC1\x33\x23\x4B\x33\xAE\x9F\x4F\x69\x89\x28\xA3\xE9\x7A\x83\xFF\xAC\x4C\xFA\x80\xE8\xA9\xEA\xA2\xB1\x71\x2C\xE3\x34\xB8\x04\xD0\xA2\x0C\xE1\x2F\x50\x2D\xAE\x2B\x25\x14\x84\x14\x72\xA0\x1B\xF0\x6E\xF0\x50\x14\xF1\xCC\xEF\x84\x0A\xA5\x21\xC8\x10\x19\x8D\x28\x99\xD3\x2E\xEF\x9A\xAB\x45\x85\xBC\x5E\xBE\x06\xD1\x49\xEB\x1D\x59\xED\xCC\x24\x21\x50\x0E\x9A\xD0\x5E\xFB\x7C\xD2\x8F\x56\x92\xD2\x2F\x44\x02\x19\xDA\xF5\x53\x90\x34\x75\x6E\xAF\x85\x70\xB5\x2E\x20\x87\x62\x65\x62\x95\x53\xAF\xFB\x07\xB8\xEC\x6B\x8E\x83\x28\x1C\x64\xA5\x2A\x20\xAF\x42\x28\x24\x08\x25\xE3\x68\xB5\xDC\x65\x91\xCA\x21\x5D\x19\xFC\xC6\xEF\xBA\xDF\xB4\xD5\x79\x63\xF7\xE1\xCE\xE6\x5C\xFB\x3E\xC5\x81\x73\x1E\x38\xAC\x72\x37\x30\x0D\x5D\xCA\xD1\x60\x74\x21\x2F\x55\x8F\x97\x44\x35\xBA\x20\x3D\xC2\xA6\xA9\xB3\x0D\xDB\x71\x72\xC8\x27\x90\x43\x6C\xCB\xC3\x13\xA7\xA0\x8C\x6D\x79\x44\x06\x81\xB5\x3A\x67\x50\xB0\xF7\x63\xD8\xD8\x47\x48\xE7\x8D\x6B\xE4\x88\x84\x9C\xEB\xD0\x5E\x8B\xC8\x46\x0D\x3E\xD3\x6F\x40\xF2\x96\x3E\x32\x35\xC6\x17\x7B\x4D\xCC\xA0\xF3\xBE\x99\x17\x14\x26\x71\xC8\xF5\xDC\x3B\x18\x9B\xE6\xC8\x8C\x0A\xBD\x7D\xEE\xF0\xCB\x99\x4D\xBA\x77\xD0\x1F\x59\x93\x62\x2F\xB3\xE4\x6B\xAF\xAB\x1C\x08\xAA\x88\x11\xE1\x2A\
xD9\x01\x72\x3A\xB9\x0A\xA8\x02\xB7\x43\x45\x11\x80\x0A\x04\x9C\xDB\xCD\xF0\x90\xAB\x03\x5D\xF8\x48\xF3\xC2\xE6\x3A\xAA\x88\x34\xAF\x63\x57\x40\x5A\x0C\xA1\x90\x83\x5E\xA5\x2F\x5B\x0F\xF4\x2A\x15\x8B\x9E\xF9\x6A\xC8\xFB\x9E\x37\x84\x27\xC6\x40\x6E\xFE\x68\xE4\x87\xEB\xFE\x99\xBE\xBF\x4B\xD2\xAB\x16\x14\x40\xF0\x6F\x28\x14\x51\xA3\x98\xA2\xFB\x06\x79\x4D\x6E\xC5\xB4\xEC\xC8\xBE\x0C\xEF\x72\xDD\x7E\x35\x1C\x7C\x95\x20\x05\x19\xE9\x09\xC8\x86\xFA\xC7\xDE\x54\x1B\xFB\x05\x7A\x42\x6A\xBF\xEE\x21\x37\xB3\xBA\x19\x8E\xE6\x53\xDA\x24\xC2\x15\x5C\x73\x74\x4B\xA1\x78\xCD\xD9\xBC\xAF\x67\x52\x3B\xB9\x21\x2B\x96\x48\xD6\xC1\x4D\x97\xE8\xBF\x3F\xB9\xF4\x42\xAB\x4E\xCD\x2E\x30\x9C\x59\xE0\x99\x47\xB5\xC0\xBD\xD3\xEB\xFB\xC3\x9D\xAE\xCF\xFE\x3E\x3F\xF4\xA7\x16\x1D\x3C\x81\x8B\x5E\xFF\x5E\x5A\xB4\x0D\x2C\xE5\x0D\xA2\x48\x85\xCC\x9E\xFF\x18\xEE\xFF\x7F\xFC\x18\xD9\x3B\x26\x95\x11\x60\xC4\x0E\xB3\x43\xC8\xF0\xB3\x14\x17\xB4\xCA\xB0\xDA\x06\x18\x29\x02\x83\xEF\x07\x82\x6D\xCE\x37\x66\xEA\x1F\x40\x5A\x29\x98\xBD\x2D\x14\x85\x1C\x3C\xC6\xD5\xD2\x7A\x51\x34\xFF\xB7\xBF\xF2\x2B\xF6\x7B\xF1\x60\xB9\x5F\x3F\xFF\x42\xB1\xAA\x8B\xFE\x8C\x73\x83\x43\x2C\x2C\x1A\x1E\xF7\x56\xA3\x09\xC1\x5B\xF1\x08\xB4\xBF\x5F\x38\x21\x76\x23\x23\xEF\x11\xBC\x78\x51\x54\xAF\x73\x9B\x71\x24\xF7\x3E\xC8\xED\x1D\x4D\x1D\x22\x41\xB7\xD7\x12\x2D\xAF\x33\xA1\xE6\x90\x51\x35\xF1\x9C\x1C\x1F\x06\xE7\x17\x19\x9C\x37\x3A\x65\xA6\x7D\x21\x25\x68\x78\x3B\x5B\x7A\xF0\x20\xDE\xE9\x88\x69\x01\xE1\x7B\xDE\x0A\xB9\xF5\x4F\xDB\x07\x3E\xBA\xB1\xE1\x35\xCF\xF5\xBD\x7A\x74\xA1\x1E\x3B\xC5\xA8\x7D\x21\x8C\xDB\xA9\x7E\x1F\xBB\x00\xFC\x94\xE4\x04\x77\x2C\xEC\x18\xD2\xF7\xBE\xB5\xED\xF2\xF9\x1B\x19\x15\x60\x24\x05\x00\xDF\xFB\x6F\x09\x38\xB2\x19\xEE\x96\xEB\x9A\xBF\x90\x58\x62\xB5\x06\xE9\xA1\x93\x90\x36\xDD\x4D\x97\xB3\x44\x94\xB1\x18\x60\x70\x9B\xAA\x25\x24\x8C\x60\x1A\x0B\x47\x0A\xCF\x42\x95\xE5\x81\x81\x12\x4C\x43\x22\x6B\x76\x5B\xBD\x64\xB3\x13\xAB\x76\xDF\x49\xA4\xA7\x86\x6C\xC5\x90\xDA\xF4\x78\xB5\x0B\x96\xAA\x14\x76\x21\x12\x60\
xFF\x9A\x90\x69\x09\xAF\xA9\x5D\xFA\x00\x2C\x21\x3B\x82\x04\xCF\x90\x66\x0C\x4C\x53\x2D\xD1\xB0\xD5\x2E\x44\xB2\x53\x90\xF3\x24\x32\xCA\x14\xB5\x0B\x32\xB9\x0B\x32\x16\x06\x97\x20\xC3\x3B\x97\x58\x9A\x12\xB2\x23\xF8\xD1\x2A\xA3\x74\x0E\x86\xEF\xC2\x5D\xB0\xE4\xD6\x86\x93\xC0\xD7\x94\x64\x03\xD2\xC6\x7E\x33\x8E\x94\x12\x67\xCE\x96\xA8\x90\xF7\xA5\x83\x90\x55\xA7\xD6\xE8\x66\xC8\x2C\xE0\xFF\x1F\x9F\x54\x4B\x86\x53\xBF\xE3\xB7\xF1\x02\x5B\x72\x03\x66\x0D\x79\x41\x64\x2F\xAA\x53\x06\x45\xD3\x4E\x14\x91\x3A\x25\x88\x50\x52\xCA\x5D\xE2\xBB\x44\xE7\xB1\x36\x64\x38\x65\x97\xB8\xCE\x88\x1B\xF5\x31\x15\x62\x9B\x93\x9F\xC8\x2A\xB2\x51\x72\x74\x33\xBE\x39\x13\x82\x09\x1E\x64\xBB\x7E\x8A\xF1\x35\x5B\xAD\x53\xBC\xBA\xFA\x0C\x08\xC1\x2F\x63\x4E\x01\x8F\x20\x76\x5F\xA9\x52\x08\x51\x88\xE6\xCA\x90\x8E\x91\x96\xDB\xB5\xF7\x7D\xC6\x2B\xFA\xD2\xDE\x6E\x51\x7C\xD3\xC6\xD6\xA9\x04\x32\xB9\x69\xDB\xB7\x10\xCB\x4D\x4B\xEF\xA0\x7B\xE7\xE8\x91\x79\x02\x88\x11\x72\x88\x8F\x92\x18\xD1\x1D\xEB\x88\x91\xB7\x63\x62\x64\x0C\xE5\x23\x68\xCD\xC8\xEF\x4B\x62\xB5\xAE\xCE\xA8\xB3\xEA\x0C\xFE\x1B\xCD\xBF\x76\x87\x37\x14\xB1\x1D\xC1\xFF\x41\x34\x6C\x40\xC1\xE7\xD0\xB0\x73\x2D\x0D\xE3\x51\x7E\x72\x8A\x7C\xD5\xE9\x07\xEA\xEC\x20\x8F\x72\x29\xBE\x8B\x6C\x26\x1B\x1B\x1B\x8F\xDC\xD8\x9A\x57\x1E\xF9\xD0\xF9\x9B\xFE\xCD\x77\xBE\xF8\x85\x9B\xAF\xFA\xB5\x1B\x2F\x40\x76\xF0\x5B\xEF\xD9\xB4\x97\xC6\x6B\x64\xB1\xB9\xE9\xEF\x7E\x50\xEC\x35\x37\xFD\xEC\x47\xBF\xE7\x47\xF6\xFC\xF0\x2F\xDD\x78\xFE\x7C\x6D\xA6\x88\x97\x79\x3B\xB1\xB9\x42\xBC\x72\x03\xF9\x7B\x6A\x73\xD3\x95\xDF\xF5\xEE\xF7\xFD\xE7\x55\x7D\xE0\x82\x28\x7B\xCC\x4D\x6B\xF7\xFE\x60\x7A\xCB\x6B\xB3\x03\x17\xDA\xB3\x33\x77\xAC\x51\x37\xD6\xC8\x30\xC9\xAD\xC6\xE4\x96\x52\xC0\xE8\x3D\x37\xFD\xC9\x3F\xFB\xDB\xAF\x1D\xFC\xE6\xDF\xFF\xD0\x85\xF3\xB5\x01\x73\xBE\x35\xEC\x80\x79\x2B\x18\x30\x17\xF0\x29\x38\x8A\x7E\xDD\x81\x7B\xE5\xAF\x67\x1F\x38\xEF\xFE\xBC\xEA\xC0\xF9\x7B\xDB\x1F\x0F\xDF\x78\x1E\x25\xD4\xB8\xD9\x3A\xF8\xAD\xAF\xFC\xD0\x85\xDA\x08\x22\x9B\xB6\xDF\xBD\
x95\x11\x2A\x6E\xE8\x12\x18\x4F\xAA\xD0\x06\x16\x79\xAB\xDF\xF5\x1A\xFB\xCB\x6A\x65\x52\x95\x84\xE4\x1B\x8E\x85\x4E\x6F\xDA\xFB\x9D\xD7\x7C\xF9\x8B\xFB\xAF\xBE\xF1\xFB\x78\xB4\x94\x46\xDB\xB8\xFB\x9B\xAD\x0C\x1F\x42\x7E\xAC\x1A\xE1\xB8\xA7\xAA\xB1\xF5\x88\x8D\x1C\x37\xD6\x83\xD1\x84\xB8\x90\x8D\x84\x13\x46\x95\x7C\x80\xC2\x36\x33\x2F\xB6\x23\x99\x23\x44\x22\x3F\x6A\x11\x22\x05\x73\x2F\xA4\x90\x4F\x6A\xF3\x93\x60\x06\x38\x00\xA1\x45\x04\x8E\x50\xFA\x1E\xC3\x18\xA2\xE3\x93\x7A\x0C\xE5\x04\xC6\x74\x3A\xEA\x25\xAB\xEE\x40\x21\xF0\xD2\x8B\xD7\x1C\x3E\x41\x29\x09\x29\xD6\xEC\xEF\x79\xA7\xA6\x51\x3E\x47\xB2\xF9\x03\x75\xDC\x40\x7C\xAA\x8A\x89\x6A\x24\x30\xAA\xC7\x0D\xA0\x1C\x9A\xDD\x56\xEF\xEA\xD1\xF9\x31\x9E\xAC\x11\x8C\x9D\x4C\x99\x1E\xAF\x76\xC3\xAE\x2A\x86\xDD\x74\x5A\xC6\x42\xE5\x51\x26\x87\xD2\x9E\x6D\xEA\xDD\x2B\x64\xCE\x96\xC8\xD9\xB1\xFD\xC6\xE7\xB7\xBF\xF1\x33\xAC\x6A\x1A\xAD\xDA\x97\x35\x75\x6C\xAF\x85\x11\x2E\x2A\x86\xF1\x94\x24\x35\x62\xC2\x3E\x22\x75\x00\xEC\x46\x6A\x46\x8F\xC6\x10\xE3\x6C\x84\xB8\xEE\x62\xCA\x13\x37\xF6\xB9\x76\x2F\xE5\x63\x9A\xDC\xD0\x0A\x8E\xC6\x51\x05\x22\x1E\x23\x1C\x14\xE2\x6D\x88\x87\x13\xCB\x0D\x2C\xCD\x21\x1E\xA3\x79\xC4\x63\xB4\x2D\xF1\x28\x3D\xFB\x9F\x90\x3D\x2B\xC5\x79\x0D\x1F\x7C\x78\xFA\xC1\x6F\x0D\x1E\xEC\xF6\xFC\x03\xF6\x4F\xBB\x47\x28\xEE\x66\x1E\x78\xEC\x84\xC4\x7F\x52\xFA\x50\xB3\xEC\x81\xE2\xA7\x16\xFF\x22\xC0\x99\x9C\x8B\x8D\x3E\x40\x2D\xFE\xDA\x57\x66\x1D\x38\x46\x91\x74\x87\x01\x87\x24\x86\x95\x0F\x41\xF9\x77\x12\xDB\x64\x3D\x97\xA6\x91\x6C\xC6\xDF\xD5\x8C\x7C\xE5\xB1\x9B\x5E\x04\x61\x15\xE7\xDA\xB0\xAB\x4E\xC8\xD2\x78\x48\x23\x75\x5E\xB9\xDA\xBE\x98\x1C\x02\x49\xD5\xB6\x8F\xA5\xC7\x78\xAF\xE7\x59\x38\x69\x1F\x21\x93\x93\x3E\xC4\xE2\x62\xBC\xD7\x53\x55\x60\xCB\x2A\x61\x33\x58\x4C\x9E\x9A\x87\xAA\xD0\x66\x64\x0D\x95\x01\x94\xB1\x49\xE5\xDB\x7D\x55\x40\x75\x94\x90\xB7\x4D\x0C\x1B\xC9\x99\xAC\x40\x34\xA9\x53\x48\x6E\x23\x5D\x9B\x2B\x65\xB2\xE1\xD2\x5F\x4B\x61\x85\x0C\x92\x13\xAB\xE4\x6E\x74\x49\xAD\x91\x1A\x3A\xE7\xD6\xFE\
x01\x72\x4B\x36\xE2\xB1\xE2\xDB\x5B\x1B\x32\xFC\x41\x60\x6F\x6D\xC4\xB9\xA4\xFC\x1B\x09\x1A\xB3\x2B\xF6\x3F\xAD\x83\x5F\xFE\x2D\xFE\x9E\x40\x40\x79\x07\x12\xF0\xC9\xD6\xE4\xDF\x2C\x9E\x55\x21\x64\xC4\xE0\x50\x32\x33\xFE\x7D\x84\x6C\x80\xC6\xAE\xA3\xD0\x08\xFE\xD1\x42\xE3\xC9\x4F\x4E\x80\xDF\x50\xE6\x36\x4E\x9A\xCB\x0D\x0C\x6B\x38\x62\x89\xB8\xA5\x6D\xE2\xE5\x90\x72\x4A\x23\xD0\x39\xB3\x15\xFB\xF9\x10\xA2\x82\x07\xF9\x84\x3C\x1E\x52\x30\xD6\x03\xB3\x0A\xD1\x84\xA6\x30\x31\xD7\xB7\x0E\xE3\x8A\x56\xCF\xF1\xB8\xDE\x5E\xC4\x7C\x8A\x64\xA2\x14\x09\xAA\xA9\x58\x75\x29\x8E\xB2\x9E\x59\x21\x27\x07\xC4\x20\xDE\x36\x71\x5D\xF6\x68\x5A\x1C\xB8\x10\x70\xEA\xA6\x60\x95\x42\x30\x14\xC5\xFD\xCC\x19\x69\xBF\x52\xEB\x6D\xA5\x63\x46\x40\xEA\x4F\x51\x64\xDC\xAD\xF5\x74\x71\xDD\xC0\x33\x2F\x53\xFE\x3A\x68\xC9\x88\xEC\xBB\x01\x10\x22\xC4\x2F\x40\x30\x3D\x08\xD5\x28\x0E\x3A\xEE\xDC\x75\x50\xC0\x1E\xC3\x8B\x3A\xF4\xBE\xEA\x4B\x3A\x5E\xCF\x5C\x37\x6F\xD2\x9C\x93\x69\x76\xAA\xFF\xA3\xF2\xD7\x25\xA6\xDC\x03\xD2\x4F\x51\x9C\x35\x7E\x1A\xB7\x91\x12\xC0\xD8\xF5\xC6\xFC\x12\xA7\xAC\x65\x1F\x8E\x06\xFC\x95\xC2\xB3\x7F\xD6\x9D\x6F\xE0\x1C\x2D\x61\x5B\xBE\xAE\x0A\x01\x0F\x9E\x57\xC5\x10\x90\x72\x36\x66\x5F\xA5\x14\xC2\x86\x3C\x69\x9A\x3A\xB3\x1B\x5F\x79\xDD\xBD\x67\x1A\xE0\x52\x03\x90\xD9\x4B\xAF\xFD\xF2\xE7\xFC\xE3\x27\xEB\x70\xC2\x25\x2C\x02\x72\x3A\x65\x5F\x7A\x49\x5F\x9E\x90\xEB\x2E\xA7\xCF\x50\xA7\x20\x00\xFF\x96\x93\x9C\x21\xC4\xEA\xD3\x75\x88\xFC\xAB\x82\xB0\xA9\xB4\x55\x04\xEA\xCC\x8A\x3B\x60\xD8\x50\x29\xA2\x8D\xD7\x7D\xF9\x73\xFE\x61\xF9\x82\x40\x51\x9E\x92\x13\x36\xE5\xA4\x77\xDF\x0C\x68\x9D\x46\xEA\xED\xDF\x22\x28\x53\x91\xB6\x11\x3F\xA7\x1B\xAA\xC3\xA1\x1B\x76\x14\xE8\x1C\x7C\x06\xA3\xB4\x3E\x94\x06\x42\xB3\x25\x99\x8D\x35\x9F\x72\x1F\x37\xB8\xC5\x0A\x76\xFA\x01\xBF\x21\xF7\x63\x4A\x0B\xCD\x10\x8A\x11\x7C\x2B\x35\x4A\x7E\x0E\x46\xBE\xAC\x80\x42\xF3\x25\x46\xA6\xC5\x1F\x49\xB4\x1F\xF3\xC2\x56\x98\x63\xD3\xEC\xCD\x8C\x5F\xB2\xEC\xAC\xC6\x97\x52\xEC\x80\x12\xF7\x87\x8C\x29\xEC\xF3\x5E\xC5\
x67\x49\x15\x9E\x3D\xF7\xF1\x76\xBB\xA5\x16\x28\x1E\x27\xAA\x17\x28\xD3\xD4\xC8\x02\xAC\x80\x6E\x27\x19\x4C\xD8\x61\x29\x38\x44\x96\x06\x1F\x14\x5B\x1A\x3C\x82\x0C\x27\x9F\xE4\xED\x09\x9A\x1A\x1F\xCB\x4C\xA8\x9F\xFB\x89\xFB\x22\xA0\xF4\x41\xAD\x54\x2D\x22\x73\x48\x6B\x60\x4E\xB0\xAE\x14\xA9\x0A\xFB\x32\xE1\x99\x43\x84\x6E\xAB\x28\x72\x09\x6C\x19\xDC\x41\xAE\x1D\x9C\x66\x43\x7E\xE4\x2B\x74\xA6\xE8\x18\x04\x7C\xAA\xC6\xC6\xDE\xD3\xAD\xDB\x2A\xF3\x5E\xAD\x62\xE7\xAF\xC8\x99\x62\x90\xD2\xFF\xD8\xC7\xA7\x8F\xC2\xE5\x8E\x81\x43\xCA\x6C\xB5\x36\x3C\x95\x06\x88\xE0\xAF\xEC\x0C\xFD\x53\xC8\xB8\x20\x9A\x7F\x4B\x87\xFF\xBA\x21\xE4\xC4\x4B\xC9\xAE\x93\x9C\x11\x41\xCA\xF9\x71\x74\xC3\xBB\x42\x31\xAA\x2D\xEA\xEB\x42\x59\xCF\xAE\x83\x9E\x54\xA9\xFB\xA2\x7C\x6A\x01\xF2\xC7\x0E\xF9\x23\x9C\x96\x07\x51\x43\xDE\xC6\x51\x5B\x40\xBD\xC5\xFA\xA0\x6F\x23\x30\xC6\xBC\x4E\x71\xD6\xEB\xED\x50\x5F\xC9\x76\xC5\xAB\x75\xE2\xA0\x92\xA0\x84\xB1\xB2\x3D\xC2\x63\x13\x71\x2C\xED\xB0\x9C\x07\x55\x76\xDD\x2D\x5D\x0D\x86\x38\x24\x18\x1E\xA8\xB3\x9A\x5D\x96\xC4\xB1\x6C\xB0\xE3\x1E\x07\x8C\xFA\x5B\x5C\x2F\x04\xC8\x1B\x9F\x96\x8C\x1C\x94\x7A\x1E\x0E\x78\xB6\x8E\x37\x9B\x3A\xC1\x1B\x75\x85\xBC\xC7\xE2\x33\xE2\x15\x06\xC9\xD6\xDD\x07\x69\x1D\x1B\x75\xBC\x59\xFB\x7C\x52\x4E\x34\xBD\x42\xEC\xA2\x63\xEB\x3C\xAD\xE3\xCD\xAA\x3B\xAF\x94\x30\xCA\xFC\x95\xF2\x93\x76\x8A\xDB\xA1\x1E\x65\x8C\x3E\x54\x68\xF1\xCB\xA7\x79\x6A\xF6\x51\xC5\x9D\xAB\x12\x88\xAA\x14\x7C\x66\x39\x48\x0C\x4D\x11\x46\x39\xC4\xB8\x90\xE4\x79\xDA\x3B\x5B\x17\x9B\x4D\x3D\x02\xD3\xD4\x06\x8A\x83\x70\x0F\x8C\x20\x5F\xD9\x3A\x03\x06\x46\x2B\x5B\x67\x70\x75\xA6\x5B\x53\xB1\x89\xD8\xDB\xAD\x89\xEA\xDF\x25\x9C\x5C\x3E\xED\x89\xB1\x1E\x07\x71\xE2\x82\x8A\x7F\xD1\x21\x37\x73\x9D\x92\xDD\x8A\x6E\x73\x46\x64\xE7\xE8\xFF\x56\x15\x28\x5A\xB7\x3A\x3B\x6F\x6B\xC6\x1C\x07\xAE\x4F\xDB\x33\x4D\x4D\xC0\x44\x42\x82\x04\x62\x8B\xAF\x22\xBE\xD5\x18\xB5\x82\x2D\x9E\x34\x6F\x56\x00\xD1\x60\xAB\x1E\xA6\xD2\x0C\x77\x41\xB0\x75\x37\x84\x1B\x9B\x75\xC4\x75\xFD\x22\xF0\x4F\
xAC\x12\xBE\x9D\x61\x1E\xE1\xCC\x80\x47\x80\xC0\xFC\x76\x1E\x46\xEB\xEA\xAC\xBF\xAE\xCE\xB6\xA9\x33\x5F\x13\xAC\xD6\x51\x4F\x9B\xCD\xB9\xB2\x38\xFB\x8E\x32\xF6\xCD\x3D\x42\xDA\xB1\xAC\x14\x7F\x62\xD7\x2B\x21\xC7\xE0\x4B\x98\x8A\x44\x65\x59\xD5\xDC\x46\x67\xF4\x35\x78\xEB\xD0\x3D\x7C\xD1\xE3\x78\x6D\xD5\xD8\xA0\xFC\x19\x72\xE8\xCF\x0E\x15\x81\xA1\x42\x4F\x15\xC9\x6B\xD2\x26\xEC\xB7\x31\xA4\x90\xF1\xF6\xE8\xF7\xFE\xDC\x5D\x1B\xDE\xD2\xDD\xAC\xFF\x5D\xAF\x62\x97\xFB\x36\x98\x39\x02\xE4\x60\x9E\x6E\x11\xEF\x59\x90\x0F\x6F\x50\x8D\x49\x10\x29\x40\x23\xE6\xE4\x67\xEB\x72\x93\x2E\x13\x28\x18\xAE\xE5\x10\xAE\x71\x87\x33\xE5\x66\x5D\xF4\x71\x46\x08\xC6\x08\xFF\x19\x75\xBE\xC2\x50\x6E\x52\x16\x19\x08\x91\xE8\x64\x92\x97\x42\xBB\x20\x82\x70\x66\x92\x7E\x6F\x92\xC8\x18\x17\x10\xD2\x24\x0B\xBC\x95\x7A\x93\x1C\xC1\x08\xF4\xDC\x49\x8E\x06\x93\xD4\xFD\x49\x3A\xFB\x28\xFE\x53\x0C\x27\xE9\xA3\xA4\x4F\xD9\x2D\x96\xD8\x11\x9C\x9D\x13\x59\x49\xC8\x21\x44\xBB\x20\xE3\xD4\x4A\xD5\x6E\x30\x52\x82\x61\x19\xD8\x8A\x55\x5F\x41\x3F\xAF\x44\x66\x05\xE2\xFA\x59\x38\xD0\x55\x0D\x2C\x49\x1A\x26\x0D\xCB\x14\x7E\x70\x5B\x3D\x62\xC4\x38\x41\xA1\xB0\x52\xB8\x03\x0A\x18\x9D\x58\x6D\x31\x1B\x97\xE7\x43\x3C\x77\x71\xFE\xD6\xDD\x50\x6C\x6D\x6C\xE2\x25\x11\x03\x99\x7A\x4F\xAC\x92\x59\xFE\x0A\x78\x16\x1D\xCD\x6A\x24\xEA\x42\xC7\x65\x10\xCD\xBF\x12\x41\x17\x6F\xD5\x08\x3D\x81\xCE\x66\xAD\xD9\x6B\x5D\x1F\x29\x34\x38\x38\x41\xB9\xD9\xC0\x68\x8F\xF6\x9E\x4F\x49\xD9\xC8\x00\x4F\xFE\xEC\x45\x53\x6B\xBA\x4C\x28\x3A\x45\x77\x4C\x09\x79\xDA\xE4\x48\xAB\xC6\xB0\x9B\x02\x57\x0A\x31\x0E\xFB\x6C\x43\x0E\x40\x43\x0E\x05\x91\x25\xDE\xBA\xE1\xC6\xDD\xDE\xDF\xB3\x6E\xC7\x60\xD4\xD4\xA3\xC1\x0D\x12\xC0\x68\xA5\x1E\x11\xF9\x1F\x11\xBF\x36\xB3\xA1\xAA\xA9\xC7\x10\x3E\x9F\x89\xD5\x55\x04\x46\x49\xD5\x77\x88\x3D\xF6\xA9\xEE\xB9\x27\xF6\x32\xBA\x0A\xC2\x6A\x84\xE7\xC1\x3F\x00\xBB\xF1\xCE\xD2\xAE\x9C\x4A\x81\x37\x39\x15\xA6\x98\xE2\x8D\xE2\x3E\x6F\x54\x4C\x7A\x78\x35\x42\x84\x1A\xE1\xC7\x8B\x43\x85\xB2\xAA\x1A\xC1\x2E\xFA\x02\x01\
xC3\x4A\xD8\xCE\xA8\xA9\x0B\x28\x1C\x1F\xC3\xCB\xD1\x48\x54\x74\x3F\xC2\xA1\xD2\x6D\x1B\xF1\x47\xED\x31\xEA\x06\x76\x21\x0D\xDB\x05\xBB\xF1\x9F\xDD\xF0\x2C\xF6\x93\x78\x16\x71\x3B\xCA\x69\xE0\xF9\xCA\x80\xAC\xC1\x46\xBE\xA4\x29\x44\x9A\x67\xD5\x0B\xA4\xE8\x48\xC0\x6E\x19\x55\x41\xE8\x88\x90\xA4\xF4\x6C\x8A\x24\x6D\x88\xEC\x6F\x7D\x5C\xCA\xDE\x46\xF6\x73\x01\x85\x66\xEC\xD1\xBF\xFE\xAE\xBB\x36\x3C\xFB\x89\x8F\x93\xBD\xBC\xBC\x9B\x68\xBC\xFD\x73\xFC\x69\x9F\x63\x55\xDB\xA2\x7C\x05\xBF\xC9\xBA\x27\xAF\xA4\x27\xCE\xC1\x7D\xEE\xF1\x27\xC5\x4C\x46\x13\xC7\xEB\x40\x73\xBE\x3D\xDA\x6B\x21\x0A\xB1\xEC\x96\x66\xE4\x1A\x41\x31\x38\x38\x05\x5E\x70\x73\x0E\x4E\xB1\x75\x37\xE4\x1B\x9B\xF5\x08\x57\x53\x40\x8A\x27\xAE\x5B\xB6\xC6\x7F\xF4\xD0\x30\x9A\x31\x99\x3E\x3C\x8F\xD0\x22\xB1\x40\xFA\x7C\x98\x5C\x58\x66\x5E\x46\xF6\x35\x81\xD3\xBC\xC4\xE6\xCF\x12\xCA\x26\x44\x22\x58\x1D\x92\x37\x8A\xFD\x2F\x53\xB7\x07\x04\x36\xBD\xAD\x8E\xEC\x4B\x4E\x20\x13\x1D\x23\xB2\x72\xDD\x08\x7D\x1A\x14\x6D\x60\xC0\x75\xFA\xF4\x69\xF0\xE8\x37\x0B\x2B\x81\xCB\x6F\xC9\x4D\x64\x1D\xF4\x25\x5F\x44\x76\xDF\x9E\x65\x3F\x8C\x75\xBC\xCD\x15\xF3\x11\x54\x9E\x80\xA8\x99\xDD\xC0\xA9\xD3\x83\xAC\xF3\x60\xB5\x29\xC4\xC8\xD3\x66\x38\xD9\x37\x7F\xA2\x53\x01\x51\xE2\xEC\x80\x7D\x93\xDB\xFA\xB4\x64\x9E\x1B\x05\x9E\xD2\x7E\x68\x20\xB0\xD9\x6D\x75\x6C\xEF\xC0\xA5\x50\xDC\x35\x85\xF0\x06\xF6\x21\x0F\x1F\xBF\x69\xBD\xF7\xDC\xC7\xE7\x5F\x8A\xF1\xF9\x1B\xEE\xEC\x3D\xC7\x2B\xC3\xBE\xE6\x53\xD4\xE1\xE1\xFF\xDC\xEF\xA1\x4C\xEF\xA3\x2F\xED\x7F\xF4\xFC\x8F\x46\xD8\xFC\xF3\x7F\xF6\x83\xD3\x1F\xB8\x78\xF7\x77\xE0\x9B\x8B\x7F\x77\x70\xFA\x13\x1B\x5B\x5F\x0C\xF0\xD5\xC6\x83\x6F\x7E\xE9\xE0\x23\xF8\xEE\xC3\x9F\xFE\x16\x7A\xF7\xC1\xFB\x6E\xED\xDE\x99\x9E\x64\x00\x12\xC5\x2A\xD5\xAA\x11\xCE\xAD\xAF\xD7\x3A\xF9\x1A\x48\xC2\x20\x60\x40\xD2\xBE\x54\x39\x28\xB9\x3C\x7C\x37\x40\x15\xD3\x62\x88\xD7\xA7\xF3\xA8\xFA\x30\x1F\x38\x34\xA7\xA3\x10\x57\x1C\x84\x94\x4B\x06\x97\x1C\xF0\x92\xC9\xD3\x25\xA0\xDA\xC3\xB8\xDC\x80\x97\x2B\xFE\x2F\xA0\x64\xA9\
x81\x2C\x95\x9E\x6B\x7A\x4E\xCB\x0C\x64\x99\xF4\x9C\xCA\xBA\xD1\x99\xA2\x17\x4C\x8D\xF1\x0D\xA5\xFA\x85\x10\x32\x77\x9D\x9E\xE1\x1C\x6A\x67\x48\x61\x54\x85\xA4\xDA\xAE\xF3\x16\xF1\xB6\x59\x6D\xB8\x52\xA5\xBC\xE8\x44\x08\xA4\x96\x54\x52\x5C\xB1\x04\xD2\xC2\xEB\x3B\x54\x8F\xC2\xC0\xD7\xCA\x8B\x16\x4D\x2D\x0F\x17\x2C\x26\x58\xB0\x78\x7F\x2E\xA8\xF4\x5C\xB0\xAA\x9E\x73\x39\x18\x99\x0A\x7D\x90\x70\x34\x60\x1C\xED\x3E\x87\x18\x1D\x10\x46\x77\x9F\x7A\x88\x1A\xBE\x69\xBD\xFF\xA1\x0C\x1F\xDD\xD1\x7E\x84\x58\x1E\x85\x38\xB6\x0D\x98\x1D\x88\x0D\xE5\x64\x65\x76\x0A\xB2\x13\xA4\x7D\x43\x82\xFC\x41\xD2\xBD\xBE\x3E\x57\x66\x5D\xB8\xED\x0F\xCF\xE3\x54\x7D\xA4\x37\x01\xD2\x1B\xBF\xE1\xCA\x4B\x2C\x11\x72\x39\x42\x9B\x42\xB8\x3A\x24\x0E\x31\x71\xA2\x33\xF4\x80\x72\x81\x06\xC7\x29\xC0\x23\xE1\x20\x0F\xDD\xCB\x90\x40\xC7\x33\x42\x8A\x98\xDD\x56\x67\xB8\xD6\x88\x64\x8A\x2A\xC2\x0D\x8B\x08\x2A\x19\x41\x25\x6A\x6A\x6A\x94\x4A\xA3\x84\x1A\x11\x1F\x8C\xE0\xCC\x08\x9C\xD4\x08\xFB\xA4\xD2\x47\x9A\xF9\x44\x73\x3F\x45\x83\xD1\x5E\x50\x43\xEC\x97\x4A\x3F\x69\x48\x32\x55\xE2\x20\x70\x4B\xEB\x8B\xC5\xF9\x11\x90\xD6\xD6\x8E\x31\x8B\x04\x9B\x52\xC1\x26\x1C\x92\x63\xD9\x38\x6D\x88\xBD\x13\x85\x9F\x80\x75\x73\x44\x70\xAD\xA2\xB8\x65\xAE\xEC\x4A\x5E\x1F\x11\x64\x72\xA1\x26\x14\xAE\xB6\xCA\x61\x7E\xCA\x15\xD2\x24\xD1\x26\x12\x1A\x2E\xE2\x94\x28\x32\x5A\x1A\xEE\x9B\x79\xF5\x3B\x42\x24\x7E\xA1\x17\x22\x2D\xC0\xC5\x13\xCE\x66\x8C\xB3\xB4\x78\x82\x46\xEA\xA0\x21\xCB\x4F\xB0\x29\x1F\x06\xC3\x87\xA1\xCA\x19\xC6\xBC\x56\xC3\x6B\xAD\x72\x86\xA8\x1C\x34\x27\x1C\x56\x39\x57\x6B\xA4\x6B\x32\xB3\x19\x83\x2F\xC9\x03\xD3\x47\x09\x7A\x44\xDC\x03\x9D\x2E\x43\xA7\x8B\xED\x5D\xEB\x0D\x45\xD7\xD1\x74\x4C\x45\x09\x23\x3A\x7C\xC9\x78\x45\x42\xDC\x12\x5E\x51\xCA\x2B\x4A\x1A\xFA\x0C\x24\x3C\x66\xCA\x27\x96\x9E\xFA\x54\x37\x8D\x96\x94\xCA\xF9\xA6\xE7\x54\xD8\x62\xB8\x83\xF4\x9C\xEB\xAC\xF1\xC2\x52\x47\x41\xF0\x0D\xD7\xBC\x26\x1F\xB8\x44\x24\xB2\x2A\x73\x58\xD1\xB1\x50\x2C\x0E\x3B\x16\x8A\xBE\x43\x54\xCB\xB8\x73\x6A\xDA\x1D\x47\
x4E\x85\x6B\x41\x52\xD5\xBB\x5B\xD8\x43\xEB\x14\xB2\x33\x45\x7B\x9A\x40\xB5\x65\xB8\x70\x4F\xAB\x10\x8A\x2A\x00\xAE\xEA\xC2\x03\x2F\xBC\x0B\xC2\x0E\x5C\x86\xC1\xA5\x19\x5C\xA6\xA9\x28\xA2\xDC\x30\xB8\x34\x83\x8B\x9E\xA2\x9C\x23\xE0\xD2\x02\x2E\x7A\xAE\xE9\x39\x81\x4B\x0B\xB8\x8C\xA4\xD9\x32\x0E\x5C\x2D\x67\x8E\x6F\x90\x4D\x53\x10\x23\x72\xF5\xB4\xA5\x8E\x83\xC5\x93\x21\x39\x35\x29\x41\xB5\xCB\x14\xE5\x16\x1A\xAE\x90\x1F\xCB\x4B\x51\x58\xC4\xD3\x52\x90\x98\x1F\x55\x5A\xD6\xCC\x6E\x21\x83\x2B\x60\xD4\x5D\x01\xED\x8C\xA2\xFE\x8C\xF2\xB0\x5B\x43\xD4\x5F\x43\xD0\xAD\x39\xEA\xAF\xD9\x6F\x21\x14\xF5\x20\xA4\x5B\x68\x46\x3D\x68\x0E\xAE\x80\xB0\xBB\x02\x0C\x1F\xB4\x88\x0F\x5A\xF7\x39\xA4\x3D\x11\xD1\x9E\xEE\x53\x0F\x51\xC3\x37\xAD\xF7\x3F\x94\xE1\xA3\x3B\xDA\x8F\x58\xAF\x52\x74\x34\x1C\x74\xF5\x1C\xE8\xB6\x90\x95\x5C\xA5\x1E\x14\x74\x0F\x50\xD0\xA9\x70\x09\x6D\xD4\x0F\x39\xAD\x7B\x47\x1A\x0A\x0A\x31\xE7\x94\xD6\x67\xD5\xFA\x02\x3D\x8C\xD3\x8F\x11\x59\xEA\xF4\x62\xE1\x26\xA9\xDD\x9D\x5E\x2C\x6C\xC5\xB6\xA8\x13\xB5\xC3\xF9\x7A\x31\xA7\x95\x6D\x75\x2D\xE1\xA6\xF9\x76\x0F\x3C\x7B\xF1\x55\x3F\xFD\xEE\xB3\x94\x54\xFD\xE0\xBF\xC7\xD1\xEE\xF9\xD0\xDB\xBF\xE1\x7A\xED\x81\x27\x19\xBE\xF4\x35\xF2\xE7\x45\xEF\x7A\xBD\x0C\xDE\xC1\x0F\xFE\x84\xBA\x5E\xE7\x33\x9D\x2F\x3E\xF2\xE0\xC7\x5F\xBE\xB0\xF3\xE7\x87\x9D\x7F\xCB\x0F\x7D\xD1\x44\x05\xEB\x56\x49\x05\x63\x7D\xDA\x7E\xE6\x5D\x72\xCD\xD5\x24\xCC\x13\x54\xC3\xAD\x2A\xA6\xF4\x20\xBA\x4A\x24\x61\x52\x24\x05\x1E\xB6\x24\xAB\xC9\x56\x9D\x42\xB0\x75\x16\xE2\x73\x9B\x78\x6B\xA7\x90\xF2\xDF\x09\xB6\xBC\x83\xCD\x05\xE2\xF1\xC5\xE5\xF6\xFE\x4F\x02\xF5\x0F\x4B\xB5\xA8\x0D\x38\x5E\x27\x13\xFB\x23\xFC\xF3\xFB\xAD\x3A\x5C\x9B\x49\x1D\x2D\xD1\x86\x78\x7C\x27\x6B\x64\xFB\xBB\x27\xF6\xD5\x34\xC2\x8F\x52\x97\x64\x62\x7F\x8C\xFE\x30\x12\x20\x4E\x0E\xA1\x32\xF7\x94\xDC\xB5\x59\x79\x88\x9B\xF1\x3C\xBC\xC3\x21\xE2\x4D\x8D\x50\x18\x87\xF4\x9C\xD3\xE2\xD5\xE4\x46\x3F\x78\x48\xB9\x42\x14\x67\xD3\x4B\xAC\x6E\xEA\x04\x8A\x15\x61\x3B\x70\x52\x79\x6F\x52\x8A\xEC\x24\
x2F\x6B\x38\x55\x16\x7E\x1C\x82\x2D\x92\xD4\xF8\x86\xF3\x48\xFB\xE0\xC6\xF6\x6C\x4E\x7A\xD6\xA9\x87\xC9\xBC\x87\xC1\xBC\x87\x25\x67\x69\x08\xA8\xD0\x0A\xCF\x29\x33\x3F\x37\xB5\xB7\x3E\xA7\xE9\xEA\xEF\x6D\x80\x7B\xCB\x8E\xB2\x5B\x55\x44\xD5\xA8\x55\x9B\xEB\x33\xE4\xBD\x8D\xB7\xF8\x8E\x88\xB7\xEA\x04\xFC\xAD\xB3\x10\x9D\xDB\x64\x3B\x6B\xC2\x7F\xC7\xD8\xF2\x0E\x96\x12\xF9\xFE\x62\xB7\x89\xC1\xDE\x2A\xDA\xDB\xD0\xED\xAD\xA2\xBD\x8D\x27\x0B\xF6\x31\x74\xFB\xC8\xDE\xF6\xE4\xA6\x5E\x1B\x99\x27\x32\x55\x31\x78\xA4\x61\x0F\x21\x7C\x1E\xA9\x41\xA3\xB3\x90\x10\x30\x42\x84\x42\x0E\xF9\xCC\xE3\x84\x9D\x08\x74\x53\xC7\x60\xDC\xBE\x21\x3A\x65\x83\x7D\x4B\x41\xE1\xBE\xE5\xEE\x63\x48\x02\x22\xCB\x5F\x93\x7D\x6B\x07\xF5\x38\x0D\x70\x3C\xF5\x30\x99\xF7\x30\x98\xF7\x90\xF6\x8D\x66\x1C\x42\xCE\x73\x4A\xCD\x5F\xF9\x81\x5E\x57\x67\xA3\x75\x7D\x56\x4B\x5A\x0A\xAA\x15\xEE\x55\xCE\x4B\xC5\x7A\xCF\xD3\xB4\x89\x3C\x33\xD1\x01\xB8\x33\x19\xB2\xC2\x26\x65\x53\x56\xC6\x8A\x62\x63\x3D\x94\xCA\x3F\x87\xBD\xEA\x00\x0C\x04\x2B\x93\xD5\xDA\x90\x21\x31\x80\x8C\x7F\xE6\xAB\x64\x43\x33\x90\xAF\x4C\x9A\x2D\xFC\xCA\xC3\xD8\xFE\x2C\x04\x5B\xE7\x20\x62\x15\x3A\x69\x3C\x3E\x4F\xC3\x50\xF0\xFF\x0A\x69\xD3\x37\x9B\x3A\x03\x73\x3B\x51\xC3\x60\xEB\x6E\x30\x5B\x75\xB1\xB1\x79\x82\x86\x34\xD6\x83\xB4\x1B\xF2\x2F\x78\xC8\xE2\x5C\x5D\xE0\xCF\xAF\xD2\xCF\x7A\x44\x83\xC0\x68\xB3\x36\x40\xCF\x1F\xE1\xE7\x32\x78\x08\x66\xA5\x41\x01\xA0\x81\x00\x70\x1A\xB8\x80\xA6\xC6\xEF\x37\x14\xC3\x94\xD0\x04\x56\x1A\xE0\xA3\x8B\xEF\xF8\x4A\xBC\xA5\xF3\xED\x09\xDC\x25\x19\xBA\x4B\x32\x68\x28\x3F\x30\x04\x5B\xB3\x06\x80\xB8\xBB\x49\xE9\x6D\xD6\xBD\x1D\x6D\xD6\x89\x7B\x9B\x35\xFB\x65\x7F\xAB\x96\xC6\x3B\xB3\x9E\x30\xAC\x06\x46\x9B\x15\x4E\xBB\xE2\x44\x93\x10\x9C\x84\xF0\x24\xA2\xDB\xFF\xD3\xF1\x31\xC6\xFC\x5C\x12\x07\xEB\x6C\xB4\xC1\xFF\x1F\x93\xF1\xEF\xFB\x29\x31\x8A\xFD\xCD\x4F\xF4\x25\x16\x50\x94\x7C\xCD\x7E\xB1\xF7\x94\x36\x7D\x70\xBA\x39\xA1\x01\x25\x0F\x67\x57\x78\x7D\xBA\xE2\xF2\xE8\x55\x84\x58\x8D\x67\x88\x04\x14\x9F\x9C\xD0\x89\
x1E\x66\x90\x40\xD8\xD4\x86\x8A\x8F\x11\x51\xAC\x0A\xB2\x7E\xD4\x23\x30\xF8\xCF\x98\x62\x96\x48\x1F\x5A\x8F\x9A\x7A\x89\xB4\x78\x4B\x50\x22\xFA\x2C\x59\xBC\x23\x97\xC0\x5F\x99\xAC\x92\xD5\x04\x72\xCE\xA7\xBF\xD4\xD4\xBB\xA8\xE1\x2E\x44\x83\xD5\x7A\x17\x35\xDC\xD5\x36\x1C\x43\x06\x23\x28\x57\xA9\xCD\x08\x07\x6B\xE8\x0E\xC1\x43\x62\xA0\x80\x25\xC8\x57\xE5\x43\x79\xFF\x9D\xD0\xE4\x84\x2A\x46\xA3\x28\xDD\xE0\xF5\xE3\xCC\x7B\x8A\x1D\xEB\x69\x6B\x76\xE3\x82\x33\xAB\xAB\x65\xFF\x80\xD8\xD4\x81\xD2\xA0\x5D\x81\x7F\x5F\xD9\x54\xCF\xA2\xB2\xD4\xD5\x55\x94\xF1\xB5\xFA\x06\x01\xCD\x88\x7C\xAF\x9E\xD5\xD4\x4B\x0C\x10\x02\x4E\xCA\x05\x16\x12\x48\xE0\xCA\x16\x20\xB4\x4E\xD3\xAD\xB3\x9C\xB7\xCE\x94\x58\xFD\xCB\xC1\xC2\x38\x48\x18\x82\x04\x3E\x5C\x82\xC8\x81\x20\x22\x10\x30\x78\xAF\xC2\x65\x8F\xE0\x1B\x70\x77\x38\xFB\x0B\x65\xA4\x54\x70\x85\xD5\xF8\xFF\xC7\x27\xD5\xD5\x56\x55\x13\x5E\xD4\xB3\x20\x80\x2B\x1A\xC4\x82\x6F\x80\xDD\xD5\x35\xA4\x2D\x9B\xC0\xF2\x09\xE4\x94\x6C\xCE\xD7\x21\x39\xBD\x5D\xC3\x61\x67\x38\xEE\x3F\xE0\xBA\xB7\xF0\x82\xCB\x68\x9F\x67\x96\xDF\x7B\xD7\x42\xE1\x59\x1D\x14\xAE\xC1\x2D\xBF\x06\xC8\x61\x7C\x02\x57\x53\xB8\xDA\x32\x82\x60\x19\x77\xF9\x78\x95\xC0\x15\x55\x80\x32\x0D\x5D\x58\xC8\x13\xE4\xCC\xE4\x51\x9A\x36\x9F\x82\x7C\x78\xB1\x39\x48\xDA\xCB\xBA\x14\x99\xB4\x94\x4C\x26\x0A\x94\x1D\xDB\x6B\x25\x8D\x3C\x05\x7B\xD6\xD1\xA4\x2E\xED\x9D\x0D\x94\xE0\x83\x6F\xCB\x3B\x20\x9A\xD4\x11\x39\x23\x19\x7C\x73\x06\xDF\x44\x88\xB5\x77\x00\xB2\x35\x76\x2C\x2F\xCE\xBA\x17\x81\xBC\xF0\x39\x31\x1F\x95\x11\x00\x33\xB1\x6A\x6D\xD5\xAA\xE6\x8E\x55\x48\x4E\x55\x09\xE4\xB8\xBC\x5C\x92\xEC\x18\xB3\xA1\xD8\x33\xC6\xD1\x8A\x3F\xEC\x51\x05\xFB\x97\x9F\x10\x67\x9D\xF6\x40\xD8\xAF\x7E\x42\x92\xA6\xDA\x97\x6F\x6C\xEC\x23\x2F\x0A\xBF\x4F\x5A\xD6\x41\x97\x7F\xCF\xA9\x70\x25\xF3\x3D\x89\x64\xE5\x7F\x3C\xC7\x19\xB4\xA6\x4D\x6C\xEA\x09\x9F\x81\x7A\xD4\x33\x78\x30\x4A\x03\xE2\x73\xD4\x59\x7F\x5D\x9F\x65\xEB\x6A\x97\xEA\x43\x12\xD4\x21\xDD\xBC\xAD\x0A\xEC\x4F\x7C\x92\xDC\x30\x65\x4A\xFB\x70\x4A\
x6F\xF9\xA4\xE4\xF5\xED\xA6\x14\xB6\x94\x75\x48\x56\x43\xBB\x2E\xD1\x9F\x04\xAA\xAF\x9D\xEB\xB9\x19\x5B\xEF\x98\x30\xCA\x02\xC8\xAF\x92\xC7\x67\xC2\xBA\xD1\x80\x53\x1D\x91\x10\xED\x9F\xAE\x0C\x37\xF4\x9F\xA7\x93\x2A\x17\xC6\x34\x02\xD3\xD4\x05\x7B\x53\x24\x10\x41\xDA\xD4\x44\x6B\xC7\x5B\x75\x89\x2D\xF3\x7A\x89\x38\x6A\x3C\x4D\x21\x9D\xA6\xC4\x9D\xA6\x10\x76\x41\x48\x3F\xA0\x20\xDB\xDA\x56\xBD\x1B\x72\x69\xBD\x4C\xAD\x97\x61\xD7\x74\x6B\xB6\x1D\x25\x50\xB6\x4D\xB7\x1B\x78\x37\x2C\xED\x64\xC0\x08\x0A\x18\x83\x64\x79\xC7\x33\x3A\xC6\x56\x09\xB5\x4A\x78\xAC\x84\x0C\x53\x16\x0F\x7E\x32\xFD\xD2\xE9\x7E\xEA\x08\x32\x3A\xD0\x03\x58\xB2\xB4\x17\x49\xD2\xBE\x40\x0A\xBF\x27\xB8\xDA\xF8\x1C\xB2\x7E\x38\xC3\xAA\x3C\xA8\xAA\x25\x09\xA1\x0E\x61\x49\xD2\xDE\xD4\x4B\xC4\x30\xBA\x0B\x84\x58\xAF\x31\xE4\xEE\x6D\x0E\xE3\xC1\x5B\xE2\x19\x05\x32\x55\x8E\xF7\x1C\xFD\xB5\xE4\xF8\xFF\x88\x53\x6E\xD1\x76\x62\xD3\xDD\xB0\x1B\x1B\xD4\xA5\x34\xCC\xA1\xDC\xAC\x92\x83\x8A\x02\x28\xC6\x34\x93\xD2\x7D\xAB\x9C\x9D\x49\x41\x36\x41\x5C\xCA\xD2\x82\x39\x94\x90\xCB\x0E\x54\x09\x8C\x71\x12\x63\xF1\x6D\xB5\x8A\xCA\xDE\x2B\xAE\x57\xCE\x89\xF8\x29\x51\x39\x12\x7C\xAE\x43\x49\x69\x12\xCB\x4B\x94\xF9\xA9\x55\xD5\x78\x48\x99\x3D\xB2\xD2\x93\x86\xBA\x4D\x7A\x93\x98\x9F\x8D\x92\x48\x92\xDE\x28\x61\x4A\x76\x72\xA4\x76\x76\xAC\x78\xA6\xC4\x95\xE1\x04\xDB\x19\x0B\x67\xCF\x25\x17\x40\x97\x1B\x77\x75\xD3\xF5\x0D\x84\x38\xDD\x10\x62\x9E\xEE\x94\x38\x43\x42\x9E\xAA\x12\x21\x21\x8C\x2F\x2C\x3F\xF8\x2B\xAD\x3B\x11\x29\x21\x21\xBC\xBD\x8E\xB7\x6A\x03\x06\xA5\xA8\xCA\x1C\x54\x74\x07\x28\x3A\x82\x3D\x7C\x98\x91\x0B\x65\x8F\xE2\xAD\x7A\x34\x25\x1A\xE6\x32\x56\x0E\x06\xE8\xDD\x26\x51\x01\x4D\xC5\xD8\x06\x88\x12\x41\xD6\x7E\x9A\x11\x16\xA7\x00\xF9\x66\x15\x3F\x15\xF3\xE0\x18\x63\x02\x11\x11\x2F\x64\x0F\x8E\xF6\xC0\xC3\x3A\xBE\x6A\xCC\x54\xAA\x64\x02\x65\x48\xFE\x29\x51\x8A\xF1\xD8\xF3\xC4\x83\x71\xC3\x65\x19\x82\xAD\x3A\x67\xE2\xC4\xDF\xC3\xD9\xE1\xD1\x2E\x20\xC2\xA3\x5D\xD0\xD1\x2E\xF8\xDC\x17\x52\xD7\x6C\xAB\x5E\xE2\x79\x6E\
x36\xB5\xA6\xD6\x1A\x8A\xE9\xD6\x9C\xFD\xB4\x5D\xD2\x65\x06\x5E\x82\xD1\x4E\x06\xA4\xF4\xB4\xA4\x12\xA6\x56\x11\xCB\x3E\x11\xB5\x8A\x78\xAC\x08\x45\x8B\x90\x44\xA3\x68\xFA\xA5\x63\xF7\x6B\x8F\x89\xD3\xD4\xA1\xF9\xEC\x58\xC5\xB3\x49\xA6\xD8\x6F\xC9\x7A\xE5\x2F\x11\xC0\xDB\x82\xDB\x1E\x95\x59\xE3\x13\xB3\xD7\xF3\xEA\x88\x3D\xBC\xB5\x87\x27\xB3\xFC\x85\x73\x5C\xAE\x3A\xE4\xBC\xDF\x52\x2D\xA8\x52\x73\x33\xFB\x88\x3E\x06\xB9\x8D\xD8\xBE\x67\xBD\x19\x3D\x37\x8C\xA2\x28\xF2\xF0\x7F\x82\x68\xDE\x7F\xBD\xF7\x4A\x32\x8B\xC3\x49\xFB\x05\xEF\x30\x65\x95\x84\xB8\xE0\x02\xB5\x7B\x3D\x8D\xCF\xFF\x98\x12\x38\x10\xA5\x2A\x3F\x46\xAB\x50\x7B\x3D\xBF\x0E\x0F\x15\x78\xDE\x7C\x72\x3E\xB7\xFB\x0E\x17\x01\x67\xD8\xEB\xCF\xC4\x68\xDF\xFD\xA7\x3C\x03\xF1\xA1\x22\x82\xD8\xFE\xA1\xC7\x89\xE4\x78\xED\xAA\xFB\x86\xFB\xE2\x17\xDC\x2F\x7F\xF8\xFD\xA4\xFB\x7E\x80\xDF\x8F\x59\xEE\x62\x7B\xCC\xBE\x23\x54\xD9\x8D\x13\x15\x20\x48\x69\x52\x1C\x5D\x15\x41\x4C\x44\x32\x6C\xBD\xCA\x63\x3A\x69\x3D\xE0\xD6\x49\xA5\x20\xC2\xF7\xB2\xB9\x51\x11\x01\x7E\x83\x52\xA9\x24\x10\xAF\xD6\xAA\x4D\xC4\x79\xCD\xEA\xF3\x8B\x30\x27\x63\x0E\x2D\xF6\x50\x11\x42\x6C\x3F\x46\x6E\xF5\xBC\xBE\x1E\x5C\x87\x6B\xFB\x7D\x8F\x7D\xF1\x09\x86\x11\xC7\xB5\x44\xD2\x35\xA2\xAE\x11\x75\x0D\x5C\xD7\x10\x7F\x85\xF8\xEB\x0F\xDD\xAF\x08\x7F\xFD\x85\xFB\x15\xE3\xAF\xBF\x71\xBF\x92\x22\x24\xF7\x34\xAB\x19\x58\x39\x05\xB4\xA4\x4E\x5D\xE0\xCF\x22\x4D\x62\x7F\x4F\xBE\xCE\x09\xA2\x22\xFB\x11\x5E\x40\x88\x1B\x2C\x1E\x9F\xB4\x18\x42\x51\x5F\x91\x41\xE4\x9B\x8F\x50\xBA\x91\xF0\x24\xC9\x4B\xD8\x28\xA5\x86\xC9\xA1\x42\xE7\xB1\x78\xA1\x47\xD8\x9F\xED\x0C\x0C\x78\x8A\x84\x25\x75\x1E\x29\xC2\x25\x6B\x7D\x38\x78\x12\x8A\xDD\x78\x9F\x4B\x16\x8C\xDF\xAE\xDD\xD6\x28\xFE\xB6\x00\x54\x9E\x19\x9E\x23\xFB\x4C\x28\x52\x8E\xC4\x92\xFB\x40\x41\x44\xDD\x0B\xC6\x3F\xDF\x39\x5B\xC9\xCC\x2C\x1D\xC5\x47\xDE\x73\xD7\x06\x7B\x95\xD5\x09\x95\x7B\xB2\x59\xF9\x6A\xBA\x44\x94\x3E\x80\x5C\x27\x35\xC0\x41\x70\x66\x7B\x59\x23\x19\xEC\xD1\x39\xB9\x94\x4B\xB2\x4E\xB2\xA8\xD9\x2B\xDD\
x68\xF2\x30\x74\xB9\x3C\x21\xB2\xF5\x91\x22\xE0\x1B\x71\xE3\xCE\xC6\xAA\xE3\x45\x80\xF7\x5A\xBC\x8A\x5F\x81\x94\x83\x5B\x52\x0A\x25\x8C\xEC\xC5\x47\x1E\xFC\x38\xB6\x00\x4F\xA2\x2B\x3C\x4E\x32\x1D\xAD\x22\x41\xE2\x67\x27\x8F\x4E\x70\x60\x4A\xFB\x72\x7C\xB5\x22\x63\xB5\xF4\x6B\x4B\x66\x46\xF6\xDC\xEB\xBE\xFC\x39\x9F\xC8\xBC\x7D\x39\xFE\x89\xB3\x62\x9B\x16\x8E\x41\xA9\xF4\x79\x4A\xBC\x86\x27\xF6\x0C\x29\xDC\x5C\x3D\xFF\x0C\x1D\xA7\x8D\xB3\xE9\x8B\xC8\x93\xFD\x25\x27\x40\x35\xB5\xB2\xDE\x11\xD6\x4C\x12\xDF\x12\x82\x5F\x5E\x64\xBE\x05\x01\x11\x80\x5F\x7E\xFC\x9C\x18\x5B\xA9\x14\x69\x8E\xAB\x5C\x96\xBB\x1E\x2F\x33\xEB\xF1\x2D\x4F\x1E\x26\x9C\x58\x9B\xD2\x4C\x92\x60\x1B\xD9\xEC\x04\xD7\xF9\x8A\x21\x86\xF4\xF0\xA4\x8E\xAF\xF3\xBC\xC6\xDE\xDA\xB0\x83\xA4\x64\xB6\x20\xAD\xA3\xEA\xA7\x13\xE8\xB9\x0F\xD1\x9A\xA5\xE4\xE6\x70\xAC\xE8\x3A\xCF\xC3\xA1\x62\xF7\x2E\x3B\x81\x82\x9C\xE5\x04\xCE\x35\x36\xAA\x93\x49\xE3\xBE\x98\xC9\x25\x67\x35\x0E\x91\xA0\xF8\x37\xAF\x2D\x64\x36\x3B\xF1\x4C\x87\x9D\x76\x60\x11\x42\x1A\xBB\xED\x3C\x33\x70\xAE\xC6\x1B\xF6\xE7\xF9\x3A\xA6\xC4\x8F\x9F\x3C\x27\xEE\x3C\x9E\xD5\x0E\xC1\x4C\x2F\x0D\xA4\xB1\x83\xF4\x91\x92\xD1\x9B\x14\x6A\xC4\xBD\x7A\x47\xAC\x3E\x69\x55\xF9\xD3\x1C\xA0\xAE\xF7\xE8\xBC\xE2\x5C\xE5\xAE\xDC\x91\xF9\xC2\x32\x39\x21\xF0\x7D\x5F\xBE\xCF\x55\x97\x33\x64\x77\x51\xE5\x7B\xE5\x81\x64\x5F\x3F\x2A\x55\xF7\xAC\x69\xB8\x4C\x82\x12\x57\x57\x3A\x4B\x7B\x9D\x4C\x5D\xFE\x57\xE1\xE8\x81\xFC\xB6\x2E\x7A\x6B\x85\x67\x9F\x4B\x6E\x76\x11\x68\x8E\x55\xA2\xC8\x46\xBF\x4D\xB4\x11\x99\xC1\xF7\x64\x9C\xFF\xE6\x3E\x1F\xD9\x4F\x7D\xF8\xBF\xEA\xFD\x9E\x67\x7D\xAE\xCB\x19\x50\xCE\xEC\xA0\xB1\xDE\x5E\xEF\x83\xC8\xD2\xDF\x40\xE9\x72\xBC\x3D\xDE\x7F\xC3\x5F\xFB\x3D\xAE\x2B\x4E\xBE\x69\x32\x75\x0D\xAA\xFC\x39\xC9\x56\xE3\x43\x04\x51\xF9\x6E\xE2\x66\xBC\x72\xEB\x9C\x48\xFF\x7B\x74\x8E\xDB\x88\x6D\x23\xF6\xD7\x8B\x90\x9F\xBF\x93\xDD\x00\xD4\x1E\xBD\x0C\x11\x49\xDC\x09\xCA\xFC\x37\x78\x29\x15\x6C\x77\x16\x6C\x0E\xEE\xF6\xF1\xE4\x25\x90\xD2\xE3\x13\xE4\x1C\x29\x96\x6F\
x8E\x8F\xF4\x11\x07\xE8\xFD\xD6\x17\x03\x7A\x2F\xF6\x72\xD7\xDD\xE7\xD7\x17\xEF\xFE\x0E\x7A\x2B\x36\x76\xD7\x39\xE0\xB7\xE7\x7F\x34\xA2\xB7\xE2\x42\xE0\xDE\x86\xFC\xF6\x35\x9F\xF2\xE8\xED\xA5\x4B\xFE\x9A\xB8\x58\xB8\xD1\x23\x6E\xF1\xA5\xB8\x6B\xF0\xD0\x60\x88\x98\x1B\x3C\x24\x23\xA8\x35\xF6\xF8\x70\xAF\x13\x7E\xDD\xC5\x7B\xFA\x36\xE5\x04\xE6\x67\xBA\x30\x04\x52\x3A\xB0\x57\x04\xA8\xF2\x3D\xDD\x2E\x26\x4D\xED\xE3\x96\xFD\x72\x6F\xCB\x9E\xE3\x7D\x08\x7F\x5D\xEF\x79\xBD\x22\x6F\x52\xE9\x97\x06\xB2\x1F\xFD\x64\x4F\xAB\xAC\xB8\x82\x2D\xA2\x61\x23\x89\x8F\xB4\x4D\xD6\xD8\x18\xE3\x8B\x77\x22\x65\x01\x2A\xD7\x84\x89\x21\x15\x08\x3E\xCB\xEC\x0F\x1D\xA3\x62\xEF\xA0\x6D\xB0\xD6\xB6\xA7\x59\xBE\xBF\x87\xEA\x81\xD5\x8D\xF5\x5F\x5C\xFB\x9C\xA6\xC3\xB7\xDE\x51\x4E\x80\xC5\xA9\x62\xD6\xE9\x65\x84\x2F\x23\x4A\x85\x3F\x41\x4C\xE3\x12\xBF\x49\xED\x43\xBC\xCA\x39\x3F\x62\xFC\xFF\x63\x13\x24\xC6\x47\x27\x76\xA9\xA9\x07\x07\x46\xD9\xBD\x1C\x49\x39\x38\x30\x51\xEF\xC0\x44\x10\xB2\x1F\x6F\x7B\x60\xA2\xF6\xC0\xF8\x2E\x01\x0F\x2D\xCE\xB7\xEF\xFC\x9C\xF7\xCF\x05\xBB\x70\x54\xEB\xE1\xA5\xE6\xF3\x91\x8D\xEC\x3E\xF0\x5D\x9A\x8A\x98\x6C\xC7\x6D\xC6\x30\xE2\x2C\xFC\x5B\x28\xEF\x93\x2F\x51\xFF\xEE\x9C\xE0\x29\xF1\xC1\xDF\xE6\x9C\x28\x3E\x27\xE9\xA3\x38\x27\xF9\xF0\x9C\xE4\xD3\xE7\x24\x1F\x9E\x93\x7C\xEA\x9C\xE4\xC3\x73\x92\x0F\xCF\x49\x3E\x3C\x27\xF9\xF0\x9C\xE4\xC3\x73\x92\xCF\x9E\x93\x7C\x78\x4E\xF2\x99\x73\x92\x0F\xCF\x49\x3E\x75\x4E\xF2\xE1\x39\xC9\x17\x9E\x13\xCE\x58\xC0\xE7\x84\xB3\xB7\xC4\x84\x3D\x44\x61\xB7\x27\x53\xAE\x08\x36\x6D\x80\xF7\xCC\x06\x3C\xEE\x0D\x30\x46\x80\x7F\x33\xB9\xC8\x51\x4D\x07\x88\x24\xA9\x08\xDD\xE7\x91\x34\x58\x3D\x4A\x71\x9A\x10\x49\xE0\xAF\xDB\xA9\x04\xC1\x0B\x09\xEF\x14\x1E\xFA\xA6\x8E\x86\xA7\x25\xE5\xCD\x4A\x29\xFB\x13\x9F\x3E\xB2\x0F\xD1\x9E\xA5\x6E\xCF\x28\x31\x39\xF9\x09\xF8\x54\x04\x07\x45\x0A\xCA\xA5\x48\xB5\xD6\x65\xAB\x6E\xAB\x47\x02\xA4\x1C\x46\xB2\x79\xDC\x87\x9D\x98\xE6\x77\xA4\xAD\x1D\x09\x28\xA8\x23\xED\xEA\x65\x3B\xD2\x9E\xF7\x3F\x48\xDB\x7D\xD9\x6E\
x84\x0C\xFD\xCF\x11\x1E\x5C\xB6\x1B\x61\x49\xBF\x1B\x21\xC8\x65\xBB\xB5\xE8\xD3\x9F\x28\x62\xCE\xCE\x7A\x3E\x34\xF5\xD1\x87\x76\xF4\x4D\x46\xB8\x7E\xBF\xEC\x72\xDD\xF0\x28\x2A\xEA\xCD\x9D\x52\xC4\xC5\xD4\x85\x59\x72\xB2\x30\x41\x49\xF2\x6E\x8F\x71\x04\xC3\x75\x12\xED\x47\x3C\xFB\x7B\x5C\x91\x6C\xC2\xE8\xBC\xD7\x3E\x17\xDB\xAC\xA2\x6C\x72\x74\x82\x23\x2B\x50\xF6\xDA\x1F\xA8\x23\xAA\xE4\xAC\xF8\x04\x2B\x50\xD3\x14\x84\x2F\x40\xE0\x24\xA1\xF6\x9B\xBA\x90\x65\xC3\x65\x11\x20\x68\x56\x0A\xCF\xFE\xC6\x27\x07\x26\x5E\x1F\x42\xFC\x14\x05\xD6\xA3\x04\xFC\xA5\xFE\xAD\xEC\x73\x7D\x13\x12\x7D\x3C\x50\x63\xC9\x96\xEF\xD9\x75\xF3\x77\x81\x37\x57\x33\xA4\xED\xD9\x66\x94\x26\x71\xC4\x8E\x5A\x29\x5B\xA2\x7D\xD2\xA9\x30\xD0\xBC\xB6\xA8\x87\xEA\xFC\xDB\x18\x72\x12\x44\xAE\x5C\x94\xCB\x89\x55\x52\x42\xB9\x01\x7C\x1E\xC0\xEF\x0F\x20\x8E\x73\x8C\x20\xFD\xFE\x7C\x82\x2E\xD7\x5D\xFC\xF1\x66\xBB\xF3\x39\xBA\x4C\x77\x71\xF2\x9B\xE9\xCD\xA7\xE9\x32\x9D\xC5\x6F\x70\xA6\x33\x9F\xA9\xCB\x74\xA6\x23\x35\xA7\x33\x9F\xAC\xCB\x74\xC6\x33\x35\xA7\x2F\x1D\xAD\xCB\x74\x7D\x68\xFE\x67\x1F\xDA\xC1\x57\xB3\xB9\x3D\xB3\xCB\x74\xE4\xC3\x85\xFD\xBB\xAE\xE6\x7F\xD6\x6A\x5D\x9D\x95\x44\xF1\x6A\x0B\xBC\xAD\xB3\xB5\x7F\x10\xEE\xD9\xC4\x01\x36\xAD\xB7\x47\xFF\x34\xD9\xED\x5E\x41\x76\x3B\x2E\xC5\xB0\x4F\xA9\xF5\x41\x0F\x69\x4F\x22\xD5\x1E\xFD\x2E\xEA\xF0\xF2\x5E\x87\x1B\x95\x62\xC7\x18\xBD\x05\x6A\xEB\x6C\x1D\xB4\x1D\x02\xFA\xC4\x3B\x7B\x3D\x6A\x2D\xE5\x6A\x68\x8A\xDC\xDF\xBA\x49\xFA\xB3\x63\x04\x9B\xB5\x06\xAF\xA9\x15\xA7\x48\x55\x33\x53\x46\xBA\x62\xDE\xA6\x38\xBD\x00\xB9\xD4\xDC\x87\xAF\x49\x41\x74\xD5\x74\x6B\x29\x34\x27\x06\x11\x3C\xCA\xFE\x9A\x64\x6F\xC0\xE3\xBA\xCC\xB9\x57\x15\xDE\x4B\xD3\x3D\xA9\x9E\xA5\x87\x93\xD7\x33\xE6\x4E\xAA\xF2\x0E\x8A\x8B\x63\x72\x49\xD8\x83\xDE\xF5\x5C\x1A\x8D\xF2\x8C\xD2\x06\x99\x0F\x2A\xA5\xD7\x59\x2F\x22\x6A\xA0\x12\xD4\xCD\x85\x67\x5F\xF1\x2B\xD3\xEE\xEF\x53\xB3\xF3\xDA\xD9\x79\xF3\x67\x17\xE0\xEC\x02\xA9\x80\x4B\x33\x2D\x3F\xCC\x92\x74\xF9\x8B\x9D\x82\x3B\x07\x4F\xE6\xC5\xCE\
x29\x1B\xC1\x49\xA4\x51\x54\x4B\x1D\x7C\x9A\x90\xD5\xA7\xA5\xE4\x16\xAB\xC7\x4A\xF0\xCA\x8F\xB0\xD6\xDB\xBC\x76\x38\xFF\x6E\x8A\x5C\xE4\xC8\x4D\x6C\xB0\xDD\xC1\x21\xA7\x87\x2B\x11\x1F\xF6\xEB\x65\x4B\xD4\x91\x66\x29\x3E\x70\x0C\x88\x17\x50\xD5\xD3\xC1\xAC\x6D\x5B\x8D\x64\xFE\xCC\x71\xEB\xDF\xA2\x3C\x56\x05\xBE\xE6\xE7\x48\xBB\xF7\xFC\x42\x7E\xDF\xD3\xFE\xB6\xCA\xD2\x9E\xDD\xCD\x4F\x8E\x4F\xC6\xC6\xFA\x96\xFC\xA6\x5E\xDD\x3D\xE2\x5E\x6F\xE8\x7A\x85\x16\x05\x6A\xFD\xBA\xE9\x26\xE7\xBB\x26\xB1\x4D\xF0\xC9\x9B\x7A\x03\xA7\x1C\x4E\xF6\x36\xF7\xC8\x5C\xCF\xA6\x78\x0F\xD9\x20\xC5\x02\xDD\x9B\xBB\xFD\xA6\xE7\xAC\x21\xD4\xA7\x09\xB6\x0D\x3B\xF7\x53\xEE\x9A\xBD\x9E\x67\xF5\x5A\xFB\x67\xDE\xFD\xA9\xBA\x3F\x83\xEE\xCF\xD1\x9A\x79\x3E\x9F\x5E\x25\x89\x48\xEC\x88\xA5\xC0\x47\xFB\xF9\x43\xF4\xB0\xFF\xB9\xAB\x3D\x97\x51\x85\x32\xE8\xF4\xA2\xA7\xCD\x71\x59\xE2\x5E\xCF\xAB\x15\xA7\x60\xC3\xA1\xA6\xCF\x09\x35\x60\x1A\xA6\x48\x3A\x05\x6F\x26\x30\x0D\x0F\x09\x65\x59\xF0\xAE\xF3\xBC\xDA\xA7\x3A\x2E\xF6\xED\xFD\x09\xEF\xF5\xE4\x0A\xF7\x25\xD1\xB7\xFD\xC5\xC1\xF1\x69\x49\x40\xCD\x18\x5D\x07\x78\xC4\x7E\x67\xB8\xE6\xB2\x0A\x3A\x52\xE1\x1B\xAB\x28\xF7\x1A\xF2\x26\x3E\xA8\xA3\x93\xDA\x87\x80\xCB\x5F\xD1\xEA\x85\x55\x80\xE0\xE6\x42\x49\xB5\xEF\x9F\x3D\x47\x85\xAE\xE4\xD7\xBB\xCE\xB9\x0A\x54\xE6\x2F\x25\x93\xC5\x65\x00\x4E\x11\x6C\x2D\xD6\x4E\x43\xBF\xD6\x53\x68\xAC\x67\xD0\x58\x1F\x9F\x70\xB5\x91\x01\x26\xF3\x53\x35\x40\x66\x3D\x83\xCC\xC3\x56\xE7\xBB\x56\x03\x7C\x76\xAD\x86\x28\x4D\x4F\xD9\xC3\x56\xD9\xF4\x44\x63\x5F\xD2\xEC\xD7\x89\xD9\x4D\xF5\x62\x10\x58\x7F\xAA\xD6\x6C\x80\x4C\x8A\x19\xB7\xCF\x14\xFD\x2E\xDB\xDF\x5F\x53\x6B\xC3\x06\xD7\xD2\xEF\x6E\x90\x87\xD5\x1A\x70\xA7\x3F\xD2\x2A\x58\x67\x97\x49\xDF\xA6\x2F\x72\x01\x41\x5C\xD8\x70\x7B\x1C\x81\xD0\x7A\x47\xA8\xB2\xDA\x5C\x54\x21\x9B\x23\x12\xA0\x0E\x63\x02\xC6\x98\x68\x2E\xC6\x44\x1D\xC6\x04\xAE\x84\x56\x80\x84\xF3\xE8\xA4\x0E\x20\x3A\x2C\x6A\xBB\x1E\xC6\x04\x10\xDD\x4C\x15\x71\x03\x87\x31\x2E\x28\x2F\x68\x91\x26\x30\x5D\xE9\x72\xA9\x7A\
x43\x8E\x4D\x09\x07\xA7\x05\xEC\xF7\x7B\x54\x8E\x94\x44\xA7\x87\x83\xC0\x27\x8E\x18\xE6\x28\x4A\x52\x4B\x7A\x47\x5D\x74\x88\xDF\xEB\x55\x79\x94\xAB\x84\xE2\x82\xD9\x66\x7A\xA6\x8B\xE2\x11\xB6\xDB\x5C\x54\x2A\x58\x48\x3F\xF4\x02\x74\xE6\x52\x70\x0E\x87\x6B\x25\x58\x8C\x28\xEB\xCA\x28\x39\x1B\x00\xFB\x74\x29\xA9\x97\x89\xB3\xCC\x6E\x2F\x38\x3E\x49\x71\xE0\x00\x45\x9C\x65\xB7\x1F\x92\xBA\x5D\x6D\x59\xE6\x74\x98\x53\x88\xAB\x5C\x7B\x7B\x74\x00\x7E\x63\xD7\x4F\xD9\x6B\xEF\x60\x92\xF5\xB9\x5E\x75\xEB\xCE\x2D\xC2\x61\x4B\xB0\x00\x5B\x02\x23\xA9\x0E\xA6\xD1\x64\xF0\x1A\xD6\x78\x1F\xCA\x0E\x65\xC2\x9B\x89\x17\xF3\x6E\xF0\xC6\x20\x7B\x14\xDA\x00\x7C\x3B\x6E\xCA\x77\x9E\xA3\xD8\xE4\x65\x4D\x09\xC5\xC6\x8E\xE0\x51\xC7\xFD\xBA\x34\x6E\xEC\xA9\x3B\x8E\xCE\xD6\x49\x08\xF0\x14\x88\xE8\xDE\xAA\xBC\x7F\xFF\xEB\x60\x75\xB4\x30\xA6\x11\x72\x9F\x83\x3A\xC9\x18\xDC\xAE\xF3\x63\x81\x92\x32\x4D\x4A\x6A\x79\x89\x85\x1E\x11\x2C\xC0\x0B\x68\xB5\x0E\xDB\x18\x1B\x7C\x43\xF9\x95\x24\xDA\x52\x7C\x02\x94\xD5\x20\x8D\x56\x26\xB5\xE2\xE3\xE5\xE8\x0B\x13\xA5\x3E\x15\x0E\xD8\xE3\x45\x48\x70\x30\x45\x82\x83\x19\x12\x1C\x1C\x9F\x50\xFC\xEE\x90\x04\xF3\x53\x3D\x20\xC1\xC1\x0C\x09\x1E\xB6\x3A\xDF\xB5\x1A\x90\x60\xD7\x6A\x48\x82\xF9\xA9\x32\x43\x98\x74\x42\x62\xC8\xE0\x08\xBB\xE0\xD3\x0E\x1C\x6A\x11\x38\x9E\x10\x58\x4C\x5F\x47\x6E\xA6\x3B\x81\x85\xDA\x11\x2C\xA6\xAF\x23\x7A\xCA\x6C\x71\x7A\x02\x02\xB9\x8E\xFA\x06\x24\xF3\x89\x50\x50\xA9\xCD\xCD\x66\x47\x22\xFB\xE7\x6B\x54\x97\x91\xD3\x01\x29\x7B\x0D\x2E\xB6\xFC\x95\xF6\x6A\x57\xA0\xCB\x4F\x9D\x73\xC7\x84\x48\x21\x97\x73\xF4\x85\x82\xDD\x4C\xCC\xAD\x0F\xC1\x6A\x1D\x94\xF7\x8B\xC2\xCB\x63\xAA\x14\x34\xFB\x75\x30\x57\xF2\x57\xF6\x2A\xFC\xD0\x28\x09\x53\xED\x7B\x81\x8A\xA8\xC2\x46\x70\x9C\x2A\x54\x83\x6F\xC3\xE3\x45\x84\xFF\x1C\x29\xC8\xB5\x67\x99\xB2\xB2\xAC\x55\x64\xB3\xF6\x0F\x15\x09\xCF\x66\xFD\x14\x3F\x0C\xE8\x61\xEC\xF8\x34\x8E\x28\xC7\x41\x38\x82\xDC\xC7\x3B\x30\xE0\x70\x50\xEF\x48\x3B\x5E\x76\x7B\x1D\x1C\x82\xC0\x86\x87\x4F\xAE\xB5\x77\x90\x96\x1A\x90\
xCB\x44\xD6\xCB\xD7\x3B\x93\x00\xFE\xEA\x73\x28\xED\x36\xB1\xF7\x0E\x2E\x97\x4A\x13\x4B\x21\x6A\xBA\x2B\x82\x05\x77\x45\x30\xC5\xFA\x04\xD3\xAC\x4F\x38\x85\x6B\xE1\x0C\xAE\x85\xC7\x27\x94\x21\x71\x88\x6B\xFC\x74\x88\x6B\xE1\x0C\xAE\x0D\x5B\x9D\xEF\x5A\x0D\x70\xCD\xB5\x1A\xE2\x1A\x3D\x65\xA7\xB1\xF4\x04\x84\x88\x6B\x28\x78\x25\x5C\x78\xD6\x23\xE6\xD0\x2A\xC6\x83\xAB\x7B\x17\x79\x1F\x42\x03\xB4\x32\x42\xFE\x6A\xCF\x6E\x00\x53\xC0\x43\x5C\xE8\xDE\x5E\xA4\xDF\x66\x6C\x5A\x89\x87\x1F\xFC\x41\xB4\x88\xFE\xB7\x16\x4A\xC4\x50\xFC\xC6\x6A\x8D\xC2\x25\x27\x55\x73\x17\x84\x5A\x70\x41\x08\x53\xBE\xE8\x82\x90\xD7\x73\x2E\x08\x35\x7B\x41\xA8\xE9\x0B\x62\xD1\xF5\x80\xF7\x41\x92\x3B\x5F\x03\x0D\x5A\x7C\x0D\x34\x05\x03\x4A\x15\x5E\x61\x7C\x18\x85\x5B\x3E\xB9\x0E\x1A\xAA\x79\x0F\x21\x79\x2D\x0C\xD8\xA1\xDB\xD9\x1D\xDC\x2A\xF2\x3C\x98\x90\xB3\xA3\x3E\x0D\x01\xE7\xA4\xA0\x8C\x6D\x8A\x8D\x37\x54\x3E\x95\x3D\x1F\xBA\xCC\x14\xC8\x96\x88\x9B\x0C\xF9\x4D\x3D\x36\xB8\x0D\x04\x11\xC5\x6C\xA5\x9E\xCB\x56\xEA\x9E\xCE\x82\xFC\x1A\x40\x81\x82\xF0\xE8\xA4\x56\xA0\x67\x84\x10\x05\x9A\x85\x10\x45\x3A\x7E\xE6\x2A\xAF\x16\xCD\x00\x3F\x7A\x97\x14\xAF\x66\x3F\x0C\xE2\x1F\xE7\x1F\xC5\x70\xEA\x28\x12\x60\x25\xC5\x88\x0B\xB4\xA7\xD2\xBE\xFD\xE3\xA8\x66\x8E\xA3\x3A\x3E\xA1\x50\xD1\xE1\x71\xE4\xA7\xC3\xE3\xA8\x66\x8E\xE3\xB0\xD5\xF9\xAE\xD5\xE0\x38\xBA\x56\xC3\xE3\x48\x4F\xD9\xA7\x2A\x45\xBC\x61\xD2\xBF\x78\xBD\xC4\x4E\x33\x32\x69\x0A\x75\x97\x25\x22\x86\x11\x2A\xF6\x1D\x07\xCD\x6F\x8F\x54\x30\xEF\xA0\x39\xFF\x1D\x1A\x29\x69\x38\x61\x0A\x1B\x65\x6F\xEE\x38\xE5\xED\xA8\xDF\x0C\xC9\x63\x0D\x09\x0D\x4A\x57\x06\xFE\x7D\x0D\xEB\x4D\x98\xB2\xAC\x0A\xED\x78\x0A\x4E\xB1\x19\x9E\x62\x33\x3C\xC5\x66\xDE\x29\x7E\x39\x07\xB4\x26\x22\xF3\x73\xB2\x20\xA2\x59\xC4\xD5\x49\x3D\xEC\xEE\xF6\xDB\xA3\xAF\x1A\xA5\x5E\xA0\xB4\xE7\x79\x81\x47\xA2\x3C\x5D\xCB\x81\xF8\xB9\x3D\xE6\xD5\xA1\xC8\xB3\xC6\x9E\x1B\xDB\xF3\xB2\xF9\x90\x97\xCD\xA7\x79\xD9\x7C\x01\x2F\xAB\x1C\xD1\xE2\x9D\x21\xFE\xDC\x6A\x64\x23\x29\x7D\x5F\xD0\x5B\x0A\x97\xCD\
x61\x5A\xFF\xCB\xEC\x1C\x89\x4C\x81\xDB\x4D\xD7\xFA\x71\xAF\x79\xD1\x62\x83\x76\xB1\x05\xB4\xA2\x23\x2E\xB6\x98\x5E\x6C\xB1\x83\xC5\xD2\x52\x7B\x2B\xF5\x07\x2B\xBD\xCC\x42\xFD\xA7\x68\xA1\xA3\xE1\x42\x47\xD3\x0B\x1D\xED\x78\xA1\x7E\xBB\xD0\xCE\x23\x63\xBB\x25\xB6\xAE\x84\x82\xF7\x17\xF7\x11\xE2\x2B\xD3\x3B\xCB\x01\x44\x6D\x0F\x22\x15\xC0\xE7\x3B\x5A\xAD\x03\xD0\xE5\x6F\x32\xAF\xA5\x5A\xEE\x71\xB5\x37\x1E\x9D\xA3\xCB\x93\x96\x19\xC2\xC2\x05\x46\xD4\x1E\x9D\xBC\xA0\x50\x2D\x23\x71\xD1\x93\xD9\xC9\xBB\x3A\x78\x41\xD1\x52\x1A\x3A\x3E\xF4\x9D\xC5\xB7\x86\x24\xF5\xE4\x84\x83\x33\x94\x8C\x66\xFF\x02\x5A\x89\x90\xB3\x7F\x84\x9C\xC7\xE5\x68\x96\x32\x53\xE0\x82\x40\x1C\x3D\x6F\x16\xBA\x1C\xAE\x0A\xC3\x1F\xCA\x42\x81\x8B\xB8\x13\x6B\x0F\xBA\xFC\x34\x21\x8A\xF0\x77\xCA\xF1\x77\x21\xF3\x77\xCA\x5E\x3C\x3F\xD8\xD4\x47\xA1\x99\x7D\x4A\x98\xB9\x6C\x08\xD2\x6C\x08\xD2\xEC\x31\x80\x74\x70\xB9\x7E\x31\x50\xE1\xAC\xE7\x5E\xA7\x72\xC2\x79\xBF\xFE\x53\x53\x11\xB6\xB4\xD8\x1F\xEF\x3D\x15\x8B\xD1\xE5\x2F\x5C\xE5\xC2\x6D\x99\xC5\x09\xDB\xE3\x21\xF0\x27\x81\x8E\xE2\xC4\xC8\x0E\xA2\x0E\x97\x0F\xB6\xBE\x26\x4F\x22\xA4\xF5\x0C\xF2\xEA\x01\xF2\xEA\x1D\x23\xAF\x2B\xD5\x12\x80\x2A\xFF\xEF\xBE\x18\x9A\x80\x5A\x45\x49\x3F\x21\x8D\x09\x25\x18\x4D\x5F\x54\x87\xF6\x25\x27\x48\x2F\x7A\x99\xC5\x41\x04\xC1\x11\xB9\x50\xE7\x29\x50\x43\x08\x1A\xE7\x89\x3D\x50\xA0\x86\x73\x39\xDD\x70\xA8\x40\xA5\x5A\x7A\x01\xA9\xDB\x03\x08\x67\x38\xDD\x00\x42\xE6\x74\x11\x2C\x53\x9C\xAE\x3C\xEA\x38\x5D\xB5\x5F\x53\x5E\xF8\x5E\x85\xF0\xCF\xFA\x03\x49\x29\x18\x6A\xCA\xC2\x05\x8B\x0E\x5D\x3A\xD3\xF9\x3B\x2A\xAF\xE7\xEC\x28\x91\xF4\x60\xB0\xA3\x91\x0D\x20\x98\xD2\x94\x05\x8B\x35\x65\xA1\xE8\x80\x45\xA1\xD3\x66\xB0\x14\xFD\x96\xDF\xE9\xB7\x74\x4F\xA1\xA3\x5B\x85\x8E\x66\x85\x4E\xD8\x29\xCF\x87\xEC\xBD\xCF\x99\x7E\x9F\x3E\x62\xB6\x6E\xC5\x6C\x12\xB2\x3B\xF2\xF1\xF3\x4A\xE9\xA7\x70\x6B\xC3\xD9\xAD\x0D\xA7\xB7\x76\xD1\xC6\x4A\x0D\xFB\x4F\x48\x58\x57\xBB\x84\x57\xA5\x3A\xEC\xC2\xF7\xF6\x0D\x04\x0C\x89\x3D\x12\x2D\x95\xBB\x87\xB4\x5C\
x4C\xD6\xAB\x02\xE7\x03\x37\xE7\x8E\x56\x72\x47\xD3\xD5\xAC\x10\x91\x22\xB7\xE5\xF1\xA1\x82\xD4\xFA\x87\x38\x4A\xC3\x13\x26\x75\xFA\x03\x3C\x38\x4A\x38\xC4\x1D\x78\x8D\x35\x47\xE7\x37\x13\x75\xB3\x57\x79\xBD\xA4\x5F\xD3\x33\xEA\xE6\x3A\x3D\x21\x71\x36\xAD\xC2\x3C\x30\x6C\xE4\x50\x86\xCC\x03\xD1\xAC\x79\x40\x1C\x21\x66\xCC\x03\x6D\x46\xB2\x94\x33\x77\x38\x57\x72\x9C\x94\xB1\xEB\x55\xC0\x6B\x09\xC1\x6B\x58\x8F\x3C\x5C\x06\xA9\xCA\xF4\x7E\x0D\xE0\xDB\x7D\x37\x78\x25\xC1\x85\x12\x59\xA2\x28\xBD\x5F\x3F\x9B\x56\xC9\x17\xD2\xDF\x0F\xA4\x7B\xB1\xCA\x0C\x2E\x24\xB7\xD4\x39\x97\x91\x12\x16\x0D\x01\xE0\x0A\x88\xB8\x8A\x88\xFA\x34\xB4\x20\x51\x7C\x4F\x82\x3A\x5C\x28\xF0\xF7\xE8\x67\x23\x0D\xEE\x19\xE5\x49\xF3\x48\xD6\x7E\x1F\xD4\x73\x74\x72\xBD\x7F\x15\x4E\x76\xAF\x57\xF2\xB1\xC7\x6D\x1D\xAF\x9D\xBC\xC1\x2B\x5D\x8D\x24\xDE\x44\xEB\x41\xB8\x5A\xC7\xC7\xA8\xA4\x14\xF2\x2A\xB1\x5C\x72\x57\x23\x73\x7C\x9D\x57\xCE\xB3\x1D\xE0\xB0\xDB\xD8\x0E\xFA\xAF\xE9\xD8\xF8\x7B\x34\xCC\xE3\xCC\xBF\x85\xF3\xEC\x3E\x5B\x38\xF3\x6F\x69\x8F\xCD\xB3\x71\x8C\x6F\x29\x24\x84\x5C\x8E\x0D\x18\xF0\xE5\x8E\xC3\xC5\xED\xD1\xCF\x6E\xEF\x38\xBC\xD5\xC2\xA6\x0E\xF7\xEB\xAB\x5A\xE0\x40\xCC\x57\xDE\xD5\x94\xC9\x3A\x7D\x11\xA5\xE0\x7F\xC9\x09\x5A\xF0\x91\x66\xBF\xBE\xC6\x0C\x80\x36\x0F\x54\x74\xA6\x70\x0E\xD4\x64\x8F\xBE\x86\x91\x15\xBF\xD0\x22\x6B\x15\xE3\xEF\xAB\xD8\xD5\x26\xBD\x9D\x0C\x64\x21\x79\x35\x3A\x3E\x38\x6A\xEC\x9D\x0D\x2B\x92\x53\x50\x93\x9A\xFC\x21\xBE\xFA\x6E\xD1\x4A\xDD\x56\x87\x36\x03\xD5\x4B\x59\xC8\xD1\xA2\x67\xEB\x84\x12\x5A\x53\x49\xC6\xE4\x20\xDC\xB3\x69\xBD\xC3\x45\x8F\xFF\x1C\x9C\x37\xF2\x92\x88\xC5\x72\x40\x88\x31\x3D\x98\xF8\xCD\x24\x9B\xAC\xFE\x1D\x8C\x42\x07\x02\x11\x6C\x9F\xBB\x26\x03\xF3\xCA\x90\xEC\xAB\x5D\xD6\x56\xDF\xF9\xA7\x15\xAE\x0A\x88\x7F\xC0\xD1\x1C\x8A\xBF\x71\x89\xE5\x7C\x3A\x3D\x08\x1A\xDD\x26\xA9\x95\xC4\x72\x7E\x3E\x6D\xE1\xF6\x77\x6E\xD5\x9E\xBD\x73\xF4\x9C\x3B\x67\xBE\x55\x7B\xFA\xCE\x99\x6F\xD5\x9E\xBD\x73\xE8\xA9\x69\x93\x42\xD6\x91\x4B\x20\xEB\xF3\xD2\xBB\xD0\x3C\
x49\x68\x17\x19\xD0\xAC\x91\xD7\x6D\x8E\x4C\xDD\x54\x3E\x92\x32\xDD\xCB\xA4\x49\xCF\x7C\x43\x4E\x63\x2E\xE3\x26\x3D\xC3\xBD\x19\xE4\xE6\xD4\xEC\x54\x2B\x99\x85\xFC\x41\x22\xC5\x1F\xEA\xA5\x1D\xD4\x83\xE4\x9F\xDD\x37\xFB\x89\x42\xBB\xAF\x0E\xD3\x8A\x76\x5F\x1E\xA6\x21\x95\x6F\x77\x1C\x47\xD0\xB1\x1A\xEC\x6C\xA1\xD8\x81\xD7\x2F\xBC\x16\x57\xEE\x74\xF9\x06\xA7\xD1\x42\x2F\x44\x0B\x03\x41\x2F\xED\x1B\x1D\xB9\x23\x6C\xB3\x36\x77\x92\x07\x17\x95\x63\xFA\x35\xE2\x60\x35\xBB\x5A\x3A\x87\x6F\x0F\xE7\xA4\xE9\x72\x16\x43\x07\x25\xCD\x26\x5B\x37\x55\x9C\x02\x0D\xDE\x64\xEC\x4A\xA9\x71\x6E\x2D\x89\xA4\xF3\xD8\x71\x4B\x41\x20\x07\xC1\x0D\x6D\xC0\x37\x6F\xD2\xE2\x70\xC2\xAE\x56\x44\xA8\xFD\xC7\x6C\x50\x35\x8F\x99\x1B\xEE\x3B\x13\x84\x73\x9D\x09\xC2\x1D\x39\x13\x74\x09\x3A\xF0\x7A\x60\xAD\x06\x51\xBF\x60\xE0\x5C\xD1\x52\x3F\x27\x7E\xC8\xFF\xE4\xDD\x65\x63\xAC\x32\xEF\xF3\x55\xB0\x3E\x60\x48\xA6\xEF\x41\x3D\x2B\x98\x5D\xFE\x1E\x64\x0A\xD3\x5E\x7F\xAA\x27\xDE\x3A\x55\x76\xF9\x1B\x6C\x87\xB3\x1C\x73\x05\xBA\xA9\x83\x55\xCA\x9F\x90\xBE\x88\xD2\x50\x53\x88\xEA\x65\x76\x09\x62\x08\x8F\x90\xAF\xC8\xDC\xCD\x82\x08\x42\xD2\x64\xF4\x98\xBD\x1D\xBA\x80\x84\x5C\xB0\x84\xA4\xAE\xA3\x93\x3A\x9C\xEB\x00\x12\x3A\x07\x90\x10\x7C\xD9\xB3\x90\xF7\x0C\x1F\x74\x5E\x43\x6E\xCF\xDA\x8D\x20\xB6\x25\x07\x5D\xBE\x59\xE2\xE1\x82\xD6\x37\x44\x76\x52\x60\x22\xF6\xA5\x39\xDB\x6A\x95\xB9\x10\x92\x2F\xD2\x3C\xE7\xE3\x81\xF1\x51\xD9\xE0\x78\xA5\x73\xCA\x5C\x4C\xC6\x47\x35\x63\x7C\xE4\xEC\xC5\x87\x8A\xA8\x6F\x7C\xE4\x5C\xC2\x87\x8A\xB0\x3D\x93\x92\x49\x38\x3C\xEE\xF2\x07\x7B\x47\x5C\xB6\xE6\xA1\xF1\x51\x1F\x02\xCD\xC6\x47\x4D\x55\xA8\x94\x99\x36\x3C\x56\x4A\x9C\x4B\x44\x80\x7F\xEC\xA6\x18\xCE\x56\x1F\xCC\xB1\xC8\x5C\xF6\x64\x2A\x27\xA7\x2A\xD0\xC7\x26\xB5\x9E\x7B\x32\xB5\x3B\x99\xBA\xDD\x65\xCD\xBB\xAC\xDB\x5D\xE6\x55\xEA\xDE\x2A\x49\x16\x53\xAD\x05\x95\x2A\x57\xA3\x08\xC7\x67\x6A\xB1\xA3\x4D\xFF\x56\xD5\xD3\xC6\xD3\xA7\x81\xB5\x46\xF7\xAD\x35\xC6\x7C\x97\x92\x82\x38\xAD\x69\xDE\x49\x27\xA3\x35\xB6\x49\x93\x9D\xDE\
x3D\xCC\x39\x14\x6C\xC0\xD6\x30\x35\x67\x9A\x57\x7E\x96\x0C\xAB\x3F\x1B\x70\x0D\x20\xAE\xE0\xC6\x15\x5C\x70\x7C\x6D\xF5\x1A\x53\x77\x56\x5E\x6B\xA9\x60\xC9\x76\x57\xAB\xD6\xC8\x86\xB0\x3A\x36\x96\x6A\x9F\x9D\xB6\x1A\x7F\xF4\x1A\xE0\x13\xBB\xDE\x8C\x5B\x3B\xB7\xBF\x60\xAB\x58\xB2\x18\xEC\x0E\xA3\xF2\x82\x4E\xAA\xDF\x49\xF5\xD5\x54\xC4\x7A\x69\xAE\x3F\xDB\x5B\x4B\x6F\x2E\x7C\x00\xB4\x34\x50\x6E\x9E\x3C\x73\x5C\x55\x1D\x38\x81\xAF\x8E\x0E\xB3\x08\xB0\xEA\x96\xD0\x9A\xB2\xEA\x78\x10\x5C\x58\xB7\x25\x78\xD9\xB9\x4D\x14\x92\x62\x40\xE3\xC7\xAB\x75\x20\x8E\x5D\x10\x52\xCA\x4D\x05\x41\xF9\x5B\xB8\x03\xAB\x9C\x3D\x56\x93\x39\xD3\xB7\x1E\x04\xAB\xFC\x82\x13\xF6\x8A\xD2\x93\x5C\xBE\x14\x57\x52\x90\xB2\x7B\x2E\x4F\x35\x33\x13\x34\x48\xC8\x05\xE3\x98\x3A\x36\x5D\x95\x9C\x40\x4A\x77\x10\x6B\x79\x18\x59\x8C\x75\xDC\x36\xD0\x2B\x88\x6A\x56\x09\x55\x22\x50\x80\x3A\x81\x57\x49\x0F\x38\xB5\xB7\x8A\x62\x90\x5D\x6F\x88\x81\x39\x3A\x31\xAF\xCA\xA8\x0A\x42\xBB\xDB\x5C\x4B\x84\x8A\x1F\xBD\xE5\xFE\x19\x0F\x6D\x2A\x87\x10\x4A\x39\x84\xA8\x10\x4F\x7C\x3D\x5D\x10\x21\xEC\x15\x44\xA0\x2A\x49\xE4\xC1\x76\xA8\xE0\xC2\x08\x95\xCF\xC9\xB8\x95\x2B\x8F\x20\xE1\x59\x0A\xFC\x76\x41\x0A\xFC\x95\xC9\xD8\x70\x88\x5B\x2F\xDB\x79\xEF\x4B\x82\x25\xBA\x2B\x8D\x10\xAD\xD6\x09\xA7\x42\x7F\xFF\xFD\x83\xD2\x08\x89\xE3\x6C\xBB\x2A\x96\xC9\x4C\x69\x84\x54\x4A\x23\x44\x90\xF6\x4B\x23\xA4\xAE\x34\x02\x3F\x6F\x4B\x23\xA4\xAE\x34\x02\x3F\xEF\x4A\x23\xA4\x6D\x69\x04\x7E\xD3\x2B\x8D\x90\xCC\x29\x8D\x90\xB6\xA5\x11\x7A\x1F\xE8\xA7\xD3\x1E\x7E\x62\x98\x52\xBB\xF7\x91\xB6\x34\x42\xDA\x95\x46\xA0\x77\x46\x6A\x4C\xB8\xA3\xC4\x23\x49\xD2\x7F\xCE\xD1\x19\x1C\x2E\x3C\x02\x7B\x0A\x01\x81\xBD\x5F\x34\xC1\xB7\x67\x9B\x5A\x73\xE5\x84\xA8\xCD\xBA\x0F\x49\xAF\x76\x42\x26\xA5\xA2\xFA\xD5\x04\x14\xAD\x96\x32\x5F\x11\x63\x9C\xF6\x37\x65\x70\x2F\x27\x1D\x9F\x9F\x0E\xF8\xFC\x54\x32\xF7\xA7\x03\x3E\x3F\x95\xDC\xFD\xE9\x14\x9F\x9F\x4A\xFE\xFE\x74\x8A\xCF\x4F\xA5\x76\x42\x3A\xC3\xE7\xA7\x54\x3B\x41\x09\x0C\x20\x82\xF8\x44\x53\x47\x47\x8A\x96\
x87\x97\x33\xA7\xAB\xA8\x2D\xB7\x95\xFB\xAE\x84\xC2\xA2\x45\x87\x2B\x54\x8B\xFA\xA5\x4D\x95\xB7\x6B\xA7\x75\x9A\x41\xDE\xEC\xBC\xCB\x9B\x3D\x7F\x66\x79\xB8\x60\x2D\xC1\x82\xB5\xFB\x73\x21\xA5\xE7\x42\x75\x90\x37\x3B\xE9\xF2\x66\xA7\x03\xF1\xAC\xFB\x5C\x27\xC8\x75\x9F\xEA\x04\xBE\xEE\x43\x4E\x2C\x4C\x7B\xA5\x13\x52\xD1\x01\x4D\x41\x59\x6D\x07\x65\x6D\x8C\x20\x64\x44\x08\xE9\xB5\xF5\x51\xA8\x6E\x51\x7C\x82\x46\x90\x82\xB3\x91\xD8\xE4\x98\xFE\xB0\x36\x8D\x05\x5A\xA6\xA8\x63\xC3\x74\x47\xB7\xF5\x58\x58\x36\x1B\x3B\xE2\x19\xAD\x4C\xCC\xC3\xD1\xC0\x3C\xD3\xE9\x57\x1F\x93\x14\xB0\x58\x49\xE9\x98\xD1\x36\x93\xC7\x50\x3D\x48\x59\x60\xD6\xFB\xD2\xC0\xA4\x3B\x73\xA4\xD6\x8A\x41\xB3\x46\x2B\x84\x18\xB4\x33\xDB\xE8\x7F\x94\x9A\xE0\x05\x66\x1B\x0F\x67\xBE\x5A\xC7\xC8\xC8\xC7\x28\xDA\x24\x90\xB0\xFA\x2A\x5E\x3D\x42\x39\x94\x5A\x36\x06\x22\x5A\xA9\x16\x13\x0F\x71\x13\x0B\x96\xA9\xB6\x5D\xA6\x19\x0A\x3C\x6A\xAE\xC0\xA3\x66\x04\x1E\x14\xA6\x10\xD2\x4E\xDC\x51\x0E\x23\x06\xC2\x8E\xEA\x84\x9D\xAB\x49\x06\xE8\x64\x9D\xAB\xC5\x3F\xF0\x31\xED\x8D\x79\xCC\x52\xDA\xD4\xB4\x1F\x8F\x94\xB6\xCE\x07\x7E\x28\x31\x77\x3B\x42\xDB\x49\xA6\xA8\xCB\x09\xDD\xFD\x3C\x26\xA0\xCD\xEB\x97\xD5\xAE\xDE\x89\x0B\x67\x8C\x01\x75\xC4\x9C\x31\xD5\x3C\x8E\x91\x51\x8D\xF0\x7F\x62\x3B\x5A\x3B\x79\x52\x14\xE9\x05\xE5\xEF\x11\x4F\xCE\x27\xC9\x70\xA9\x6F\xA6\xB3\xDA\x3F\x01\xDA\x06\x10\x4E\x99\xB9\xC2\xC5\x66\x2E\x35\xEB\x10\xAE\x7A\x0E\xE1\x5C\x4D\xB3\x53\x7B\x02\x62\x94\x26\x4B\xE6\x1E\x9D\x1C\xAB\x93\x49\x9D\x22\xF5\x68\x85\xFF\x68\xC1\x12\x23\x4E\x7A\x76\x19\x44\x8A\x18\x91\xB2\xB9\x88\x94\x75\x88\x14\x31\x22\xE1\x09\x44\x44\x8A\x20\x9B\x83\x48\x11\x64\x8C\x48\x11\x04\x82\x48\x11\x23\x12\x3E\x60\x44\x8A\xA8\x22\x02\xDE\xC1\x48\xEB\x0C\x67\xB5\x55\xA0\x21\x99\xD4\x39\x39\xE0\xB5\x05\x86\xF0\x66\x24\x41\x91\x79\xE1\x42\x2A\x5B\x35\xD5\x88\x13\x5A\x8F\xF1\x4A\x91\x5A\x63\x11\xD7\xAF\x89\x6D\x76\x62\xB5\xCE\x28\x12\x3A\xBB\x0D\x59\xC0\x13\xAB\x75\x79\x12\x71\x47\xD3\x80\x19\x94\x6B\x34\xDA\x12\x5D\x49\x7B\x34\x31\x94\
xD5\x92\x54\xBA\xC1\x9B\x9C\xFE\x2D\x21\x46\x31\x3D\x46\xDE\xE6\xF6\x3A\xE9\x8D\x10\x43\xC2\x23\xEC\x92\x11\x48\xAE\xAC\x76\x49\x19\x1C\xCA\xEB\x83\xFF\x26\x90\x61\xD7\x0C\x4A\xFC\x67\xEE\x1C\x76\xCB\x08\x24\xAB\x56\xBB\x5D\x85\x1C\x9A\x04\xFD\xB1\x83\x59\x2C\xCB\x18\x24\xC7\x56\xCB\xAE\xD0\x0C\x4D\x83\xFE\xD8\xC1\x3C\xAE\x90\x31\x48\xA8\xAE\xAE\x70\x85\x61\x68\x1E\xF4\xC7\x0E\xE6\x71\xA5\x8C\x41\x92\x72\x75\x65\x5B\xC5\x86\x26\xC2\x7F\xED\x60\x26\xA5\x8C\x42\x12\x78\x55\xB6\x65\x6F\x28\x91\x17\xFF\xA5\x06\x73\xC9\x06\x73\xC9\xD6\x0A\x57\xA2\xAE\x95\xC7\x39\xE3\x5A\x76\x3B\xCA\x75\xF8\x8F\xE2\x1E\x10\x43\x09\x57\xC2\x15\xB0\x0C\xBB\x61\x17\x2C\x35\xFC\x9F\x5C\x38\x11\xA8\xB5\xC2\x15\xA5\x6D\x85\xFD\xCA\x69\x69\x28\xAE\x3E\x6E\x5C\x99\x43\x46\xE2\xB1\xFC\xD5\xA6\x5B\x2F\xAC\x6A\xEA\x82\x71\x78\xBD\x39\xCA\x61\xFD\x83\xB0\xB7\x1D\x69\x2B\x66\xA3\xDC\xD4\x9C\x28\xB7\xF9\xDA\x8A\xE9\x28\xB7\xF9\xDA\x8A\xD9\x28\x37\xC5\x51\x6E\x9A\xE5\xC5\xE3\x93\x7A\x09\xD9\xDC\x5D\xD6\xAB\x32\x16\xC1\xAA\x18\x96\x88\xA1\x97\x83\x47\xF1\xD9\x08\x62\x3E\x78\x25\xFD\x64\x50\xAB\x59\x88\x52\xCA\x4D\x84\xA6\xD4\x40\x56\x90\x51\x21\xEF\x2A\x82\x92\x18\x67\x57\x7D\x99\x3C\x01\x4A\x29\x9E\x92\xB1\xC3\xD0\x2E\x1B\x1F\x2F\x3C\x58\xB2\x2F\xE1\xA4\x67\x9A\x4D\x10\x11\xE5\xFA\x50\xC7\x0B\x29\xAD\x4E\x65\xA6\x63\xD0\x95\xA2\xA2\x35\xF1\x21\x49\xAB\x50\x70\x46\x0F\x24\xA2\x75\xF2\x82\x82\x2B\xB3\x80\x69\xDA\x52\xD9\xEC\xB0\xC4\xC4\xE4\x76\x62\xED\xE3\x6E\xE8\xCC\x7D\xE1\x21\xEF\x36\x9B\xDD\xEE\x7E\x7D\x29\xEE\xFF\xC2\xB3\xDB\xFB\x89\xC7\xB0\xF7\x13\x4F\x54\xEF\x27\x9D\x8D\xFE\x6F\xC4\xF2\xF6\x77\x5B\xE5\x99\x26\x9D\x1C\xE5\x34\x61\x12\xF7\x42\x17\x80\xBD\x7D\xAD\xCE\x28\x05\x85\xE8\x14\xC4\xED\x3B\x66\x43\x45\xB9\xBD\xDB\x77\xE4\xDC\xBE\xA3\xCE\xED\x3B\x76\xD7\x38\xD7\x52\x9A\x7B\xBB\x64\x94\xA1\xE2\x09\x75\xFC\x8E\xC9\xF1\x3B\xBE\xBC\xE3\x77\x30\xE5\xF5\x1D\x38\x47\x98\xC7\x1D\x76\xFA\x34\x8C\xF3\xF1\xCB\xFF\x57\xD2\xC4\x3D\x05\x3C\xCF\x68\xC8\xF3\x8C\xA6\x79\x9E\xED\x5C\x4C\x77\xCA\xF3\xA0\x20\xD7\
x79\xA1\x87\x03\x2F\x74\x3D\x08\xF7\x70\xF9\xAD\x3C\xE1\x8E\xC4\xAD\x4C\x75\x46\x70\x50\x54\x1F\xA4\x8D\xA6\x08\x5C\x34\x45\x38\x8C\xA6\x20\x55\x63\xCD\xC5\x70\xFB\x56\xE5\x39\xD1\x14\xAC\xAF\x83\x90\x05\xCC\x39\xC7\x8A\xC4\xCB\xD0\x89\x97\x61\x77\xAC\xFC\x27\x36\x9A\xE2\xB2\x61\xDD\xAC\x3B\xA6\x43\xE5\xD3\xA1\x9A\x0D\xE9\x56\x2E\xA4\x5B\x51\xD6\xC0\xA9\x73\x45\x8F\x3A\x1F\x33\x9F\x29\xC2\x62\x85\xEF\x94\xC5\xFB\x69\xA8\x9B\xF7\x07\xBA\xF9\xA9\x10\xE0\x47\xA1\xE9\x2E\xBA\xC8\xEF\x35\xCB\x3E\xC1\xE5\x03\x12\xF1\xD4\x21\xF7\x9B\x9F\x11\x7A\x9E\x11\x7A\x9E\x28\xA1\xC7\x71\x5E\x19\xFE\x38\x59\x27\x1D\x5B\x9C\x0C\x38\xEB\x8D\x8E\xB3\x46\x39\x27\x11\x79\x67\x89\xB8\x6A\x62\xD0\x4F\xD6\x71\x8F\xA7\xDE\x89\xA8\x93\x11\x2F\x0E\x4B\xD4\x79\xE6\xCB\x8B\xA4\x9C\xC4\x49\x39\xDB\x7F\xFB\xB2\x02\xCE\xF6\x5F\x5F\x24\xDB\x24\x4E\xB6\xD9\xFE\xEB\x3B\x10\x6B\xB6\xFF\x7E\x32\x90\x68\x92\xC5\x12\xCD\xA2\x19\xCC\x15\x66\x48\x94\x39\x59\x2B\x27\xCA\x24\x3D\x51\xA6\x74\xA2\x8C\x64\x97\xBC\x9C\x1C\x13\xFD\x53\x92\x63\x28\x0A\xAE\x1C\xC8\x31\xF9\x8C\x1C\xE3\xCE\xD2\x12\x9F\x25\xD5\x49\x31\x6A\x27\x22\xCC\x52\x5F\x84\xD1\x07\xA0\x64\x81\x25\x27\x81\x05\xF2\x8E\xBD\x60\x83\x18\x8B\x2F\xAC\xB9\xD0\xCC\xF5\x47\x14\x43\x71\x82\x87\x74\xD2\xCC\x2D\x54\xB4\x90\x4C\x39\x28\xD4\xC4\x95\xE6\x8A\x41\xCC\xC2\x2C\x91\x1C\x53\xEB\x56\x4C\x2A\x24\x13\x31\x31\xC4\x28\xE7\x2C\xD1\x7E\xD6\xA4\x24\x37\x64\x52\x1C\x48\x3B\x88\x7E\x78\x2A\x28\x63\xB1\x48\x3B\xC9\x53\x2C\xED\x44\xA0\x7A\x65\x76\x05\xFD\xE2\xFE\x42\xB6\x13\x80\x66\x64\x9F\xA5\x67\x64\x9F\x67\x64\x9F\xA7\xAD\xEC\xF3\xEA\x52\x25\x8E\x3D\x7C\x40\xAF\xD6\x91\x68\xCB\x23\xAA\x9B\xBA\x91\x5C\xAF\xBF\x15\x22\x1C\xF2\x3A\x88\xEC\xBE\x1B\xBC\x3D\x44\xC3\x2E\x7A\xCD\x7E\xFD\xED\xC8\x28\xEE\xD7\xCF\x86\xC8\x59\x5D\x96\xA5\x5B\x79\xBD\xCE\xF1\x02\x56\x37\x78\x09\x31\x8E\x15\x7B\x3D\x20\x6D\x81\xC8\x3E\xBB\xD9\xAF\xAF\xE9\x6A\x3E\x43\xC2\x39\x28\x95\xCB\x93\x8C\x7C\x59\x0C\xC9\xB1\x3A\x9D\xD4\x31\x69\x50\x66\xD1\x0D\x9F\xE3\x45\x87\xFC\x4D\x3A\x41\x2A\x85\xA8\
x16\xF7\xC3\xF2\xC8\x75\x71\x8F\x0E\x6A\x9A\x03\x45\x11\x63\x83\xA0\xCE\x2C\xF5\x4F\x6A\x36\xD3\x21\xB9\x3E\xD5\x80\x81\xFC\xD8\x84\x42\x8D\x0D\x12\x32\xB7\x28\xBF\x84\x08\x0C\x42\x36\x82\x68\xAF\x97\x90\x8D\x41\xB7\x36\x86\x84\x3F\x32\x77\x8E\x5A\xE6\x18\xED\xD1\xD7\xF0\xFC\x74\x37\x3F\xEB\x1D\xA1\xA4\xFB\x08\x96\x8C\x8B\xD8\x10\x49\xCE\xB9\x02\x75\x64\xF7\x34\x10\x43\x06\xC9\x2A\x84\x1C\xB8\xD8\x79\x67\xE9\x05\x08\x28\x29\x82\x16\x21\xA0\x76\x39\x39\x67\x10\x30\xBE\x99\xB2\x88\x7A\x37\x78\xF7\x6B\x87\x81\xB1\x0D\x20\xB2\xF7\xEB\x01\x0A\x46\x7B\xBD\xFB\xF5\x02\x1C\xD4\x73\x52\xEC\xF4\x70\x90\xD3\x08\x48\x52\x39\x2E\x0C\xBE\x47\x48\xA1\x16\x2B\x0E\x32\x19\x31\xD9\x7A\xF6\xE8\xFC\x68\x5D\x4C\xEA\x04\x01\x92\xB9\x75\x9B\x05\xEB\x26\x95\x9B\xB9\x1C\xAD\x34\x4C\x2B\xF3\xB9\xB4\x32\xEF\x68\xA5\xE4\x45\x35\x60\x20\x3B\x3A\xA9\x0D\xE4\x73\x18\x72\x03\x39\x33\xE4\x06\x42\x61\xC8\x0D\x93\x4B\x7C\xC0\x0C\xB9\x31\x92\x57\x35\x06\x0D\x05\xD5\xE6\x75\xC6\x3C\xB5\x46\xF9\x25\x5D\x26\x90\x04\xFF\x27\x97\xBF\x72\x62\x82\xCA\xD7\x52\x0E\x6F\xCA\x38\x9E\x38\x17\x3D\x82\x42\xBC\x00\x0A\x31\xD1\xF8\xC5\xBE\x79\x59\x1F\x18\x31\x03\x23\x99\x0B\x8C\xA4\x03\x06\x0F\x0A\x19\xAE\xE1\xD8\xA4\x8E\x21\x99\x03\x8C\x18\x12\x06\x46\xDC\x02\x23\x76\x66\x57\x07\x8C\x98\x2A\x90\x04\x8E\x27\x24\x5D\xAB\x21\x6A\xE4\x21\x33\xD1\xCF\x92\x03\x14\x1A\x4C\x2E\x14\x50\x70\x8D\xF4\x84\xF2\x1B\x13\x60\x8E\x50\x8A\x63\xAA\x6D\x90\x8A\x53\x93\xA6\xB2\x1D\x94\x14\x9E\x4A\xEF\xE7\xCC\x76\x22\xD3\x2C\x7F\x19\x77\x0B\x1B\xAA\xDC\x08\x19\x15\x89\x3D\x8C\x37\x72\x02\x39\xEE\xF4\x4A\xAD\xF1\x4A\x2F\xA8\x2A\x3B\xE4\x90\x80\x16\xF4\x4B\x21\xAE\x72\x33\xBD\x51\x3C\x99\xF2\x27\xC8\x75\x34\xE1\xC4\x5D\xC4\xFF\x1D\x9D\x50\x15\xE4\xDE\xC5\x87\x12\x25\x39\x25\x11\x73\x40\xCE\x31\x9A\x39\xBB\x04\x62\x7A\x0F\x09\x31\x07\xBC\x4D\xE6\x31\x1F\xF3\x01\xBA\x6B\xDE\xE1\x78\xEE\x0E\xC7\xDD\x0E\x6B\x46\x77\x0D\x9A\xD0\x5D\x43\x3C\xC3\x1A\x68\x88\x99\x35\xD0\x6E\x7F\xAF\x76\xAE\x97\x61\x4F\x77\x93\x31\x47\x53\x70\xB4\xBA\xE0\x39\x25\x10\
x9B\xDA\x5A\xE2\x6C\xE6\x73\x10\x99\x81\x6C\x91\x9E\xA3\xD6\x92\xD3\x0E\xC9\xF6\x55\x96\xB3\xF8\x22\xD0\xC8\xD6\xBB\xED\xA0\x43\xB6\x64\xFA\x0B\x4F\x87\x4C\x80\x19\xB2\x25\xBA\xA7\x35\x8A\xF6\x7A\x7B\x6A\xCD\x89\x20\xA3\x3D\xFA\xDB\xED\x74\x22\x48\x6C\xE0\x70\x85\x12\x41\x22\xC1\x9D\x6E\x85\x64\xF8\x01\xED\xB8\x81\x8F\xF8\x83\x98\xBF\x9E\xB2\xA8\x53\x15\xB9\xAA\x4D\xB9\x38\xCC\xE3\x15\x8A\xF7\xD7\x9A\x0D\x4F\x9F\x64\x4F\x5B\xE4\x18\x7F\xC3\x25\xE8\x19\xE6\x2B\x04\x8E\x52\x01\xBF\x9F\x07\x8A\xFD\xC3\xBA\x5E\x14\x85\x44\xCC\x33\xBB\xAE\x70\xAE\xBD\x08\xE8\xFC\x70\xB4\x5F\xC0\x77\xE2\xD5\x3D\x06\x4D\x1B\xEB\x41\xB4\xCA\xDE\x2A\xBD\xE1\x38\xE0\x5F\x5A\x01\xC7\x31\x48\xFE\xA0\x27\x93\xA9\xF3\x67\xF5\x59\xFE\x93\xA7\xCF\x1A\x30\x75\x6F\x5E\x56\xBB\x9F\xD1\xF9\x3D\xA3\xF3\x7B\xBC\x3A\x3F\x12\xD2\xEB\x64\x68\x6F\x5D\x62\xD3\xB6\x54\x2F\x5B\x22\x86\xA0\xD3\xF9\xC5\xB3\x3A\x3F\xB2\xC7\xEF\x1A\xD8\xE3\x2D\x17\xA8\x2C\x61\x97\x74\x66\x9D\x5F\xE6\x74\x7E\xA5\xE8\xFC\x76\xB1\xFD\x7D\x37\xE9\xBD\x7A\x5F\x4E\xE8\xCB\xBB\xA5\xF3\x3D\x9D\xD6\x4B\x54\x7D\xA2\xF3\xDB\xCD\x1F\x5D\x86\x5D\x34\x85\xB6\x7B\xA9\x0F\xC0\x2E\x58\x96\xEE\xAF\xEE\x94\x95\xA4\xEA\xDB\xE5\x74\x7E\xCB\xFC\xD9\x2B\x60\x37\x4D\xA2\xED\xBE\xA4\x0F\xC0\x6E\xB8\x42\xBA\xBF\xA6\xF3\xED\x20\x55\xDF\x6E\xA7\xF3\xBB\x82\x3F\x7B\x25\x2C\xD3\x24\xDA\xEE\xBB\xF4\x01\x58\x86\x2B\xA5\xFB\xEB\x3A\x6D\x27\x6B\xFA\x96\x5B\x9D\xDF\x95\xFC\xE1\x67\xC1\x15\x34\x8D\x76\x80\xDD\xFA\x00\x5C\x01\xCF\x92\x01\xDE\xD0\x69\x3C\xA7\x75\x7E\x57\xC8\xA7\xE1\x4A\x9A\x48\x3B\x80\xD2\x07\xE0\xCA\x76\xFD\x6F\x72\xDA\x3A\x71\x60\xB8\xA2\x37\x5D\x50\xEC\xB8\x00\x25\x24\xC8\x04\xB6\x0E\x0C\x32\xCA\x32\xF9\x7A\x3D\xE3\xBB\x30\xA9\x77\x21\x5B\xB7\x1B\x4F\x85\xE8\xFC\x32\xD8\x75\x59\xDF\x05\xC6\x68\x25\xC0\x5C\xE2\x10\x80\x4C\x74\x7E\x19\x02\x89\x3C\x03\x1A\x72\x31\x98\x72\x5B\x40\x24\x64\x9D\xDF\x2E\xD6\xF9\xED\x9A\xAF\xF3\xCB\xE6\xE9\xFC\x62\x2E\x0D\x36\x57\xE7\x97\x0D\x75\x7E\x65\xAB\xF3\x8B\x17\xE8\xFC\xCA\x7F\x72\x3A\xBF\x67\xFC\x1D\x9E\xD1\xF9\x3D\
x7D\x75\x7E\x77\xB9\xC2\xF5\x1C\x9E\x95\x4B\xE2\x75\xF1\x4B\x18\x32\xF2\xFE\x36\x8C\x3C\x17\xB3\x1C\x26\x74\xE5\x00\xFA\x2E\x2F\x1B\xF8\xE5\xEF\xF6\x53\xBD\x82\x5F\x3E\x44\x36\xEA\xBF\x79\x52\x27\x21\x19\x25\x0A\x6F\x07\xD9\x22\x66\x82\x63\x69\xD0\xAB\xAC\x7F\xB8\x18\x7C\x5E\x12\x87\x75\xA9\xD5\xFB\x09\x70\xC7\x66\xD8\xB4\xB3\xCD\x9F\xBA\x81\x13\x9B\x75\xB9\x72\xFF\xFB\xD7\xF3\xD2\xAF\x1E\xAE\xF5\xB7\xA5\xD2\x00\xAD\x35\x64\x81\x23\x62\x03\x24\x0B\x92\x31\xAF\x5F\x62\xF0\x93\x9E\x50\x39\xC8\xCA\x10\xCD\x7B\x85\xD7\x15\xF9\xD8\x80\x86\xD0\x06\x6B\xE2\x60\xF1\x68\xC5\x4F\x0E\xF7\x00\xCD\x42\x50\x50\xFE\xFE\x00\x61\xE9\xA1\x0F\x41\xF9\x3B\x12\x25\x8C\x7F\xF3\xE2\xDE\x16\x2E\x16\x97\xDB\xA8\x47\x45\x01\x81\x14\x6A\xC6\x7A\x63\xF7\xE2\xC9\x16\x3B\x47\x43\xB1\xF3\x49\xA1\x2B\x9C\x22\xE3\x69\x24\x40\x47\x83\x8C\x92\x62\x6F\xE0\x5D\x29\x3C\xFB\x91\x41\x04\xE2\x93\x4E\xF9\x8B\x21\xE5\x2F\xA6\xD7\xB5\x5D\xD6\xC8\x79\x3B\x04\xBE\xAC\x6B\x40\xEE\xBF\xF0\xF4\x3A\x82\x14\xF5\x3E\xE7\x14\xF2\xF3\xF9\x07\x71\xD9\xB3\x4E\x31\x84\x0F\xFF\x90\x1E\xFE\xE6\x37\xAA\xBC\x35\x6D\x05\xF3\x4C\x5B\xEF\xD2\x6C\xDB\xFA\x69\xCD\xC6\xAD\xFB\x34\xF8\xA4\x49\x27\x20\x89\xB5\xEA\xA2\x6E\xF6\xEB\xF7\x6B\x67\xA9\x3A\x59\xA7\x2D\xE0\xD2\x1E\xE0\x12\x81\x8E\x33\x73\xF4\x00\x17\xCF\x7B\x05\x3E\x47\x09\x6A\xAA\x9A\x88\x80\xD3\x02\x45\x07\x38\xC5\x2A\xD4\x0E\x70\xEC\xF6\x07\xC1\xB4\xEA\x2C\xB6\x7A\x8D\xC2\x2D\x93\x96\xB6\xA4\x42\x7B\x16\xA8\x91\xD5\xF6\xD6\x22\x35\x6D\x2D\x52\xB3\x07\x93\xAC\x45\x80\x93\x74\x07\x33\xB2\x30\x6D\x2B\x02\x57\x4D\xAA\x43\x60\x19\x9B\xAB\x48\xE7\xA0\x18\x81\xD5\x5C\x53\x91\x4B\x1F\x75\xF9\x95\x40\x0C\xC9\x29\xC9\x47\xB2\x78\x41\xE0\x93\x42\x7A\xD1\xBA\x92\xD9\x75\x25\x8F\x7B\x5D\x3E\xAF\xCC\x11\x1C\x82\x6A\x3A\x24\x35\x43\xA4\x43\x2C\x48\x20\x3E\xC5\x5E\x7D\xC1\x1E\x9D\xD0\xB6\xAF\x5A\xD5\xB0\x7B\x9F\x03\x8F\xEA\x30\xCC\xE7\xF2\x8F\x33\xF7\xBC\x4F\xF6\x0B\x43\x86\x12\xFC\xD1\x06\xC9\x67\xCE\x6C\x39\xA7\x93\x96\x4E\x31\x02\xCB\xA0\xA8\xB6\x56\xE5\x3C\x44\xCF\x7E\x49\x3A\x98\x8C\
x6B\xC2\x7F\x1D\xE0\x5A\xD9\xDD\x66\x8F\x6F\x21\x60\x20\x3F\xF5\x0F\x8A\x63\x6E\x49\x41\x8B\x1F\xBC\x47\xA0\x68\x6E\x96\xF5\xBA\x88\x0E\xC1\x2A\x63\x94\xE8\xDB\x83\xAA\x60\x6B\xFD\x08\x7F\xB1\xD5\x7C\x95\x90\xAF\xA9\x53\xBB\x7E\xAA\x1A\xB3\xE1\x2D\x13\x75\x62\x2A\x7A\x79\x7B\x9F\x6E\x48\x85\x70\x9F\x6E\x40\x43\xD6\x1A\xAC\xD5\xD3\x1E\x2D\x5A\x6B\x35\x2D\x97\x42\xD2\x35\x79\xDB\xC9\x6A\x7D\x18\xBB\xD5\x52\x21\x6F\x84\x84\x8F\x4D\x20\xFE\x3A\x38\x14\x1B\x0E\x85\xD8\x2C\x4D\x54\x26\x5E\xAD\x4B\xCA\xC1\x20\x48\xE0\x73\x46\xA7\x04\x14\xC5\x47\x97\x10\x1F\x2D\x10\x23\xE9\xA7\x83\xC0\x1C\x93\xB5\xDA\xDE\x64\x3D\xB4\x55\x53\xF0\xEC\xD2\xDC\xE0\xD9\xA5\xBE\xAD\x9A\x2C\xED\x31\x94\x64\xA9\x5E\x72\x3A\x75\xD5\xB7\x54\x2F\x71\xF8\xEC\x94\xA5\x5A\x4D\x59\xAA\xBF\x9E\x36\x8F\xB7\x89\xF6\xCE\x52\x84\xB3\x6A\x2D\xD9\x6E\x5B\x97\x91\x9F\xF8\x0F\xB4\xFA\xD2\xF1\x2C\x1B\xC9\x31\x69\x6B\x3D\x6E\xED\xB7\x5D\xE2\xF2\x4D\xE4\x9D\x50\x22\x70\xBE\x8E\x4E\x79\x8F\x37\x5C\x66\xD6\x30\x67\xCE\x90\x59\x41\x68\xF6\xEB\x6B\x40\x91\xB7\x0E\x24\xE0\x43\x58\xFE\xF7\x73\x03\x03\xA9\x66\xBB\x5A\xB4\xD7\x4B\xA4\x02\x8E\x54\x82\xE7\xC4\xEC\x4F\x22\x84\xEE\x0F\x86\x20\xBA\x3F\x98\x71\xDC\x09\x1E\x13\x90\xB8\x73\xD2\x33\x35\x5F\x33\xCF\xD4\x9C\xB0\x3A\xBC\x5C\x2B\x34\xC1\x67\xBA\x0D\xD3\xD0\x88\x6C\x52\xB3\x3C\x07\x0D\x40\x69\x5A\xB8\x6E\x04\xA5\x7A\xDC\xA3\xAF\xA9\x93\x7E\x5A\x1F\x9F\x0A\x22\xD3\x34\x92\xF9\x73\x90\x9A\x85\x62\xEE\x9E\x9D\x05\x15\x16\x3D\x52\x95\xAC\x01\xD1\x06\x46\x50\xAC\xC2\xB8\xA9\xC6\xD0\xB9\x6F\xC4\xFC\x7D\xA7\x17\xC1\xCB\x7C\xC6\x7D\xC3\x77\x85\xCF\x29\xA3\x44\xCF\x7D\x23\x7E\xEC\x2C\xC4\x94\xFB\x06\x11\x3D\x7F\x2E\xD1\xF3\xA7\xDC\x37\x62\x72\xDF\x88\xC9\x7D\xC3\x67\x2D\xAF\xEA\xBB\x6F\xF8\x37\x17\x4A\x84\x8B\x4E\xCB\xAB\x06\xEE\x1B\x8A\x94\xD3\x4E\xE8\x40\x61\x12\x99\x05\x41\x80\x78\x81\x0A\x29\x9E\xD2\xFD\xC6\x6D\x7E\x09\xED\xB4\xBF\xFE\x94\xF6\xD7\x9F\xD1\xFE\xFA\xC7\x27\x94\xCF\x70\xA8\xFD\xE5\xA7\x43\xED\xAF\x3F\xA3\xFD\x1D\xB6\x3A\xDF\xB5\x1A\x68\x7F\x5D\xAB\xA1\xF6\x97\x9E\
xD2\xC2\x6D\x7A\x02\xFC\xB6\xB4\x89\x3E\x00\x65\xEB\x3D\xE8\xD5\xBE\xCD\x6E\x1F\x45\x9E\xD6\x5A\x7B\x94\x6C\xC9\xA7\x42\x21\x9C\x57\xC2\x72\xE6\xAB\xCA\x17\x66\x88\x60\x35\xA8\xC2\x47\x63\x24\xE4\xC5\x65\xB3\xDB\x0B\xDD\x55\xE1\x4B\x24\x78\x9C\xD2\xEC\xFA\xC6\x38\xB3\x84\x54\xE1\x63\x87\xA6\xC2\x93\x22\x7C\x0A\x32\x2A\x9C\x99\x1D\x9B\xD4\xDA\x95\x3F\xD2\x84\xBA\xF8\x72\x2C\xA5\x4B\xA2\xBD\xDE\x7D\xBA\x77\x56\xDF\x3F\x53\x48\x97\x9B\xB4\xA7\x55\xF8\xB7\xD9\x83\xA2\x20\x80\x50\xE4\x6B\x88\xEC\x03\x81\xD3\x27\x6C\x3E\x47\x65\x4E\xAE\x7E\x67\xDC\xAA\xBD\x20\xB4\x94\x6C\x36\x64\x52\x12\x82\xA6\xCC\xB9\x7B\xF8\xAF\xF2\x7A\x7F\x1F\x84\xE0\xD3\xB3\x92\xFF\xA2\xD4\xB3\xAC\x90\xB0\xDF\xB8\x66\x2F\x7A\x27\x91\xD0\x87\x24\x0E\xB5\x0F\x92\x36\x6B\x8C\x47\x06\x0C\xF0\xAA\x88\xAB\x33\x2B\x53\xFE\x17\xBA\x85\xA2\x9E\x52\xA2\x7F\xE9\x51\x56\x39\xEA\xA3\xC8\x77\xBE\xDF\x27\x9E\xEE\x23\x41\x10\x10\xDA\x6F\x6B\xCA\xB7\x8A\x9C\x85\x1C\x70\x4D\x87\x3D\x59\xAD\x7D\x1B\x34\xFB\xF5\xB7\x89\x4B\xCA\xC5\x47\x1E\xFC\x38\xA5\xB3\xF7\xED\x07\xF0\xCF\xA3\x05\x25\xDB\xA4\x11\x93\xAE\xF6\x43\x88\xA4\x33\xB1\xE3\x35\xFB\x80\x77\xB2\xD6\x37\x78\x17\x23\xBE\x75\xF2\x3A\xDD\xAF\x7F\x51\x7E\x94\x75\xB6\x5F\x7F\x54\x7E\x2C\xD7\xFE\x7E\xFD\x49\xF9\x71\xD5\x7E\xFD\x80\xFC\x79\x4D\x6D\xF6\xEB\x5F\x8F\x20\xEC\xF4\x17\x01\x84\xA4\xBF\x08\x20\x24\xFD\x05\xA5\x4A\xB4\x17\x71\x9E\xEF\x0F\x7A\x0D\x5F\xA7\xB9\xE1\xAB\x35\x37\xBC\x47\x53\xC3\xF3\xBA\xD9\xAF\xDF\x84\xCF\xF4\x7E\x7D\x37\xB6\x11\x47\xDE\xBF\x50\xD2\xB9\xBC\x5E\x7F\x45\x41\x48\xAE\xBC\x5F\x52\xD4\xE9\x6E\xEC\xF4\x55\xC5\x29\x3F\x6C\xD2\xE0\x01\x78\xF8\x81\x8E\x30\x84\x7B\xF4\x27\xA3\xCA\xC7\x7F\x7F\x3D\xAA\x0C\xFE\xFB\x8B\x51\x95\xE2\xBF\x1F\x8D\xAA\x0C\x21\x72\x31\x42\xF4\x0B\xAD\x07\xD9\xAA\x00\x41\x73\x79\x3C\x84\x0E\xE2\xCF\xF7\x40\xD8\x1E\xDF\x3A\xDF\xAF\xBF\x97\xA3\xDF\x4C\xEF\x36\x20\x53\x59\x7A\x7B\x5D\xD8\x14\x8A\x09\xFE\xB3\x42\xB4\xAE\xB0\xFA\xB4\xFD\x5C\x9B\x11\x98\xB5\x2C\xBE\x64\x56\x95\x41\x38\x85\x70\x31\x88\xC1\xBC\x0D\xC7\x59\x1D\x3C\
x02\x7D\xA2\x99\x4D\x07\x38\xCF\x57\x6C\xDD\x9E\x1D\xFA\x8A\x11\x41\x7B\x99\x3D\x33\xF4\x15\x9B\x26\x68\xD4\xF7\xA5\xF6\x87\x86\xBE\x62\xD3\x04\x8D\x5A\xFD\xA0\xBD\x13\xF2\xF6\xB5\x7D\x89\xBD\x63\xE8\x24\x26\x01\x82\xF6\x6D\x6E\xF2\x55\x8E\x5B\xB4\xA0\x78\x6B\xFC\x68\x0A\xFC\xC6\x88\xA0\x66\xFA\x22\xC2\xA7\x95\xB1\xBD\x02\xBF\x10\xDA\x8F\x7A\xEC\x3D\xA9\x4F\xC8\x85\x4D\xA9\xDA\x8E\x4D\x6A\x1F\x0C\xDF\x4B\x71\xBF\xD6\xAF\xC1\x7B\x29\x06\x1F\x8A\xDE\xBD\xC4\xBF\xDD\xBD\x14\x73\xFE\x9F\xCF\x7B\x8D\xD5\xA0\x99\x49\xBD\x1A\x62\xFB\x83\xFB\x75\x80\xF7\x8D\x55\xD4\xA4\x46\xC1\x75\x52\x53\x75\xD7\x98\x3F\xE2\xEA\x2D\x0E\x36\x8F\x80\xF4\xB4\x71\xFB\x8B\x39\x58\xD4\xB9\xFD\x41\x68\x37\xE2\x96\x3A\xD9\xCF\x44\xDD\xDF\xC9\x7E\xFD\x86\x98\xFF\x7D\x18\xCF\xD0\xE7\x5E\xF9\xD3\xEF\x3E\xBB\x5F\x7F\x1E\xC9\xC5\xDB\xFE\xE6\xC1\x8F\xFF\xFC\x97\x2F\xFE\xC6\xF7\x5D\xAF\xEF\x8E\xDD\xEF\xD7\x7D\xE8\xED\xDF\x70\xBD\xFF\x19\xF6\xB0\x0B\xF7\xE8\x6F\xAB\xB5\x3D\x8B\x68\xF2\xD5\x07\x06\xA9\xEC\xB4\x0D\x29\x31\xBD\x25\x2F\xDA\xD0\x7E\xCE\x6B\xC8\x4D\xC8\x27\x96\x28\x68\x6A\x0D\xC6\xAA\xC6\xEA\x17\xE1\x39\xF5\xDC\xE5\x66\x6C\x7E\xAC\x1A\x41\x5E\x19\x18\x15\x9E\xF1\x0F\xF0\xB9\xBE\x0F\xE7\x78\x5F\xD0\xE0\x1E\xF1\x0A\xF0\x77\xDC\x94\x5F\x91\x75\xE0\xBB\xFE\xF3\x4F\x76\x1A\xAF\xA3\xF8\x3F\xF7\xC5\xB5\x39\xB9\x5F\x1F\x25\x87\xEF\x9F\x07\x51\x77\x86\xD7\x79\xF7\x05\xF3\xDC\xC3\xC3\xBD\xDE\x7D\xC1\x62\xFF\xF0\xE1\x7B\xE2\xAF\xC3\x3D\xFA\xBD\x5D\x7C\x33\x79\x8B\xB3\xB2\xFE\x1D\x31\x4F\xE3\xFD\x01\xE4\xA4\xAE\x7F\x47\xDC\x72\xD8\xEF\x0F\x58\x61\xFF\x8E\x98\x5B\xB7\x2C\xF6\x7B\x03\xD3\x7E\x83\x21\xD0\xA7\xD9\xB4\x2A\x56\x73\x1E\x85\x10\x4C\xCB\x74\xDF\x87\xDD\xA4\xAE\xCB\x69\x1B\x36\x48\xF9\x37\x10\x60\x17\xA3\x86\xCE\x35\x11\xD3\x7A\x84\x07\x08\x3F\x78\x8F\x6E\xF8\x25\x8C\xC0\xAC\x42\x51\xFE\x36\x1F\x13\x24\xB1\xE1\x1E\xBD\x11\xAF\xEE\x67\xDA\x7F\x8F\x6E\xAA\xDC\x48\x8F\xBC\x85\xF7\x46\xDC\x40\xD1\xEE\x01\x8F\xC6\xCF\x04\xFE\x53\x7B\x17\xDA\x2F\xA9\xE6\x99\xFD\xDB\x7E\xFF\x04\x66\x71\x0B\x15\x86\x6B\x07\
x9B\x3F\xD8\x11\x6C\xE2\x05\x91\x0F\xF1\xB6\x91\x0F\x83\xD7\x04\x98\x78\x20\x38\x0E\xE0\x12\x33\x95\x9D\x01\xCB\x72\x0F\x2A\xF1\x50\x70\x94\xE1\xF1\x31\x91\x5C\x16\x1C\xE3\x1E\x0C\xBC\x21\x7C\x5A\x0F\xFC\x41\x29\x6C\xB9\x10\x9C\xDC\x1C\x33\x1F\x40\x3A\x67\x1A\x82\x66\x16\x40\x06\x69\xB3\x2A\x1C\x2E\x6D\x87\xE3\x70\x69\x3F\x67\x38\x5C\xDE\xB1\xBE\xEB\x33\x6D\xC1\x0C\x8F\xCB\xA3\xDD\xA3\x7B\xA3\xBD\x69\x96\x5F\xA6\x26\x53\xA3\xDD\x33\x8F\x63\x8E\x89\x79\x6C\x0F\x0D\x92\x61\x68\xBA\xA7\x74\x44\xAC\xE6\x04\x73\xFC\xE8\xCF\x18\x03\x0E\xFE\xC2\x23\x0F\x7E\xFC\x57\xFF\x9A\x28\xF4\xB7\x33\xD8\xF6\x58\x1F\x99\x4D\xE1\xB1\xBA\x1E\x5F\xE9\xCE\xE3\xC5\x80\xDE\x76\x9F\x20\xCE\x6F\x06\xAB\x2E\x06\xB5\x7F\xB2\xD6\x8C\x57\xFE\x14\x5E\xCD\x91\x51\xE3\xED\x15\x12\xF1\xB4\x42\x22\x5E\x60\xB4\x3D\xAF\x1D\x5E\xB1\xD5\xF6\xBC\x9E\xC6\xAB\xF3\x7A\x01\x5E\xE9\x59\xBC\x9A\xD1\xDA\x10\x5E\xD1\xB2\x44\xEC\x91\xD2\xC1\xB1\xFB\xCB\x06\x22\x81\x31\xC2\xD1\xB4\xBE\x74\xFF\x06\xD7\xD3\x0D\xED\x2B\x6F\xD8\xAF\x09\x73\x7E\xFD\x5D\x77\x6D\x78\xF6\xD3\xF7\x13\xD4\xCA\xBB\x89\x70\xDA\x97\xE3\x25\x68\xBF\xDD\xAA\xB6\x45\xF9\x0A\x7E\x93\x75\x4F\x5E\xC9\x44\x76\x68\x2A\xF5\x7B\x89\x2E\xED\xC5\x07\x08\x64\x9C\x2F\x99\x95\x6F\xDD\xD6\xC9\xD6\xF6\xB6\xEE\xF7\x66\xB7\x4E\x0B\x41\xD0\x4F\xF6\xC6\x99\x39\x1B\x67\x9E\xB4\x8D\x23\x82\xE0\x40\x11\x41\x64\x3F\xF3\x00\xE3\xF5\x76\xB0\x30\xBC\xDF\x8E\x38\x22\x83\x19\x2D\x80\x45\xB4\x3D\x2C\xA2\x69\x58\x44\x8B\x61\x11\x91\x56\x6A\x7B\x58\x44\x43\x58\xC8\xF0\xF8\x98\x02\x07\x19\x16\xD1\x10\x16\x02\x09\x96\x4F\x18\x17\x04\x4F\xD6\x0F\x17\x7A\x5B\x3C\x99\x73\xC4\xFF\xA9\xE0\xC9\x3B\x3F\xF5\xA8\xF1\x44\x7D\x9D\xE0\x09\xB1\x84\x04\x90\xA8\xCD\xDC\xCE\xC6\xF1\x08\x3C\x76\xD8\x8A\xBA\x98\x97\x40\xA8\x62\x4F\x17\xB4\x3C\x4F\x15\xE4\x0D\x34\x41\x73\x6E\xC8\x58\x4A\x76\x40\x3C\x1C\x3C\xEE\x0D\x1E\xCF\x1B\x3C\x1E\x0E\x1E\xCF\x5E\x98\x12\x3C\x44\x4B\xFC\x60\xBC\x46\xF4\x33\x34\x8E\x84\x53\x2E\x35\xD7\x46\xBB\x06\xB4\x5E\x0E\x8F\xF0\x58\xA2\x7C\xFC\xAB\xA4\xBC\x6D\xB2\xD0\x27\x64\
x65\x8F\x39\x54\x73\xF0\x7A\x5B\xDF\xA1\x8B\xC1\xD0\x79\xE8\x62\x30\x8D\x71\x17\x83\x05\xEE\x43\xEC\x8A\xDC\xC3\xB1\x81\x33\xD1\x63\xB6\xD9\x0D\x5E\xCF\xB1\x55\x0C\xE6\x2E\xB6\x8A\x6D\xE7\x3E\xCF\x56\x11\xCF\xDA\x2A\x7A\x2B\x41\x5A\xF1\xCE\xD8\x29\x2A\x7F\xF8\x8A\x2E\xB6\xFD\x9D\x73\x63\xDB\xA7\x8D\x3C\x5A\x62\x16\x63\xD6\x41\xAA\xB5\x8A\xE3\xD7\xEB\x94\xEC\xE3\x75\x76\xAC\x32\xEC\x1F\x91\x77\x06\x21\x7D\x00\xF2\x05\x5E\x14\xB9\x21\x57\x8E\xAA\x40\x0E\x16\xCC\xC4\x85\xBC\x53\x97\xA1\x3F\x05\x7B\x63\x3B\xEF\x9F\x43\xA4\xAF\x4F\xD9\x17\xBD\xB0\x9E\xF5\xC4\x52\x4F\xE7\x5D\x8B\x75\x3E\xA8\xF3\x86\x27\x96\xAC\x8A\x92\x92\x5D\xCB\xF9\x0B\x40\xCE\x06\x38\x6A\xDE\x0B\x23\x3F\x54\xF8\x32\x74\x67\x04\x90\x6F\x52\x18\xDE\xCC\xD7\x32\x3A\x81\x37\x77\x0E\x4C\x49\xDF\xB0\x98\x43\xC2\x56\xF1\x9C\x0D\x87\xF8\xA9\x3A\xE1\x9C\x69\x14\xD5\x9E\xB3\xB9\x4C\x43\xD2\xF9\x05\x70\xF8\x77\xD2\x50\xD1\x82\x6E\x6A\xE4\x44\x90\xB0\x05\x3D\x87\x84\x02\x9B\x28\xE1\x60\xC2\xF6\xCB\x6C\xB5\x36\xAD\xED\x19\x72\xF0\xE8\xCB\x90\x1C\x95\x12\x16\xEE\x41\x76\xB4\x17\xB4\x96\x2C\x38\x83\x14\x44\x99\x5C\x4E\xFB\x95\xEC\x30\xA2\x2B\xC1\x9D\xC6\x35\x9A\x63\x93\x3A\x99\x1B\xCF\x95\xB8\x78\xAE\xA4\xB5\x3D\x27\xEC\x6D\x9F\xB4\xB6\x67\x1C\x86\x96\x4D\x9E\x3B\x89\xAC\x93\x18\xC7\xA4\xCA\x20\x21\x70\x1C\xED\x59\xD6\xE7\x84\x12\xA8\x9D\x87\x12\x64\x6C\x64\x9A\xD1\xED\xA9\x29\xDD\x1E\x8D\x96\x40\x06\x19\x24\x47\x27\x75\x26\xCA\xBC\xA1\x99\x29\x03\xC3\xB6\xF5\xAC\x5D\x5F\xC6\x28\x96\xB5\xEB\xCB\x0C\x64\x84\x67\x7E\x17\x97\x0B\x19\x73\xC6\x94\x33\x2F\x93\x30\x35\xDC\xE8\x23\x34\x68\xCE\xE6\xE9\xBA\x35\x54\xD7\xFE\x6D\xB5\x41\x26\x9E\x28\x48\x0E\x06\xFC\x13\xAB\x62\xBF\xE5\xE6\x60\xA6\x8C\xDA\xBE\x58\x46\x5B\x87\xA9\x4C\xCC\xDB\x5D\x8E\xE9\xBC\xB5\x70\xE3\xE7\x21\x63\x0B\xB7\x5D\x3F\x42\xE5\x9D\xA7\xCC\xC4\x78\xAA\xF2\xCE\xFE\xDB\x66\x70\x53\x2E\x79\x7B\xA1\xD8\x40\x96\xB8\x92\x32\xB0\x38\x3A\x7C\xA1\x79\x31\x77\xE6\xC5\xFC\x49\x8B\x0E\x9F\x31\x2F\x7A\xF3\xCC\x8B\xFD\xE8\x70\x7F\x4E\x74\xB8\xFF\x28\xA2\xC3\xBF\x8E\x63\
xB4\x5D\xEC\x50\x17\xAA\x8D\x7C\xE8\xA0\x74\x54\x21\x06\x55\xBD\x88\xA6\x42\x62\x15\xFE\x3F\x25\x81\x4C\x06\x66\x68\x0E\x37\x4A\x18\x4F\x12\xC4\x93\x84\xC3\x8D\x3C\xA7\x5A\xF1\x08\x4F\x92\x36\x0F\xE4\x13\x67\x86\xF6\xE6\x52\x08\x6F\x0A\x4F\x7C\xC2\x13\x9F\xF0\xC4\x9B\x63\x86\xF6\x76\x62\x86\x5E\x9C\x01\x52\x2D\xCC\x00\xA9\x9E\x4E\x78\xA2\xA6\x94\xFA\x3D\x93\x7B\xDC\x39\xA5\x3F\x39\x89\x5E\x3A\x7F\x91\x77\xE8\xBE\x53\x7D\x64\xDF\x31\x93\xE8\xE5\x1D\xDB\x25\x7A\x19\xC8\x28\x73\xF9\xC7\x27\xDC\xD7\xC5\xEB\xCF\x5D\xF8\x47\x6F\xBB\xB9\x2F\xF2\x75\xC1\x69\x26\x43\x27\x17\xEB\x55\xA9\x11\xB6\xE8\x51\x3B\xBC\x2C\x72\x35\x89\xEC\x3B\xDB\xCC\x0A\x3F\x73\xAD\x1F\xAD\xAB\xB3\xBA\x9F\x6B\x69\x36\xBD\x82\x04\xE4\x73\x86\x3F\x96\xE2\x93\x9E\x8F\xF9\x5C\x0F\xFC\xA1\x8F\xF9\x42\xE7\xFC\xE8\xB1\x3B\xE7\x8B\xEB\x8E\x73\x2E\xA7\xAF\x49\x80\x11\xF9\x10\x47\x0B\xB8\xDF\x88\xCB\x29\xF5\xB9\x5C\xE4\x6B\x35\x7B\x3B\xAB\x36\xAC\xDE\x5B\x60\xA4\xF4\xD8\x48\x09\x11\xB2\xD0\xD1\x02\x5B\x65\xD7\x0A\xD9\x75\x6E\xB5\xA8\x16\x3C\x09\x1F\xB7\xF6\x6B\xC1\x87\xF6\x01\x6F\x5A\xF6\xB8\x75\x01\xDA\xFB\xF3\xA2\x49\xFC\xBE\x73\x77\x97\x7C\xF7\x51\xA7\x11\x18\xBC\xDE\x36\x18\xE6\xD6\x61\x30\xCC\xCE\xE7\x1F\xCD\x99\xFF\xF6\x71\x3D\xF1\x4C\x54\x4C\xB7\x6B\x34\xD1\x7E\x98\xD9\x1C\x2C\x50\x7D\x2C\x50\x43\x2C\x90\x81\x18\x1B\xD4\x0E\xB0\x40\x41\xF4\xF4\xC0\x02\xFD\xF5\x83\x05\xE4\xC2\xA2\xB8\x10\x60\x4E\xDB\x79\xAC\x8E\x38\x65\x46\x9D\x50\x2E\x8D\x68\x52\x67\x94\x25\xBB\xA9\xA3\xFD\xFA\xE6\x5E\x91\x27\xBF\xE3\xEA\x39\xC8\x7A\xF9\x79\x9A\x82\xDB\xF1\xDF\xB3\xB5\x39\x48\x35\xA8\x36\x36\x6B\xAA\xC6\x1F\x83\xEF\x6A\x51\x99\xCD\xA6\x17\x73\x74\xD1\x3B\x2E\x44\xC7\xB7\x9A\x14\xE8\x9C\xEB\xC2\x77\xA3\xD5\xF1\xD4\x78\xF9\x7E\xFD\xAF\x20\x84\x7C\x30\xDE\xAD\x10\x02\x75\x89\x9F\xA7\x83\xB3\xB5\xD9\xAC\x0B\xC8\x9B\x3A\x87\x62\x65\xEB\x20\xBC\x12\xCC\x19\x52\x0A\xDC\x7E\x17\xE4\x5B\x77\xB7\x43\x15\xFB\xF5\x77\x43\x08\x85\x1B\x0A\x7B\xE0\xE8\x89\xA5\xD2\xE1\x21\x0A\x01\xCF\xC3\xD9\xF3\x14\x36\x9B\x3A\x81\x7C\x7A\xC0\xA4\x37\
x60\xBE\x5F\xFF\x6F\xFD\xB9\x25\x0D\x4F\xD6\x8D\x82\x53\x3B\x08\xF7\x80\xD9\xAC\x13\x28\x9A\xDA\x87\x64\x65\xEB\x0C\xF8\xF8\xE5\x04\xFC\x95\xAD\x33\x38\x76\x7F\x44\x9F\x47\xF4\xFB\x23\x7E\xB7\x00\x8F\x4B\x3E\x3D\xE0\x35\xD6\xA3\x33\x58\xFE\x3B\xA6\xFA\x69\x9B\x66\x2E\x71\x46\x89\x07\xBC\x86\xA0\x89\xFF\x93\x13\x84\x91\xFC\xBF\xD1\x45\x07\x70\x83\x54\x1A\x2C\xA3\x2C\xB8\x47\xE7\xE5\x4F\x9E\xEB\x47\xCA\x2D\x3E\xBC\x3B\xF3\x33\xD9\x59\x46\x32\xD1\x78\xFA\xE0\x43\x74\x74\x82\x10\x9A\x23\x6B\xFB\x2E\x23\x99\xDF\xE6\x4E\xF1\x59\x28\xF1\xDB\xDC\x29\xBE\x01\xFF\x50\x11\x71\x9A\x03\x4A\x66\x12\x71\xCD\x3E\xCA\x24\x54\xF9\x10\x51\xFE\x86\x18\xFC\x46\xD6\xEF\x23\xF1\x62\x27\x87\x80\x3D\xF8\x38\x17\x43\xAE\x8D\xAB\x51\xB7\xC1\xB5\xB2\x6C\x50\xBE\x83\xFD\xCC\x0E\x15\xBE\x3E\xC0\x39\x06\x22\x06\x76\xBC\x10\xD0\xE4\xF1\xD1\x01\x39\xEA\x01\x79\x50\x40\x4D\xA8\xEF\x46\x82\xFC\x09\xB6\xAF\x72\xEA\x41\xA9\xD2\x7C\xEB\x1F\x2F\x3C\xFB\x86\x5F\x1D\x84\x32\xFA\x90\xDC\x52\x78\xF6\x42\xEF\x29\x05\x9E\xAD\x9F\x3A\x4E\x85\xD1\x54\x63\xD5\x1D\x56\x35\x5B\x07\x7D\x46\xC1\xCD\x42\x43\x88\x78\x18\xBB\xBA\x10\x31\x44\xC7\x27\x74\xCA\x87\xEB\x4B\xD9\xE5\xB6\xFC\xA3\xD6\x19\x2F\x86\x02\x72\x4B\xBB\x50\xFE\x31\xCD\x5E\x72\x1A\xBA\x5E\xD1\x3C\xAD\x69\x9E\x19\xEE\x49\xBB\x2B\x3D\x67\xB5\xB5\x79\x6C\x66\xD6\x1C\xD9\x8D\x8D\x8D\x7D\x08\x8D\x82\xE4\xF2\x04\x42\xFB\x49\xAF\x29\xFF\x44\x3C\xFC\x63\xF3\x98\x00\x43\x61\x87\x87\x0B\x7F\x3E\x7C\x82\x1D\xC2\x27\xA1\x6F\x6F\x6C\x6C\x5C\xD5\x81\x88\xD0\x8C\x6E\xC6\x0E\x24\xE1\x1C\x90\x24\x78\xF4\x3E\xF8\xAB\xAD\xD5\xF2\xC1\x60\x60\xB5\x7C\xC7\xAF\x0E\xAC\x96\x9F\xC6\x9F\xF6\x39\x3B\xB1\x5A\x96\xAF\x3A\xC7\x4C\x6A\x3B\x7C\x09\xA1\xFD\x4A\xB0\x9F\xD5\xDD\x83\xE1\x6D\xD9\xB4\x76\xD1\x5F\x7B\x1C\x5F\x00\x59\xC0\xB3\x67\xBF\x00\xCD\x13\xB2\x86\x7D\xF2\x85\x6F\x9D\xFD\xC2\xBE\x27\xE6\x0B\x07\x04\x4A\x37\xCD\x7E\xE1\xA2\xF7\x78\xC0\x44\x96\x96\x0E\xF9\x1F\x26\x45\xE9\x9C\x53\x62\x39\xC7\xD2\xA1\x85\x86\x0C\xD5\x95\xF9\x25\xF9\xC5\x9B\x67\xC1\x68\x59\x19\x88\xF7\xA3\x18\
x05\xE1\x1E\x7D\x33\xE2\x6E\x49\x05\x0A\xAE\x5D\x93\xA4\x05\x29\x79\x5F\xEC\xD1\x41\xD3\xAB\xB2\x76\x3B\x27\x40\x25\x16\xEE\x94\x55\x6B\x27\x6B\x1F\x59\x07\x9F\x83\x1C\x48\x89\xAB\xF0\xFF\x8F\x4E\xA8\xA8\x8C\x3E\x0D\x71\x4F\xD1\x15\xB1\x02\x23\xB2\xEB\x4D\x15\xB1\xA2\xAB\xAB\xCC\x48\x0A\x8C\x88\x74\xBC\x24\x64\xB2\x2E\xE0\xF1\xA9\x70\xA3\x1D\xAA\xBA\x90\x19\x25\xE7\x0C\x4D\x29\xB9\xFC\xC3\x78\x81\xB8\x2B\x45\xE1\xBE\x4C\xA5\xE3\xBA\xFA\xEB\x83\xD5\xEB\x69\x35\xA3\xD9\xB4\x2F\x73\xD8\x7A\x8F\xD9\xFA\xA1\x1E\x26\x9A\xD6\xD7\xFD\xA3\x77\xFC\xC7\x39\xF7\x1C\xFF\x89\xC9\x69\xE5\xF5\xFB\x74\x97\xDA\x01\xDA\x9A\x30\xC8\x16\xB4\xDE\xE8\xEA\xF9\x04\x77\x77\x33\xD4\x1E\x78\xC8\xB0\x79\x2B\x14\xFF\xE6\x81\x12\x72\xEF\xC9\x5D\xE2\x81\xDF\xE0\x77\xB5\x05\x57\x93\xD0\xA7\xD2\xB7\x2D\x7D\xD7\x44\x5A\x66\xE8\xBB\x7E\x3C\xD4\x57\x77\xF4\x5D\x13\x6D\x9C\xA1\xEF\xBA\x47\xDF\x1F\x03\x6D\xFC\x58\xE0\x0A\x3A\x07\x10\xAE\x20\xD2\x16\xCA\xD8\x9F\xFA\xB5\xE1\x2D\x4B\x25\x3D\x97\x56\x0A\xCF\x7E\x70\xF8\x26\xB0\xE5\xF3\xB9\xEC\x36\xDD\xA2\x56\xDD\x51\xC7\x34\x8E\x0C\x19\xAF\xD6\x01\x84\xB7\x90\xF3\x36\x1E\x08\xFF\xB4\x0D\x4E\xD6\x09\xB3\xB2\x75\xCA\xC9\xA2\x29\x9B\x6C\x0A\x49\x03\x21\x04\xC2\x72\x90\xD7\x37\x7E\x75\xBA\x87\x6B\xAF\xB9\x47\x00\x21\xF7\x60\xAB\x28\xBE\xF1\xF1\x61\x53\xBE\x8E\x18\x10\x5F\x98\xE7\xFE\x20\x01\xE7\xA0\xC4\xEE\xB1\xEB\x0E\x78\xA2\xE2\xB6\xDF\xD8\x50\x59\x1C\x1C\xB0\x4E\xCA\x1F\xA7\x6E\x3E\x24\x8D\x65\x06\x12\x14\x24\x4D\x9D\xF1\x02\x53\xF7\x29\x4E\x49\x15\xC8\xF4\xA4\x5B\x65\x68\x42\x71\x43\x9E\xCB\xD4\x1D\x29\x55\x82\xD2\x4B\x88\xDD\x0B\xE9\xEE\xE1\xB3\x04\x7C\xA0\x0E\xD6\x3F\x8D\x8B\x13\x70\xD0\x32\x52\x81\x45\x20\xCB\x08\x21\x03\x1C\x23\x40\xE0\x15\xED\x32\x22\x7B\x76\x4D\x26\x11\x52\x0A\xDE\xDA\x97\x2F\x24\x54\xAC\xF3\x0D\xCE\x07\xAD\x85\x8B\xDF\x0E\x48\xDA\x5C\xE4\x98\x63\x88\xDB\x01\x71\x53\xBA\x01\x02\x1E\xA0\x4D\xED\x80\x1B\xC0\x82\xC4\xC5\x58\xA5\x0B\xF3\x56\xFA\x76\xFD\x14\x68\x2A\x16\x0E\x3E\x68\x92\x37\x39\x13\x29\xB1\x75\x5F\xEA\xA1\x15\x44\xB6\xBC\xA3\
x8E\xAD\x77\xC4\x06\xA7\xEB\xC4\x26\x27\x21\xA1\x74\x8B\xF1\xA4\x4E\x6D\x72\x47\x1D\x4F\xEA\xC4\x06\xFC\x34\xE5\xA7\x81\x3C\xD5\x83\xA7\x1A\x9F\xBA\x9F\x09\x21\xA7\xF5\x8E\x34\x54\x0C\x23\xA6\x6C\xE1\xAE\x96\x1F\x73\xE3\x56\x41\x7C\xBA\x8E\xF1\x63\x47\x4E\x4B\x3E\x73\xC7\x9F\x96\xBD\xF7\x54\x8E\x38\xAA\xE2\x3C\x30\xC4\x19\x52\xF0\xF0\xCB\x37\x36\xAE\x5A\x29\x88\x55\xDC\x47\x19\xF6\x92\x96\xB1\xA5\x32\xDD\x31\x7E\x3F\xB1\x08\xC5\xB8\xC1\x41\x56\x26\x94\xC8\x0D\xDF\xAC\x1F\xA1\x34\x72\x09\x95\x72\x7F\x52\x39\x46\xD2\x0B\x39\xAE\x35\xEB\x95\x1F\x41\x21\xAD\x4A\x5D\xD1\xF3\x5E\x23\xE3\xD2\x8C\xF4\xD9\x7F\xC3\x4D\xF1\x98\x66\xA4\x4E\xD6\xA7\x6B\x2D\x08\x9F\x03\x5D\xFE\xF2\x2B\x85\xA8\xF7\x4B\x77\xF4\x17\x85\xA9\x1C\x39\xA4\xF2\x5E\xB1\x5F\xE3\x3E\x59\xD5\xFD\xD4\x78\xFE\xDA\x9F\x39\x17\xD2\xE8\x66\xA5\xC8\xBA\xCA\x1A\x79\x7D\xBA\xF6\xE4\x98\x92\x73\x52\x79\x41\x38\x30\x33\xC3\x94\x53\x90\xBF\x82\xAE\xB9\xC7\x49\xAA\x63\xF0\x57\x6B\xAA\x6A\x5F\x58\x36\x4C\x88\xC0\x4D\xD1\xE3\x40\x1B\xB8\xE3\x61\x2B\xCF\x95\xC8\x9F\x1E\x0C\x34\x13\x2A\x1E\xAA\x8A\xF9\xEE\x1F\x3A\xA8\xB0\xE9\x1C\x52\x31\x34\x95\x6F\xA3\xD3\x97\x56\x29\xE4\x8C\x23\x69\x95\x1A\xC8\xE6\x39\xAE\xA4\x87\x28\xC1\xF6\xAC\x36\x7B\x01\xCB\xD9\x65\x9A\x81\xC4\xFC\x65\xD8\xC5\x86\x95\xAD\xC7\xC5\xCE\x6E\x07\x46\x7F\xCF\xFE\xF9\xE0\xA5\x24\xB4\xC2\x43\x72\x9C\x7D\xC5\xED\x45\x75\x9C\x33\x3F\x53\xD9\xE3\x40\x04\x64\xCE\xDD\xC0\xF4\x04\x22\x9B\x8B\xF4\xC7\x1B\x82\xB4\x15\xE9\x1C\x82\xF1\xC7\x59\x09\xE2\xCF\xD9\x00\x0A\x02\xD3\xC6\xDD\x46\x74\xE0\xD3\xFE\x6D\x94\xBA\xDB\x08\x5B\x0E\x6E\xA4\x4C\x48\x20\xA7\x98\x4D\x39\xBF\x39\x64\x7C\x23\x3D\x2C\x6E\x0F\x81\x69\xFB\x05\x33\xFD\x5C\x2F\xCD\xFD\x90\x2C\x3E\x2C\x0E\x1B\x6D\xF9\xFC\xC1\xD5\xD4\x8E\x95\x08\x19\xCE\xA6\xAE\xA7\x0C\xE9\x7A\x3B\x8A\xA6\x1B\x2A\x6D\xBB\xF3\xA1\x8D\xE9\x2E\xC2\xC1\xEB\xAC\xBD\xA6\xB2\xDE\x35\x95\xE1\x3D\x43\x2B\x2F\xDA\x6B\x2A\xED\x5F\x53\xD9\xE0\x9A\x4A\xA7\xAE\xA9\xAC\xA9\x47\x78\x8D\xAC\xD6\xE3\xF6\x9A\xCA\xF0\xE6\x9B\xBE\xA6\x64\x96\xAA\x5D\x55\
x31\x73\x5B\xE5\x30\xE2\xDB\xAA\x80\x71\xDB\x1E\xAF\xBD\xFE\x85\x55\xC0\xB8\xBB\xB0\xB2\xEE\xC2\xA2\x71\xC3\x1E\xB4\xE6\x5C\x5A\x29\xA4\xFD\x71\xBB\x7B\x2B\xEB\xEE\x2D\x63\x15\x67\xB9\x6D\xD1\x3E\x36\xDF\xE8\xF5\xB2\x0F\x7D\x45\x56\xD9\xA5\x27\xBA\xA8\x39\xE1\x1B\x59\x77\xC2\xD6\xB4\x13\x0E\x73\x68\x87\xBD\xD4\x6F\x9C\xE2\x7A\x71\x22\xA0\xB9\x19\x94\xFA\x9D\xC6\xE2\x27\x14\x70\x8E\x8E\x79\xAF\xE6\x16\x8D\xD7\xAE\x68\x3C\xD7\x8A\x9F\x71\x50\x72\xE5\xE6\x9C\x8B\xD0\x58\x4E\x9D\x64\x15\x59\x85\x80\x63\x4F\x6A\x6D\xD7\x0F\xB3\x0D\x8E\x24\x47\x4B\x67\x96\x40\x64\xAF\xA2\xD8\xBA\xA0\xFC\x34\x03\xAA\x85\x40\xAD\xEC\x06\xB0\xDD\x8A\xC1\xA2\xEC\x45\xFA\x6D\xCC\xA7\x93\xAE\x44\xE3\x3B\x75\x2F\xE6\xB4\xE7\xCA\x15\xB2\x2B\x57\xD8\xB9\x72\x55\x91\x38\x8D\x51\xE0\xA2\x38\x6B\x49\x15\xDF\x68\x91\x91\x6D\xAE\x25\x6D\x87\x19\xB0\xE2\xC7\x61\x64\xF3\x9D\xC5\xE5\xC9\xCC\x0E\xD7\xF9\xE0\x75\xF6\x5F\xF6\xC1\x7B\xC7\x8C\xC7\xEA\x42\xFB\xEF\xA3\x4A\x10\xE7\xF7\x6C\x87\xC3\x24\xEF\xDA\xF4\x58\x0A\xCD\xF9\x29\xE6\xE3\xA3\xF8\x14\x4E\xE3\xA3\xB3\x3C\x2D\x8E\x4E\x27\x28\x52\x24\x62\x3C\x93\xE9\x90\xCC\xCC\x4F\xB0\x79\x5D\xCD\x01\xAF\x7A\xB4\xE0\xBD\x4C\x1D\x15\x01\x6F\xD9\x41\xF7\xE9\xBC\x8C\xD6\xDC\xC8\x4E\xEE\x1D\x15\xFD\xF2\x39\xAE\xC1\x4A\x01\xD4\xAA\x8D\xCF\x99\x35\xC5\x63\x83\x8A\x0A\xAD\x4A\x6C\xCE\x3C\x53\x7C\x9F\x1A\x43\xD8\xB3\xCB\xEF\xEA\x65\x8E\xE3\x70\x1C\x7C\xE4\xCD\x3E\xD2\x53\x8F\xFE\xBD\xEA\x14\x04\xFB\x56\xEB\x40\x08\x52\x80\x9F\xBF\xAA\xA9\x43\xD0\xF6\xAA\xE6\x39\xDA\xBB\xDE\x97\x87\xD0\x90\xE4\x2F\x16\x04\x7A\x54\x36\x24\x7C\xF7\x5B\x25\xF8\x28\x71\x8F\x42\xEB\x53\x63\xD0\xF4\x20\x91\xB9\x06\xE4\x3A\x29\xD1\x43\x81\xDD\xE7\x16\xF3\x14\xCD\x49\x3F\x9A\x39\xFD\x98\xAF\x83\x75\x75\xA6\x33\xD0\x88\xCD\xD8\xE3\x92\x29\xF3\x95\x4B\x2E\x05\x08\x19\x0F\xFD\xCE\x6A\x2C\xB1\xF7\x92\x60\x21\xF0\x25\xB3\x8D\x57\x87\x6C\xEA\xCA\x6E\x2F\x7C\x9F\x99\xB7\x0A\xD1\x35\xBB\xAD\x0E\x25\xC1\x42\x60\x8C\x33\xDB\x04\x36\xC5\xD7\xDA\xAE\x37\xB5\x2E\x3C\x83\x6C\x10\x17\xA6\x09\x90\x45\x5C\x3F\x86\
x2C\xEA\x47\x3F\x35\x60\x51\x15\x5D\x60\xF6\xB3\xC3\xA7\x33\x0B\x7A\x54\xE5\x5B\xAD\x1A\x8B\x9A\x2D\xA0\xF2\xC2\x14\xA5\x64\x7F\xF7\x53\x7D\x4B\xC4\x7B\x41\xBD\xB7\xFC\xEA\xCB\x37\x36\xBC\x7B\xEB\xE8\x26\xCF\xBB\x78\xE9\xD2\xA5\x3F\xB1\xDF\xCB\x6A\x28\xC0\x27\xF8\xDF\xC3\xF6\x85\xED\xDF\x9E\xF7\x7D\x5C\x3E\xE0\xA7\x38\xE3\x31\xAE\xCD\xAE\x13\x4C\xC0\x9F\x98\xAD\x78\xE8\x14\x7D\xF9\x9B\xB4\x77\x77\x82\xB6\x2F\xDF\x08\x90\x49\xFF\xE3\x4F\xCD\x30\xE9\x7E\xBF\x8C\xED\x02\xD5\xA1\xDF\xF7\x08\xF0\xC9\x7A\x3B\xB5\xBB\x91\xDB\xDD\x84\x76\x37\xC6\xDD\x15\x91\x44\x76\x97\xFC\xDB\x12\xFC\x7D\x5B\x9D\xF6\x76\x57\x6A\x24\x26\xB8\xBB\x09\xE9\x97\x51\xD0\xA4\xDD\x4D\x38\xBF\x19\x24\xB2\xBB\x43\xEA\x63\xD7\xAB\x14\x21\xA5\xDA\xCC\x66\x1E\xEF\xCF\x3A\x65\x55\xF4\x41\x97\x7F\x43\x3C\x8D\xE2\x0C\x60\x43\xD2\x25\x09\xA4\x75\xF9\x33\xD4\x26\xE6\x5C\x12\x19\xCB\x60\xF3\xC0\x32\xE7\xBA\xF2\xE5\xBA\x8A\xA7\xF2\xF4\x31\x65\xF6\xAC\xF7\xCF\x95\x67\x55\x95\xB6\xD1\x65\xEC\xF5\x1D\x33\x73\x47\x94\xD3\xA7\xF8\x8C\x9E\x7F\x75\xE8\xFC\xAB\xE3\x96\x29\x43\x96\x4C\x51\x46\x86\x65\xE4\x89\xF6\xE8\x6B\x04\x8F\xD4\x21\x36\x35\x10\xE6\x9F\x7F\x70\x80\xE3\xED\x96\xBF\xFB\xC1\x3E\x62\xDA\x33\x4D\x65\x78\x9B\x78\x47\x28\x01\x25\xA8\x95\xE9\xAB\x83\x24\x97\xE1\xD5\x81\x92\xA8\x92\xCB\x43\x1B\x32\xE5\xC6\x2E\xB6\xBF\x8A\xB8\x53\x62\xD5\xE9\x06\xA2\x7F\xAE\x70\x62\x8A\xB2\xAA\x4C\x48\x31\x40\x2A\xBA\x2A\xD1\x07\x18\x3F\x2C\x25\xE1\x20\x6F\x7A\x47\x28\xAA\x08\x72\x92\xE3\x28\x87\x0A\xA2\x04\x3F\x4C\x70\xCE\x09\x27\x65\xB7\xAA\x8A\x48\xDB\x86\x60\x4F\xF1\x69\xE1\x19\x4E\xF1\xA6\x0D\x64\x2D\xE0\xA7\xED\x26\x24\xD8\x98\xA9\xD4\x21\xEB\x55\x6A\xDC\x55\x15\x5F\xEE\xAA\xA2\x6D\xDE\xE6\xAA\xEA\xDD\x4E\x90\x9A\x57\x97\xBE\x5A\x57\x67\xE2\x9E\x50\x1D\xB7\xF5\xC9\x75\xE1\xD9\x8D\xFB\xA7\xD4\xA9\xB2\x5B\x3F\x32\x7C\x3E\xB7\xE9\x1C\x92\x86\xCF\x85\xE0\x08\x3D\xA9\x0F\xDC\x0B\xFA\xBD\x17\xEA\xE4\x8D\xF2\xE0\xF3\xF6\x85\xE4\xCE\xFE\x76\x3E\x35\xD8\xF3\x4E\x02\x00\x9B\xC0\xB1\xD5\xE9\xDB\xEE\x7E\xBD\xFD\x3E\x29\xD4\x55\x67\
xF6\xD2\xA5\x4B\x97\xE2\x23\x4F\x66\x25\x19\x0A\xC5\x8A\x07\x69\x8F\x8D\x0D\x20\x9E\x4A\x7B\x1C\x3F\xE6\xB4\xC7\x14\xDE\x35\x10\x07\x5A\x4B\xBC\xB6\x81\x1C\xFE\x5C\xF2\x06\x21\x7C\xF2\xCA\x80\xA6\x6A\x2A\x06\xD4\xB7\x28\x42\x7B\x65\x75\x53\x29\x2E\xBB\x62\xA0\x40\x12\x55\x48\x18\xF4\x13\x57\xB3\x7D\xC6\x9B\xDF\x9B\xF2\xE6\x17\x9F\x7C\x05\xE9\xB1\x49\xAD\xC0\x1C\x2E\xFC\xBE\xA1\x4B\x81\xB9\xB9\x20\x03\x06\x44\xE2\x3B\xA1\x9E\xCC\x3C\xCF\xDD\xD6\x8D\x87\x5B\x37\x9E\xDE\xBA\xC7\x58\x04\xC8\x6D\x5D\xD0\xA3\xD7\x10\x89\x17\x08\x02\x83\x29\xB5\xDB\xB8\x54\x38\x80\x29\x3A\xD8\x9E\xAC\x5F\xEE\xD3\x41\xE2\x50\x0C\xE4\xCC\x4D\x28\xA4\x4D\xCC\x4D\xB5\x95\x7C\x15\x8B\x87\xDA\x9E\x6D\xAA\x94\xD9\x9A\xBC\xB1\x77\x36\x55\x81\xAF\x7C\x46\x21\x56\xFD\x05\xE5\x5B\x08\x8D\x46\x2E\x1A\x42\x83\xE2\xB2\x82\x54\x42\x82\x45\xED\xEC\x68\x27\x1F\x3E\xBE\x42\x12\x12\x03\x30\x9E\x8B\x2F\xE3\xBE\x6F\x37\x98\x2A\x23\xBD\x94\x06\x75\x6C\x52\x6B\x18\xCF\x2B\x89\x48\x6D\xC6\x6C\x24\xD5\x2D\xEE\x68\x17\x0C\xE0\x20\xEE\x86\x6B\xAB\x01\xD6\x9A\x8A\x46\x34\x30\xE2\xF8\x3E\xF2\x8F\x37\x50\xA0\x6C\xFC\x7F\xB1\x32\x81\x86\x57\x5C\xB2\x48\x35\xE4\x2F\x68\x9E\x40\x48\x8C\x76\x08\x89\x91\x01\x55\x65\x30\x02\x05\x23\x8A\xF4\x19\xCD\x85\x04\xB7\x11\x48\x8C\x5A\x48\x8C\x18\x12\xA3\x16\x12\x6E\xB8\xAE\x2E\xA2\x61\x9D\x9A\xC4\x39\xA6\xAC\xFE\x3D\xD3\x54\x05\x62\x25\xE2\x0F\xA1\x05\xA2\x04\xE2\x04\x5F\xE7\x1E\x28\x42\xFD\xB6\x74\xC9\x74\xAD\xBD\xC7\x5C\xB9\xC3\x4C\x99\x70\xCD\x8C\x09\xD7\x1C\x9F\x54\x66\xC6\x84\xCB\x4F\x87\x26\x5C\x33\x63\xC2\x1D\xB6\x3A\xDF\xB5\x1A\x98\x70\x5D\xAB\xA1\x09\x97\x9E\xB6\x95\x3B\x4C\x9B\xBB\xED\x09\xAC\x13\x5A\xCC\x45\x86\xC2\x0E\xEB\x84\x2A\x3E\xBA\x54\x5F\xA5\x98\x53\x5F\xA5\x70\xF5\x55\xA2\xA9\xFA\x2A\x51\x2F\x34\x26\x84\xC8\xC9\xA6\xF9\x1C\x9F\x8E\x2E\x95\xFB\x17\x95\xF6\xD7\xF5\x59\xB1\x96\xD8\xCF\xBC\x4B\x72\x3D\xD5\x94\xE3\x20\x00\xEF\xB6\x0A\x85\xBC\x87\xDC\x73\x89\x94\x63\xBE\x95\x75\xF0\xAB\x75\xC8\x92\x07\xF8\x5B\x55\xD4\x23\x41\x21\x97\x6D\xD4\x5B\x9C\x07\x4D\x6F\xD5\x31\
xA8\xAD\xB3\x10\x9D\xDB\xA4\x0A\xE4\x10\xF3\xDF\xC8\xCB\xA9\x3B\x70\x94\x5C\x1B\xE4\xA3\x14\x04\xE0\x21\xDB\xF7\xB8\x07\x44\xA4\x36\x17\x7C\x1D\xAD\xEB\xB3\x08\xC8\xEF\xB7\xEA\x30\x72\x28\x0F\x4E\x71\x28\x48\x0D\x47\xBE\xF2\xD4\xFF\xCF\xDE\xBF\x07\xD9\x75\x5D\xF7\x81\xF0\x7E\x9D\xF7\xB9\x7D\x4F\x03\x0D\xAA\xA5\x86\x3E\xAD\x73\x8A\x5F\x55\xD3\x45\x8C\x90\x1A\x06\xE0\x30\xAC\x08\x1B\x13\xA0\x01\x92\x22\x99\x4C\xB9\x4A\xA9\x4A\xAA\xF8\x07\xA7\x46\x3E\x0D\xCB\x6C\x10\x42\xA9\x6A\x48\x74\x4B\x80\x65\x3A\x15\x45\x72\x22\x65\x64\x8F\x13\xCB\x4E\x6C\xF8\x21\xD8\xB2\xAD\x44\x7E\xC8\x32\xC7\xD6\xD8\x9A\x89\x64\x73\x24\xCA\x91\x6D\x59\xA2\x14\xC9\xD6\xA4\xEC\x98\x9E\x51\x62\x25\xB2\x8D\xA9\xF5\xD8\xE7\x71\xEF\xED\x07\x44\x5A\x96\x1D\xDA\x25\xA2\xEF\xBD\xE7\xEC\xC7\xDA\x6B\xAF\xBD\xF6\x7A\xFC\x96\x2A\xFC\x17\x67\xB5\x55\x8B\x32\xFA\x4F\x47\xDF\x7A\xDC\x1D\xFA\x72\x63\xBD\xAB\x7E\x92\x2D\xE4\x78\xC5\x98\xF2\x2D\xB3\x89\x06\x84\xA4\x6B\x43\x02\x54\xC0\x2C\x1A\x10\x32\x1A\x13\x32\xA1\x2C\x47\x91\xEE\xA6\x80\xF8\x46\x9D\x0D\xA6\x9E\xF2\xD4\xA3\x1B\x9C\x01\x17\xDD\x68\x72\x9A\x7A\xC6\x53\xCF\x21\xE7\xBF\x23\x20\x17\x43\x3A\xA0\x65\xC2\xB4\x7C\xC1\x0D\x52\xEA\xB4\x3E\x61\x52\x70\x10\xA3\x60\x70\x24\x56\x82\x53\xC9\xF9\xB2\xAD\x23\xEC\x06\x1B\xA2\xBE\x50\x93\x8F\x20\x65\xB7\xA1\xC3\x5F\xD3\x1B\x90\x5D\x81\xFC\x2D\xD7\x09\xE9\x92\x51\x34\xB0\xDD\xE2\x2D\x66\x4F\x3E\xF4\xF6\xC0\x7C\xC8\xE4\x7B\x11\x59\x91\xD6\xFF\x45\x61\x45\x92\xAB\x64\xBA\x96\x62\x2C\xF8\x56\x72\x05\xEC\x8D\xB7\xFC\x4D\x93\x16\xBF\x11\xB9\x6C\x5B\x5F\xB1\xDB\x16\x79\x55\x0B\xAF\x7E\xCF\xB3\x3D\xE3\xF9\x1F\x7A\x96\xE2\x4C\x65\xEE\x15\x32\xE6\x8F\x3F\x2B\xF9\x88\xFE\xCD\x3B\x3B\xD5\x79\x4A\xF5\x0C\x8F\xAB\xDA\x21\xD7\x53\xB1\xA9\x0A\xB9\x95\xFD\xD7\x91\x4F\x2F\x42\x44\xBE\x18\xBB\xD6\xC4\x3E\xBD\xDC\xD8\xB5\x26\xF2\x8E\xBF\x8D\xF9\x5B\x27\xDF\x9A\xD1\xB7\x06\xBF\x0D\x1F\x23\xDE\x03\x6A\xA3\x25\x6F\x98\xE5\x6A\xD7\x14\xDD\x78\xB9\x49\x36\x9B\xE8\x52\x93\xB2\x83\x3A\xAB\xDE\x7C\x55\xCA\xD8\xA4\x7E\x1B\x4C\xF5\xA7\xEC\xED\
x15\x1B\x2E\xDD\xC0\xC4\x2A\x72\x6E\xC2\x1E\x95\xDA\x01\x83\x82\x59\x48\x21\xAF\x7E\x50\x3C\x45\x0E\x3B\x28\xCE\x07\x25\x17\x32\x48\xB9\xED\x89\x9A\x77\x3F\x76\x5B\x52\x38\xEA\xAF\x53\x12\xA4\xA6\x11\xFC\x09\x9D\x98\x13\x5C\x4C\x1E\x16\x95\xFC\xBE\x7F\xAD\x5E\xF2\xBA\x9E\xDA\x53\x5E\xD7\x0E\x26\x75\x04\x53\x86\x7D\xE1\x08\x74\xD9\x34\x95\x6C\x9A\x0A\xDC\x8D\x2B\x50\xBE\xE5\x7A\xED\x0A\xA8\xA0\xE2\xBF\x23\xA6\x00\x32\x37\x28\x98\x42\x72\x89\x26\x68\xC1\xE1\x1B\xF4\x50\x5D\x9D\xD6\xF5\xB2\x57\x75\x44\x33\x5C\x3E\x8D\x37\x9A\xE9\xD5\x66\x99\x82\xCA\xF1\x89\xBF\x29\x61\xC1\x4D\x0C\xF1\x5F\x37\x0A\xDF\x3D\xD4\x7F\x4F\x49\xCD\xCB\xD2\xD6\x32\x54\x70\x48\x86\x01\x91\x37\x6D\x13\x41\x86\x24\x82\x29\x5E\x97\xA6\xB0\xB4\x21\xF4\xCA\xA4\xBA\x62\xED\xE6\x89\x85\x1A\xC1\x70\x65\xC8\x80\xE1\xFA\x45\xC9\x70\x51\x70\x41\x90\xE8\x61\x41\x14\xD8\xB6\x51\xB2\x20\x0B\xFC\xBF\xE0\x8A\x4F\xFF\xF9\xF0\xF5\xD7\x9B\xAB\x89\xA7\x99\xA1\x03\x91\x32\x8E\x0C\x4F\x36\xEB\xA8\xE3\xE2\x7C\x0F\x2E\xCE\xC6\x5C\x9C\x13\xD1\xBC\x86\xE8\x52\xBD\x90\x4D\xF5\x3C\x9B\x16\xDE\xE0\xFF\xFE\xCA\xB0\x69\xBA\x90\x4D\x15\x14\x83\x6D\x3D\x17\x3B\xE0\xD9\x4B\x58\xF4\xCC\x3C\xF7\x08\xAA\xF5\xC3\x75\xD2\x03\xE9\x52\x8C\xD7\xA5\x58\xBC\x2E\x0B\xDA\x54\x90\x42\xC1\xC3\xF2\x6A\x83\xEA\x5D\x15\xFF\xC6\x59\x3D\xCC\xC3\x3B\xBE\xD9\xD8\x61\x98\x11\xDF\x3E\x6E\xE0\xE9\xD0\x38\x0A\xF3\x56\x83\x5D\xF0\xAB\xCF\x8E\x95\x0D\xF9\xFA\x37\x66\xBE\xE6\xD0\x14\x05\xA6\x7A\x0B\x76\x8E\x57\x4E\x0D\x1A\xE9\xA7\xCF\x4F\x34\xB8\xEB\x94\xA1\x14\x81\x61\xF0\xE0\x88\x5D\x0B\x5E\x55\x57\x59\xEA\xD2\x17\xFC\x2E\x85\x63\xE9\x4B\xFE\x89\xAD\x36\x7C\x85\xCD\x19\xA0\xE5\x30\xE7\x09\x05\x29\x9A\xE9\x70\xF8\x52\xFF\x15\xDE\xB2\x34\x77\x31\x71\xD8\x6D\x19\x75\x1E\x38\xB0\xFE\xCB\xCF\x8A\x6D\xCE\xCE\x46\x21\x7E\xF1\x59\xA2\x76\x88\x19\x7A\xDB\x27\x6F\x3D\x0A\xB1\x6B\xBE\x92\xE6\x57\xE6\x9A\x1F\x44\x21\xBE\xA0\x1E\x40\x7A\xB8\x7D\xBE\x07\x78\x21\x3D\x78\x5D\x47\xD8\x4D\xF0\x6C\x40\x54\xBC\x25\xD6\xAB\x81\x91\x76\x76\xD2\x2E\x4C\x14\x34\xF3\x6A\xE3\xBC\
xBE\x4C\x0E\x87\x1D\x7D\x3F\x97\xEC\xD4\xAC\x7C\x6D\xA3\x7A\x85\x12\xA8\x89\xC1\x5D\xA8\x13\xAA\xC4\xD6\xA4\xA8\x76\x65\x54\xA5\x12\xBF\xCD\x29\xCA\x8F\x64\xCD\xCE\x8E\x6B\x81\xC0\x34\x8C\x77\x17\xEB\x89\x7C\x85\x7F\x2F\x79\x85\x82\x05\x54\x5D\x81\xAA\x97\x61\x5A\x1F\x22\xCB\xA8\xA3\x9C\x6B\x7A\xAC\x3E\x0C\xCB\x38\x8C\x7A\xC5\x9E\x82\xC3\x14\x5F\xFE\x76\x0E\xC7\xC3\x9F\x2C\xC4\xB8\xED\x57\x50\x6C\xAE\xE0\xF6\xC6\x37\xBD\xAA\x8F\x40\x59\xDF\x06\x4B\xF5\xCB\x20\xAA\x57\xA9\xC9\x23\xB8\xEF\x8F\x80\x7B\x68\xA2\xE0\xB6\xDA\xC2\xCB\xEA\xC3\xB0\xCA\x8D\xA2\x92\xF7\x72\x6C\x3B\x64\xC8\xC0\xCB\xF9\x1F\x12\x29\x87\x3D\x0D\xA2\xEB\xA2\x80\xDB\x20\x6B\xEB\xDB\xE0\x65\xF8\xCF\xCB\x60\x15\x09\xB2\x0A\x47\xC0\x6D\x84\x22\xB0\x30\x85\x43\x64\x6C\xA7\x94\x51\x9E\x86\x85\xE5\xFA\xF0\x82\x69\xC4\x3C\x8D\x74\x38\x0D\xD4\x08\x85\x06\xF5\x61\xA8\xBE\x96\xE9\x17\xF5\x6D\x30\xF9\xFA\x4F\x7F\xB4\x72\x16\xAA\x03\x4F\xB9\x9B\xEF\xAD\xD0\xA9\xA0\x9A\x1D\x6D\x5D\xC1\x32\xFE\xB0\x0C\x87\x50\xAA\x1E\x22\x43\x3E\xC1\x36\xB4\x35\x1E\x36\x0E\x05\xBE\x66\x49\x8B\x43\x4B\xC3\x95\xF4\x99\x38\x4A\xB6\xCD\x15\xB7\xAD\xAF\x74\xD5\xB6\xDE\xB1\x63\x3A\xEF\x23\xDD\x91\x83\xEC\x53\x2C\xF6\x50\x66\x7E\xE0\x93\x73\x97\x3A\x73\xD9\xEB\x76\x18\x24\x48\x42\x4A\x77\xDB\xC6\x78\x7D\x89\xC0\xF9\x29\x0C\xD2\x5C\x22\x73\xDA\x76\xDB\xE4\x37\x6A\x3C\x67\x51\x91\xB8\x81\x3B\x84\x2C\x6B\x4B\xB4\x64\x4B\xFE\x55\x5B\x04\xE9\x4C\x78\xD0\xCD\x94\x6A\xD5\xC3\x92\xB7\x97\xFD\x9F\xDC\xBC\x79\x53\x6F\xB5\x4D\xB5\x6E\x14\x9E\xBF\x84\xD1\xBE\x74\x03\xCA\x2B\xCD\xF2\x75\xC8\x61\x19\x8A\x9D\xEB\x17\x36\x9B\x43\x60\x2E\xE0\x36\x3E\x4C\x45\xA3\x97\x18\x68\x6B\x89\xBE\x24\x09\x5B\xAF\x90\x08\x77\xFE\x1D\x3B\x9A\xBD\x8B\x55\x5B\x1F\xA1\xA3\x6A\x85\x82\x68\x9A\xDB\x5A\xBF\xB3\xA3\x21\x85\x68\xD3\x9B\x4B\x10\xF9\x1D\x68\x9B\x97\x41\x7A\xBE\x59\x5D\x6B\x5E\x1E\xE2\xD1\x14\xDC\xD6\xD6\xAF\x80\xC3\xF5\x1A\x1C\xAA\x23\x38\x52\x1F\xB5\xA7\xE0\x68\x73\x04\x5E\x01\x6B\x78\xD0\x37\x6B\x20\xCF\xD6\xAF\x84\x35\xB0\xCD\x51\xE8\xDE\x8D\x21\x1A\x4E\x49\
x6A\x49\x46\xA3\xC9\xAF\x1B\x45\xE1\x2A\xD1\xFC\x14\x23\x99\xE2\x1A\xBC\xB2\x46\x1D\x8A\x0E\x76\x50\xDC\x2F\xDC\xD6\xC2\x2B\xFB\xAE\x2A\x40\x82\xC1\x34\x34\xF7\xB2\x3A\x82\xA3\xB5\x85\x55\xD9\x72\x0A\x26\x81\x30\x78\x8C\x1D\x26\x1D\xE5\x36\xAF\x6A\xEB\x77\xA0\x7E\x79\xA0\xD3\x51\xA6\xD3\x2B\x21\xAB\xD7\xEC\x29\x78\x65\x7D\x04\x6E\x63\x6C\x66\x38\x8A\x14\xDA\xD1\xB0\x06\x2F\x47\x1A\xBD\x62\xAD\x79\xA5\x04\xA6\x1D\x85\x57\xEE\x36\x8C\x97\x23\x51\xEB\x97\xC3\x9A\xDF\xD9\xD9\x6E\xEB\x35\x6E\xA3\x25\xA5\x09\x7F\xB1\x70\xB4\x7E\x25\x12\x15\x5E\x41\x3C\x3C\x41\x9E\x9F\xC0\x12\xAA\x35\x4B\x14\xCA\x5F\x14\xF3\xD1\x8D\x2C\xF3\x71\xC8\x9D\x4B\x26\x9A\xE2\x17\xDD\x61\xE9\x16\x1F\x96\xEE\x05\x1C\x34\xEF\x9A\x52\x40\x3D\x28\xFF\x09\x72\xD0\x1C\xAF\xB5\x7F\xEE\x93\x64\x39\xA5\xAA\xCE\x9A\x15\x5E\x64\x09\x87\x27\x05\x87\x78\x82\x3A\xDF\xB8\xB5\x9A\xB6\x81\xDF\x7E\x9C\x22\x05\xD7\x1A\x4D\xC0\x31\xEB\x86\x35\x30\xE5\x9F\x78\x88\x2B\x93\xFA\x8F\xEB\x37\x34\x91\xFF\xB8\xBE\xD0\xC4\x9B\x35\xF7\xF1\x9E\x7F\xFB\x42\xFA\xE0\xD7\x37\xBD\x79\x63\x9D\x50\x9E\x4F\x82\xAC\x47\x5F\x52\xB8\x57\xDC\xD6\xC6\xAF\x90\x18\x8E\x59\xCD\xA2\x8E\x62\x8A\x08\xA3\xE9\x91\x01\x41\xBF\xA1\x49\x41\x9F\x6F\xD4\x5A\xED\x20\x05\x7D\xA1\xD6\xA0\xF0\x04\x00\xCA\xB8\x75\x75\xCC\xEA\xAB\xF5\x1F\x44\x9D\xA7\xF0\x9F\xA0\x5B\x3E\xFE\xFE\x86\xC6\xA2\x8C\x89\x28\x80\x44\x5F\x38\x4B\xF1\xBF\xDA\xE7\x6D\x63\xDF\xD0\x28\xB0\xFC\x93\x02\xDB\xFD\x54\xB6\x8D\x7A\x43\x83\x2D\x9A\xB6\x91\xDF\x63\x50\xDD\xEF\x55\xF7\xBB\x5B\xF8\xFB\xA1\x7D\xDE\xBF\x6D\x9F\xF7\x8F\x76\xBF\xC7\x0B\x7F\xFF\xFF\xED\xD3\xFE\xED\xFB\xBC\xBF\xBE\x4F\xFF\xDF\xB4\x4F\xFB\xFF\xCD\x3E\xEF\xDF\xB5\x4F\xFF\xF7\xEC\xF3\xFB\xBD\xFB\xF4\xFF\x4B\x6A\x9F\x06\x7E\x45\xED\x33\xC2\x8F\xA8\x7D\xBA\xF8\xB5\xFD\xBA\xF8\xF8\x7E\x5D\x7C\x6A\xBF\x16\x9E\xEB\x1F\x48\x17\x3E\xF0\x85\xFD\xBA\xF8\xDD\xFD\x66\xF1\xEF\xF7\x6B\xE1\xF7\xF7\x6B\xE1\xF9\xFD\x5A\xF8\xCA\x7E\xB3\xF8\xB3\xFD\xE8\xF0\x16\xBD\x4F\x17\x4F\xE9\x7D\x5A\xF8\x07\x7A\x9F\x59\xFC\xA3\xFD\xBA\x78\xE7\x7E\x5D\xFC\
xD3\xFD\xBA\xF8\xDE\xFD\x5A\xF8\x81\xFD\x1E\xF8\xA1\xFD\x06\xF9\xC3\xFB\x8D\xE1\xBD\xFB\xB5\xF0\x53\xFB\x8D\xE1\xFD\xFB\x75\xF1\x73\xFB\xB5\xF0\xA1\xFD\xC6\xF0\xF4\x7E\x5D\x7C\x78\xBF\x16\x7E\x65\xBF\x16\x9E\xE9\x1F\xC8\xDB\x86\x64\xBF\x78\xD8\xF9\xB9\x8D\xA0\x93\xF0\x01\xB2\x31\xD1\x85\x57\x9C\x63\x65\xC0\xF8\xE3\x67\x1B\xB3\x46\xC1\xC1\x78\x99\xC6\xC3\x90\xF2\x51\x38\x0D\x54\x11\x56\x93\x99\x76\x57\xE4\x5C\x81\xAA\x7E\x8D\xF4\x55\xFA\xF3\xD7\xFB\x3F\xE7\xBE\x5D\x52\xFE\xB9\x4F\xF5\xFA\x6D\xA6\x42\x23\x3F\x3C\xA8\xC5\xCB\x66\x1F\x05\xFA\x0E\xA3\x4E\xE2\xE1\xEA\xD3\x16\xB4\x4F\x43\x38\x1E\x85\x74\xAF\x9B\xB4\x4B\x79\xD1\xEB\xC6\x35\xDA\xE7\xF7\x8B\x15\x56\x5F\x62\xF0\x25\x54\x3B\xD9\x49\xFC\xD8\x43\x13\x32\xB3\xB5\xFE\xB1\xAD\xC6\xF1\xB8\x6A\xDB\x59\xB2\xC1\x71\xD1\x50\x3C\x04\x29\xE3\xD1\xF2\x51\xDF\x76\x3A\x68\x3F\xDD\xFF\xC5\x68\xB3\x1D\x32\x6D\x28\x92\x9B\x51\x51\xA5\x80\xAA\x0A\x13\x90\xF4\x8D\x03\x4C\xC3\xEE\x36\x85\xA0\xDF\x2B\x0A\x51\x03\x2D\xA3\x21\x0B\x7C\x8B\x57\x02\x45\xF3\xC4\x76\xFC\xE5\xD6\xFF\x8F\x0F\x4D\x70\xD4\x2B\xF8\xBF\xFB\xD7\x64\xB2\x86\x27\xDB\xBF\x3E\x9C\xB6\x19\x4D\x1B\x9F\x19\xB4\x49\xC1\xC2\x29\x91\x01\xEC\x1A\x8D\x90\xAE\x18\x60\xD7\xAA\xB7\x5D\xA5\x4C\x9C\x62\x48\x98\xAF\xF2\x12\x76\x71\x62\x8D\xA5\xEC\x03\x42\x01\xE3\x94\x65\x8E\xE6\xAB\x1D\x18\xBC\xBB\x52\x32\x86\x4C\xA9\xD4\x85\xFF\x36\x70\x9B\xA8\x4F\x6D\x7A\xDD\x92\x35\x08\x2F\x7F\xAE\xB3\x04\x9E\x61\xDF\x7F\x4A\x37\x1F\x5E\xDC\xA8\xF0\xDF\x46\xD4\x73\xFE\x77\x6F\xDE\xBC\x99\xA0\x52\x88\x4A\xA8\xBE\x84\xDD\x82\x03\x73\xFF\x5A\xE3\x98\x10\xE0\x7C\x71\x9E\xAB\xC1\xC8\xF2\xBB\x10\xD7\x30\x1C\x48\x01\x11\xB3\x6A\x37\x62\xEC\x5C\x88\xE4\x88\x37\x1C\x98\x9E\x4A\x61\xF2\x6F\xA0\x44\x86\xCE\xDF\xDD\x88\xED\x92\x0D\x93\x6C\xDD\x1A\x2F\x5A\x58\xE3\x29\xC7\x57\xE2\x43\x0F\x89\xD3\x1A\xEC\x26\x8E\xE7\x2A\xE7\x24\xD0\xA4\x61\xF0\xDE\x09\xE3\x8A\x2F\x1A\x1D\x6F\x77\x11\x6A\xE3\x6E\x1B\x27\xA4\xE2\x15\x60\x93\x0A\xF6\xC1\x49\x65\x38\xA4\x3A\xF6\xB9\x24\x33\x40\xBC\x89\x3D\x2B\xFF\x6D\x10\x6D\x42\
x8C\x37\xD2\x04\xA2\x4D\xB2\x9F\x05\x63\x09\x91\x3D\xED\xC8\x9E\x06\xB2\x47\x43\xB2\x47\x48\x76\x4B\xB0\xDC\xF6\xFC\x5A\x63\x85\xEC\x76\x11\xD9\x63\x26\x7B\x0A\x71\x20\x3B\xB5\x96\x53\xC1\x4C\x11\x15\x42\x6F\x59\x80\xC6\x76\x21\x10\xBB\x32\x03\x4E\x06\x35\x72\x1D\xE0\x5C\x70\x36\xB5\xEE\x83\x10\x74\x88\xB5\x17\x8C\xD7\x6E\x11\x08\xCD\x87\x29\x3C\x62\xE9\xE7\x8C\x4E\x48\x2A\xA1\x00\x19\x90\xD9\xF4\x43\xC0\x0F\x4C\x64\xCB\x44\x76\x1C\xD4\xA2\x2F\xD7\xCE\xE7\x0C\xCA\xE2\xA8\x66\x98\xBE\x7F\xAD\x11\x11\xE4\xB7\x5B\x9F\xA3\x0A\x8E\x5F\x09\x72\x24\x75\x93\x6F\x90\xEF\xCF\xCA\x94\x78\x8E\x71\x28\xC1\x45\x12\x89\xA9\x18\x91\xF9\xC0\x52\x44\xDD\x99\x89\x29\x16\x8C\x2C\x25\x4F\x9F\xF5\x9A\x5E\xC6\x7F\xB8\xD5\xD8\x2B\x01\x0B\x84\xC1\xD3\x4E\xAA\x6E\xFB\x3F\xD3\x5B\x38\x78\x09\x82\xD7\x6D\x93\x72\x72\x69\x04\x71\xB0\x97\xD0\x72\x91\xB7\x34\x96\xC5\x32\xA7\x18\x2A\x5A\x41\xC4\x6B\xE6\x4E\x48\xB4\x11\xF2\x3C\x6E\x1F\x12\x28\x4E\xC8\xDD\x53\xF8\x3B\x6D\x60\xE4\x05\xFC\x3B\xE6\x5E\xDB\x71\xAF\x15\xEE\x95\x0D\x17\xD1\x86\xA1\x22\x57\x67\xE6\xD8\x76\x86\x41\x6C\x3B\xD8\xEC\x94\x2E\x3B\x94\xDA\x92\x11\x87\x63\x8F\x28\x29\x9D\xB3\xFF\xAD\x9C\x28\xF2\x9D\x2E\xFC\x1B\x70\xB3\x70\xB2\xED\xC2\xCD\x92\x74\xBD\x26\xB8\x59\xD2\x85\x9B\x85\x80\xDB\xC1\x9D\x9F\x95\x51\x69\x01\xE9\x60\xB3\xB0\x5B\x1A\xB7\x55\x27\xA3\xDC\x78\x1E\xFD\xE6\x49\xFA\xCD\x13\xCB\xC9\xC6\xEB\x91\x8A\x10\x73\x64\xD4\xA3\x0C\xF0\x59\x66\x5F\xE1\xE0\x7E\x5D\xFD\x63\xFC\x5C\x7D\x17\xCA\x9F\xE2\x7B\x82\xA0\x19\x41\xEC\xCC\xB2\x9A\x63\x90\x22\x59\xB0\xC6\xE1\x8D\x54\xA8\xEF\x02\xF5\x7D\x5E\xF3\xF9\xD1\x58\x64\xF3\x08\x22\x92\x42\x44\xCE\x6F\xAB\x0D\x38\xFF\x6D\x14\x6F\x47\x8C\xDB\x4B\x74\x5D\x04\x91\x1E\x71\xEE\x21\x6A\x38\xD1\xF9\xB5\x26\x12\x9A\x45\x44\x33\x54\x49\x98\x66\x4D\x44\x68\x0D\x7C\xD6\x25\x74\x38\x10\x69\xE2\x9E\x34\x09\xD3\x44\x4E\x3F\x59\xE1\x16\x85\x42\x9F\x7A\xC3\xA9\xB7\x2D\x6E\x07\x11\x10\xB6\x6D\x14\x52\x8D\x24\x39\x7E\xF7\x61\xD3\x1D\x78\x33\xB4\x11\xAF\x3A\x1E\xE7\x76\xA0\x87\x08\x89\x1A\xC7\xEE\x65\x73\
x3E\x90\x89\xB3\x55\xD8\x0B\x87\xCB\xFA\x0E\x59\x67\x1B\x8E\x9A\x59\x16\xE5\xA3\x69\xCA\xA3\x14\x75\x67\x8A\x74\x8C\xC2\x31\x6A\x37\x1F\xA2\xBC\x4A\x30\xA8\x1A\x3C\xDA\x0D\xC4\x09\xCD\xC3\x51\x6C\x47\x84\xB6\x4C\x68\x83\xAA\xDF\xFD\xB3\x34\xC6\x0D\x24\xDA\x44\xC7\x7B\x14\x71\x66\x99\xC2\xAE\xA7\xB0\x1C\x9D\x9A\x09\x1D\x09\xF3\xE9\x45\x27\xE7\x92\xE4\x94\x54\xEF\x24\x86\x9B\xE1\x42\xFE\xF2\x86\x16\x9D\xAB\xE7\x3E\x89\x5C\x70\xA1\x28\x84\xBE\xCC\x96\x54\x1C\xBA\xDE\x0C\x73\x40\xCE\xE0\x79\x13\xEE\x01\x3D\xAD\x99\x63\xC5\xE7\x34\xFF\x34\xD2\x85\x22\x99\x91\xD3\xDC\xFD\x4D\xBC\xD6\x44\x67\xE4\xFC\x26\x68\xE3\x7F\xC4\x31\xEF\x14\x12\xDA\xCF\xC3\x6F\x93\xCD\xC6\x9D\x5F\x1B\xA8\xB5\xD7\x27\x3A\xDF\xB6\x28\xD9\xDF\x44\x30\x2E\xFE\x09\x52\x69\xEC\xA9\x39\xB6\xD1\xA0\x36\x9B\xC8\x9B\x37\x36\xF1\x52\x9C\x65\x4A\x1B\x67\x0B\x7A\xA1\x89\x28\xFE\x86\x0F\x9B\xE4\xC1\x49\xCA\x9B\x1B\xC5\x51\xC2\x82\x95\x9E\x4A\x24\x1A\x87\x67\x27\x8F\xA7\xE7\x58\x72\xF3\x5B\x11\x03\xCA\xD1\xCB\x42\x03\x7A\x2A\xEE\x1A\x75\xFC\x14\x5B\xC5\xE8\x3D\x91\xE9\x0A\x12\x8A\x80\x21\x96\x4B\xF9\x41\x6A\x87\xF2\x10\x22\x48\x1F\x9C\x24\x7D\x0F\x69\x3F\x2E\x46\x7A\x4E\x25\xF7\x7D\xD8\x6B\xC2\x51\x8A\x31\x50\x1D\xB5\xB8\x8E\x58\xD8\xCA\x01\x42\x83\x89\x21\x79\x90\x28\x9F\xF4\x3D\x76\xD0\x01\x10\xF1\xE8\x92\xEE\x05\x2E\x3D\x1A\x1E\x8E\x04\x06\x01\x92\x51\xFB\xFD\xE3\xAE\x80\x04\x99\x3A\x65\xFA\x46\x0F\x4E\x62\xFE\x21\x85\x94\xE7\x8C\x23\xE7\xE1\x3A\x24\x52\xF7\xB3\x4C\xB4\x27\x5F\xBC\x88\x7C\xFE\xAB\xEA\xBE\xEE\x18\xEA\xA7\x1F\x8F\xA6\x1F\x41\x4C\x6B\x44\xE8\x6A\x73\xED\x8F\xA7\x1F\x0D\xA6\xDF\x2D\x4E\xD2\xBD\x30\x9E\xBE\xA0\x40\x44\x3C\xFD\xAE\xFD\xFE\x71\x9A\x3E\x41\xE1\x94\x6D\x93\xE1\x5D\x26\xF6\x69\x9D\x93\x7B\x20\x23\xE6\xE1\x2E\x72\x1C\x3C\x85\x4B\x44\x6D\x93\x78\xD7\x76\x4D\x53\xD1\xD1\x88\x73\x2A\x9E\xA0\xB8\x28\x48\x24\x42\x12\x27\x45\xAB\xD9\x52\xC4\x54\x21\x01\x54\x05\x64\x78\xA7\x8D\xB0\xB7\x04\x72\xFC\x2D\xA7\xCE\xEA\x18\xA2\x3A\x2B\x39\xBC\x28\xF6\xE6\x5B\x43\x10\x02\x11\xF2\x9F\xBD\xEA\x1C\x9B\x67\
x21\xF6\xEE\x5B\x29\x34\xAD\x25\xD0\xF5\xA8\xC5\x7D\x5A\x7D\x0F\x5D\x47\x28\x20\xD8\xF5\x41\x69\xF9\x70\xDF\x70\xEE\x4C\xC1\x74\xA3\x5A\xFC\x72\x1C\x9E\x30\xA8\x71\x15\xFC\x74\xBF\xA2\x26\xF0\x7D\xDE\x73\xDD\xCC\x8E\x70\xB4\xFF\xF0\xE5\x6E\xE9\xF0\xF1\xF0\x25\xA7\xEE\xCA\x1A\xE6\xC3\x57\x0A\x6F\x6A\x71\xE6\x51\x77\x19\x77\x9E\x60\xDB\x41\x4B\xAF\x23\xCB\x77\xCC\x88\x3C\xD8\x42\x29\x6E\x33\x39\x37\x89\x20\x07\x77\x16\xB7\xAD\x4F\xDB\xB0\x68\x90\xF7\xE2\xE2\xC1\x49\x10\x10\x32\xC9\x32\xA1\xE8\x76\x24\x3F\x6A\x99\x65\x56\x50\xC2\x0B\x51\x1E\x5F\x4B\xEB\x88\xF7\x9B\xEA\xE6\x92\x72\x3C\x44\x46\x29\xEB\x14\x68\x5D\xD0\x48\x9A\x12\xE2\x87\x26\xDA\x9E\x82\x92\xD7\x1D\x87\x5F\x27\x54\xCB\xD0\xC9\x00\x52\xE2\xBB\x82\x23\xE6\x4A\x82\xB7\x7A\xA2\x6D\x62\x9E\x2E\xF5\xC2\xF4\x7C\x2D\x31\x67\x59\x5A\x5C\x1A\xEA\x29\xE6\x50\xBB\x98\x17\xF8\xEC\x5A\x9D\x4B\xE7\xC2\x1D\x38\x02\xBE\x4C\x23\xE7\x92\x90\x09\x2C\xF0\x8A\xD2\x71\xA9\x05\x3A\x8F\x64\x45\xCB\x8E\x1E\xE5\x70\x45\xC3\x8C\xCC\x29\x28\x68\x4D\x4B\x30\xD5\xFF\x7A\x35\x68\x0D\xA5\xF0\x69\xF8\x92\x33\x17\x34\xAA\xB9\x71\x19\xE1\x0A\xC6\x90\xB0\xE2\x5D\x82\xDA\x04\x0D\xE5\xE6\x83\x5D\x3B\xDF\x4D\xA1\x05\xD8\x46\xAD\x70\x6A\xF8\x17\xB5\x25\x3F\xD4\x74\x34\xD3\x12\x1A\x5C\x72\x7B\x8A\xD6\x8E\x55\x5D\x26\x65\x32\x24\xE6\x83\x1D\x29\x73\xFA\x69\x40\xCC\x73\x1D\x29\xCF\x53\x6D\x2F\xDA\xE7\x23\x52\x12\xDD\xA2\x9A\x36\x8F\x54\x86\x59\xB2\x8A\x8A\x36\x47\x35\xFD\x5A\x14\xBF\x6B\xB5\xDB\x66\xD4\xF4\x38\xD8\x54\x54\x93\xF4\xA7\x88\x57\x94\xC8\xD4\x49\x53\x4A\x23\xD2\x4C\x49\x13\x78\x3E\xE3\xD7\xC2\xE1\xC3\xC2\x2B\xE6\xA3\x9F\x16\x95\x3A\x48\xBC\xA1\x0C\x19\x11\xE3\x09\x93\x4D\x5A\x4A\xA5\xA5\x4E\x96\x85\x77\xA9\x79\xC3\xA3\xA2\x96\x35\xFF\x2A\xAF\x98\x3A\x2B\x20\xE5\x8C\x68\xCB\x83\xA0\x86\x0D\x37\x6C\xE9\xE1\x50\x76\x3A\x91\x71\x9E\x93\x4C\x1C\xEE\xAE\x6B\x50\xE4\xDB\x90\x04\xF2\xA8\x69\x67\x46\x46\x8D\x67\xDE\xB6\x9C\xE9\xC4\xDB\x53\x06\xF0\xE0\x24\xF4\x2B\x5B\x30\x0C\x2D\xE1\x51\x74\xDD\x4F\x0B\x21\x21\xD8\xF0\xDC\x70\x98\x36\xF4\x3D\x1D\
x8C\xD1\x0E\xC6\x68\x7A\x19\x47\x63\x99\x8A\x0A\xA6\xC2\xE3\x19\xC5\x4F\x14\x90\x15\x9F\x8F\x75\xBC\x8D\xD7\xC2\x39\xE5\xC3\x9B\x37\x2E\xC5\x51\xA4\xB4\xB1\x4E\x54\x0A\x1A\x84\xE3\xC6\xA3\x07\x27\x72\x9D\xA0\xFA\x05\xB4\x20\xD3\x81\x82\x42\xC9\xE4\x74\xBE\xB9\x01\xC5\x0C\x38\x7E\xCF\xF0\x79\xE6\x3A\xD5\x43\xC6\xEC\xBA\x66\xE5\xA9\x90\xFC\x49\x6F\xAA\x30\x8F\x48\xE4\xF4\x74\x7C\xB3\xA1\x74\x32\x9C\x7F\xFC\x20\x25\xE6\x48\x2F\xF1\x70\x74\x9C\x37\x97\x8A\x25\x69\xD8\x77\x74\x4E\x62\x6E\xA9\x04\xB2\x23\xE8\x7B\x6E\xA4\xBB\xC1\x3A\xD4\x0A\x74\x38\x8F\xB9\xCF\xC0\xB9\x0E\xCC\x83\x33\x57\x5E\x39\x81\xA3\x7E\x36\x1C\xD3\x3B\x6A\x7F\x70\x43\xC6\x0D\x68\x0A\xCA\xC2\x69\xA8\x39\x13\x6E\x6C\x31\xCF\x5B\x71\x9D\x6D\xA6\xEB\xE0\x67\x99\x6A\x4F\x44\x33\x4F\xC4\x7D\x44\x25\x3F\xF2\x0D\x4E\x19\xD2\x4D\x62\xB2\xB3\x7A\x14\x1C\x69\x97\xDF\x18\xB3\x70\xA7\x8E\x52\x41\x50\xA4\x20\x4E\x76\xCB\xCA\x6E\x23\x0D\x85\x6E\xF1\x02\xA4\x88\x97\x48\x29\x87\x20\x1A\x8A\x25\x0D\x05\xAF\xC4\xA2\xE7\xE2\x25\x33\xF1\xE9\xD9\x89\xA0\xBB\x53\x2C\x1D\x95\xB3\x47\x8A\x51\x28\x52\x4A\xDD\xD7\x06\x6C\x1D\x97\xEC\x20\x47\xF9\x7E\x96\x9A\x29\xFE\xFF\xAA\xCB\x94\x0D\x41\xD2\x1A\x4C\x9F\x29\xB9\x4D\xC8\x1D\x6F\x36\x23\x73\xE9\xDD\x78\x85\xF6\x77\xD1\xD0\x1C\x51\x85\xAE\x62\xFE\x37\x7E\xF4\x93\x3F\xAA\x1F\xE2\xF5\xF5\xAB\x17\xAA\x6F\xBF\x2A\x29\x17\xC7\x1B\xBC\xBC\x95\x17\xF8\xB2\x78\xC2\x1C\x0F\x06\x08\x57\xBD\x35\x3C\x73\x37\xC3\xF9\x45\x27\xCC\x5D\x38\xBD\x07\x43\xF8\xA0\xE2\x9A\xDF\x14\x45\x88\x4D\x51\x10\xBB\x2F\x2F\xB4\x0D\x79\xB3\x29\x68\xCE\x91\xAA\xCE\x19\x8A\x67\xF9\x73\x8A\xEA\x9C\x6E\x29\x3C\x0F\x88\xCE\x11\x97\xFC\x9F\xF6\x77\xE8\xBB\x43\x83\xD8\x5A\x60\x99\x90\x81\x83\xB4\x18\x18\x7F\xFE\xEF\xDC\xBA\x6D\x73\x25\xDA\x26\x54\x61\x7A\xEA\x6E\x73\xEA\xB4\x26\x96\x25\x42\x60\x6F\x48\xC9\x75\x73\x8E\x6C\x75\xED\xAB\xB5\xBA\x00\xB2\x21\xA2\xCD\xB6\x89\xFD\x6B\xB6\x6E\xBC\x95\xA5\x34\x10\x9E\xE4\xB7\x7A\x7B\xA9\x6D\xE2\x3B\xAC\x6A\xD2\xAB\xA7\xD5\xDF\xA1\x8A\x68\x09\xA4\xD7\x4E\xDA\x70\xD3\xFC\x67\x1D\x7D\
x1A\xCE\x99\x27\xF0\x9C\x3A\x23\x76\xAB\x73\x29\xE2\xAC\x24\x11\xBE\x9B\x93\xA1\x39\x99\x7E\xCB\x18\x49\x8C\xB8\x0F\xB9\x13\xE2\xCD\x20\x03\xD5\xBA\x79\xB8\xD1\x10\x7B\x7B\x69\x13\xAF\xFC\xC9\x96\xB7\xC8\xC1\xFF\xF8\x37\xC7\x09\x72\xEB\x14\xDD\x68\x2F\xB5\xEB\x86\x41\x7A\xBA\x4D\xC4\x15\xA4\x59\xB8\xF3\x08\xB9\x02\x0E\xDB\xA1\x71\xE4\x42\x6C\x07\xA6\xAD\x51\xE9\xB1\xA7\x70\x1F\xCD\x5B\x23\x64\x34\x05\xC4\x4D\x44\x94\xD1\x4C\xD8\x64\xCB\x6F\xB7\x4B\x09\x8A\xFB\xD8\xE5\x5D\x96\x2A\xDE\x91\xDD\xE5\x26\xE6\x51\xA9\xA6\xC4\x07\xAF\xB4\x4B\x91\x36\x86\x34\x06\xD4\x65\xF4\x65\xFF\x15\x64\x8B\xAD\x56\x66\x67\x24\x85\xD5\xF9\x98\x32\x88\x7E\x62\x38\x51\x4D\xA6\x67\xC8\x6A\x57\xA6\x05\xFE\x83\x17\xF8\x23\x93\xA4\x8C\x0B\xBF\x4D\x01\x9F\x06\xB2\x87\xF0\xB0\x10\x6C\xCE\xD8\x43\x0B\x31\x68\x1F\xE3\x72\x23\x13\x6C\xAD\x51\x55\xB1\x5F\xDC\x6E\xFD\x2B\xCE\xAF\x35\x84\x5E\xE3\x50\x09\xC6\xA7\x5C\x7B\x4C\xE9\xFB\x26\x11\xF5\x4F\x66\x40\x5A\x4F\xAF\x2F\xB7\x61\xAD\xA5\x46\x90\x61\x58\x10\xE7\x53\x76\x30\x45\xC4\xCC\xC2\x0E\x7A\xD7\x95\x22\xDF\x4C\x8C\x9B\xC6\x50\x77\x1C\x4C\xBE\xCB\xE3\x7C\x01\xC1\x59\x20\xED\xEF\x7C\x70\x62\xBC\x82\x68\x93\x0D\xB6\xFB\xBD\xA3\xF8\x4A\xB9\x4E\x30\x00\x04\x81\x43\x63\xDB\xE3\x55\xE1\x23\x4A\xBA\x0E\x19\x72\xA4\x99\x71\x75\x60\x53\x7D\xE8\x1A\xE5\xB7\x3F\xBE\x35\xB1\xEC\x5C\x4B\xB7\x58\x6C\x0F\xE6\xCE\xA6\x91\x05\x44\x29\x42\xCF\xC8\x12\x0C\x73\x66\x2F\x05\x9E\x6A\xFD\x9B\x5A\x69\x50\xF9\x3F\x18\x78\xF5\x4E\xB3\x55\x6B\xB8\x77\xE3\x99\xBD\xBB\xD9\x36\x7A\xB4\x73\x75\xB7\x73\x75\xBF\x73\xA9\x88\x9D\xEC\x5C\x53\x3B\xCE\xB5\x35\x90\x9D\x6D\x26\x13\xCA\x5C\xBB\xCD\xDF\xD4\x5B\x92\x3D\xBD\x6E\x8E\x52\xF6\x92\xF6\x7A\x6B\x2A\x64\x31\x54\xB5\x24\x6C\x84\x55\xD6\x98\x8B\x33\xFC\xF2\x11\x06\xC2\xA3\xE3\xD7\xB5\xAF\x45\x45\x9C\x12\xFD\x91\x7C\x0E\x37\x1D\x1B\xC9\xD6\x51\x26\xBA\x1A\x17\x94\xB4\x3B\xC7\x8A\x75\x01\x6E\x53\x3C\x5A\x47\x3D\x9E\x25\x5E\xD7\xCE\x97\xA4\xCA\xF3\x12\xB6\x4D\xC1\xC5\xAA\xB0\x6B\xFC\x88\x47\x10\x14\x21\x85\x90\xD5\xBD\x12\x8A\x4D\x6A\x10\
xAF\x71\x31\x5B\x38\xB9\xD1\x73\x1C\x54\x5F\x5B\x98\x4C\x22\x1A\x2F\x4E\xF6\xCC\x24\x2A\xBA\x75\x24\xBB\x17\xBE\x86\xC4\x61\x40\x07\xB6\x58\xB3\x64\xE3\x4A\xD2\xE4\x2F\x29\xFE\x28\x32\xFA\x8A\xDB\xEE\xDC\x91\x74\x0A\xDF\x81\x4A\xE3\x49\x42\x12\x4E\xAE\x53\x2E\x29\xFF\x4C\x5F\x42\x72\x9D\x9C\x00\xFD\x35\x35\x3D\xA6\x38\x57\x80\x60\x1C\x34\x55\x9F\x94\x8D\x9F\x1E\x53\x9A\x08\x4B\x3F\x19\x48\x4E\xC3\x53\xD7\x5B\x7E\x06\x59\xB1\xDD\x60\xDF\x56\x7C\xC2\x1C\xE5\x03\x69\x15\x14\x14\xF7\xA8\xDB\x80\x10\xFF\x36\xC0\x5D\xBC\x47\x1D\x21\x34\x37\xB5\x41\x09\x9A\x2B\x1D\xE6\x05\xC3\x6E\xC8\xA1\x2A\xC4\x2E\x6B\x23\x99\x59\x44\x5D\xD3\xFA\x27\x5A\x7A\x8A\x10\x59\x6A\x43\xD0\x0B\x5C\xA4\x8E\x29\xC9\x17\xAB\x94\x77\x06\xB2\x66\xBA\x6E\x6E\xBF\xE0\x5F\xD3\x22\xE3\x35\x29\x65\xEB\x9B\x87\xA8\x80\x94\xBD\xD4\x44\x7C\xCE\xB2\x39\x10\xEA\x9C\x75\x16\x30\xA7\x80\x52\x7D\xF2\xFE\x88\x05\x56\x53\x6E\x6F\xE5\x82\x83\xBB\xBD\xF3\x3F\xE1\x11\x6E\xBC\x01\xE3\xCD\x7D\x6B\xD2\x89\xE3\x4E\x08\x56\x6E\xD0\xC9\x7A\x1D\x73\x27\xEB\x5D\x1D\xCF\xBE\x93\x75\xEE\xE4\xCE\x41\x27\x02\x69\xF7\x0A\xBF\x13\x4B\x33\x34\xD9\xE3\xB5\x61\x03\xDD\xF1\x60\x51\x36\xDC\x4C\x41\xDA\x85\x7F\x5A\x49\x81\x38\x51\xD8\x8F\xA9\x15\xB1\xE1\xAF\x8B\xC6\x29\x7B\x36\xA5\x87\xAA\x7F\x7E\x95\xB0\xCC\xC2\x8A\xBB\x26\x65\xE5\x81\x02\xB0\x19\x89\xC4\x10\x50\x3C\xB3\x88\xA6\x5D\xC6\x45\xFD\xCC\xA6\xD8\x61\x04\xDE\x9F\x2A\x66\x1D\x85\xA8\x7A\x5B\x9F\xFE\xCC\x35\xAE\x3E\x76\x4D\xD2\x9F\x0D\xBE\xC6\x35\xC3\xC2\x80\xC8\x06\x12\x86\x64\x78\x3C\x67\x44\x17\x33\x1C\x02\x4D\x4E\x81\x94\x23\x15\xF3\x5E\xBB\x78\x06\x15\xAC\x50\x55\x48\x75\xF0\x10\x54\x59\xD7\x9E\x83\xC4\xAF\x08\xC7\x24\x7E\xAD\xF5\xEA\x84\xC2\xBF\xD6\xF1\x3B\x8B\x7F\x1D\xEF\xFE\xBA\xBB\xFB\xEB\x69\x25\x00\x7E\x09\x03\xC1\x24\xA0\x50\xBF\xA1\x16\xC1\x33\x34\x65\x42\x6E\x7E\x8A\xDE\xD3\x67\xF1\x44\x3A\xDB\xD8\x8B\x28\x5A\x36\x20\xF1\x1F\x51\x2D\x81\x30\xEA\x73\x4D\xBA\xD6\x64\x94\x96\xE3\x35\xDE\xD3\xD7\x28\x69\xA7\xFA\x3E\x99\x1F\x1F\x66\x9C\x06\x41\x68\xE3\x0C\x39\x3E\xBB\xA5\
xBC\xAE\x55\x01\x09\x6A\x7A\x31\x57\x0E\xBE\x7B\x86\x7D\xE8\xD7\xE3\xE1\xD7\xE3\x8B\x7E\x5D\x0F\xBF\xCE\xB2\x5E\x01\x89\x7F\x46\x85\x00\x4F\x55\x5C\x35\x3A\xEA\x9C\x75\xEB\xBC\xE6\xF7\x36\x06\x74\xCB\xFE\x9F\x53\x0F\x4C\xD8\xED\x75\x8F\x5A\xF7\xDB\x53\x71\xD7\x82\xF2\x77\xB3\x36\xF7\x00\xA9\x8D\x29\xB8\x35\xCA\xC3\x89\xF0\x68\xBF\x54\x47\x60\xC1\xDD\x47\x51\xDA\xFD\x2E\x74\xDE\x5E\x12\xBE\xEE\x4C\xA7\x77\x11\x3E\x6A\xC4\x2A\x93\x27\xA4\xA5\x4D\x7C\xEC\xBB\x42\x9C\x30\xEA\xB9\x5C\x9A\x25\xA6\x87\x1A\xCB\xCA\x71\x13\xA1\x96\xBF\xCB\x0B\xC3\x7D\xC6\x93\x61\xA5\xD9\x9D\x30\x77\x17\xEC\x0D\xBC\x77\x18\xEE\xF2\xBD\x5A\x9B\x6D\x50\xFE\xF7\x7F\x93\x23\x5D\x79\x6C\xB7\x37\x14\xA9\xA1\xAB\x0F\x22\x2F\x0B\xF9\x28\xB2\xF4\x53\x12\xBF\x79\x66\xA2\xEC\x29\xD9\x79\xA0\xA5\x4F\x53\x6B\x30\x41\x3C\x3D\x5C\x6B\x56\x82\x1F\x0E\x81\x1F\xBA\xDB\xBB\xEB\xE6\xAE\xF0\xEB\x5D\xF3\xBF\x56\xFF\xE2\x2A\xCF\xA8\xF8\x20\xD9\x71\x94\xDF\xF9\x74\x18\x1D\x0D\x62\x95\xC7\x40\x17\x92\x92\x13\x4F\x4B\x5D\x58\x21\x6C\x8A\x17\x1C\x82\x69\xE0\x2C\xF7\xC6\xF9\x4F\xAA\x37\xA0\xE4\xBA\xC4\x88\x84\x60\xFC\x27\xD5\x85\x4D\x7F\xFC\x42\xBB\x6E\xEE\x6C\x44\xAE\x3C\x15\xC8\xB6\x3A\x78\xB7\x76\xC1\xA5\xC5\xFA\xFE\x6A\x07\x69\x45\x20\xB0\xDE\xBC\x11\x9B\xC2\xEB\x86\x02\xBB\x06\x6E\xD3\xFF\x28\x41\xE3\x18\x3E\x17\xC3\x5A\xD0\x3B\x64\x83\xA0\x14\xFE\x75\xB3\x4A\x80\x14\x4C\x2A\xA4\xC2\x2A\x49\x9E\x70\x51\xD0\x9B\xDE\xBC\xB1\xE1\x1C\x57\xA1\xF2\xB0\x25\x32\x64\xB2\x33\x8A\x87\x12\x9E\x37\xF7\xD3\x29\xFB\x4D\x1D\x7A\xB9\x43\xAD\xD8\x68\x65\x0A\xFF\x49\x29\x04\x60\x4F\x98\x15\x01\x73\x3C\x3B\xDB\x3A\x2B\x84\x8D\xE6\xB8\x70\x6C\x5B\xCB\x9D\xFD\xAC\xA0\xFA\x69\xE4\xB9\x27\x5A\x6F\x2E\xFB\xED\xC7\x91\xA0\x27\x4C\x19\xC0\xAD\x9A\x99\x65\x2C\x2A\x05\xAA\xFA\x81\xAB\x3D\x07\x7D\x8F\x0B\x3B\x8E\x6D\x31\xE4\x22\xA4\x9B\x2F\xED\x34\x01\xF5\x20\x99\x34\x15\x65\xED\xAE\x26\x66\xF0\x50\x32\x09\xA1\xCE\x18\x53\x71\x15\x3F\xBD\x7F\xC2\x58\x94\x74\xB5\xDC\x08\x65\xCA\x64\x14\x91\x77\x97\x6A\x67\x4F\x91\x4A\xAD\x2F\x93\x2A\x46\xF0\x40\
x49\xC8\x2C\x0F\xD5\xD3\x45\xC7\xFF\x87\xBF\x3D\x50\x42\x53\x54\x91\x34\xBB\x4D\x28\x00\x22\x25\xB8\x48\x70\x17\x01\x87\x70\x91\x2F\xD6\xEE\xF2\x60\x11\xC1\x0E\xE3\xA2\x6C\x17\x17\xC5\xF3\x38\xA6\x8E\x9F\x09\xC8\x50\x32\x42\x87\x23\xB4\x2F\x78\x84\x6E\x3C\x42\xBB\xC7\x08\xD9\x07\x84\x7A\x92\x3B\x69\x5C\x87\x1C\x3A\x20\x98\x79\xB1\x09\x66\xF6\x18\x8E\x93\x50\x2D\x1E\x4E\xF1\x41\x63\xA8\x72\x1E\xF5\x55\x7D\xFF\x55\x4A\xDF\xA7\xCB\xE8\x2C\x63\x10\x5B\x30\x53\x10\x40\xE9\xFB\xAF\x09\x4C\x12\xDD\xA2\xCF\x70\x41\xC1\xBB\x48\xFD\x74\x97\xC8\xF8\x1C\x93\x47\xB5\x9F\x53\xD6\x65\x8B\xF3\x9C\xB2\x5D\xE7\x14\x8D\x48\x9C\xE1\x9C\x92\x8B\x90\x85\x39\x65\x38\xA7\xA8\x80\x48\x32\x24\x05\x84\xD8\x89\x56\x22\xA3\x24\x6E\x17\xBB\xD2\x8D\xD3\xF5\x5B\x89\x63\x6F\x5C\xA3\x45\xE8\x09\x81\x5F\x7A\x7D\xF1\x46\x93\x9F\x86\xB7\x42\x7E\x8D\xAA\x5E\x77\x6D\x34\x5C\x40\xB4\x89\x88\xAC\x74\x89\xB9\x74\x11\x22\x19\x48\xF1\x9C\x31\x76\xDB\x5D\xE9\x76\x16\xA7\x33\x76\x84\x34\x7B\x10\x52\xCE\x9F\xEA\x67\xAF\x49\x9C\x18\x0D\xD1\x84\x21\x8A\x7D\x6B\x5A\x8C\x1A\x8C\xF6\x68\x90\x2E\xE6\x62\x77\xB7\x82\x26\x0D\x8E\x97\x89\x42\x45\xD8\x63\x8F\xEB\xA3\x06\xDF\x7A\x7D\x91\xCA\x26\xDC\x61\x5C\x83\x0A\xB7\x94\x45\xB6\xB8\xDB\xD9\x4C\x6D\x3B\xDA\x90\xF1\x5E\x94\xFC\x9C\x63\xB9\x68\xCD\x35\xAA\xB4\xA7\xF5\xD5\x87\xC9\xC4\x10\x31\xF1\xBB\x12\xD8\x62\x31\x60\x2B\xB3\x26\x42\x12\x15\x75\xA0\x62\xBF\x49\x52\xEC\x27\x93\xBC\x5A\x24\x43\xF1\x2E\xA3\xDD\xF6\x80\xBE\x8B\xC4\x95\x37\x07\x25\x0F\x87\x26\x58\x70\xD5\xBF\xBE\x26\xD7\xF5\xBB\xE8\xFA\xC3\xB6\x0A\x0A\x12\xF3\x8A\x7F\x0C\xAC\x6D\x3A\xD6\x76\x10\x07\xD6\xE6\x20\x14\x9A\x18\xD7\x58\x19\x46\x42\xD1\x98\x16\xB0\xB6\xA5\x00\x29\x36\xCF\x13\x6D\xF0\xC0\x64\xD6\x0E\xB4\xC0\x93\xE0\x32\x29\xB1\x63\xE9\x11\x13\x77\xC4\x81\x3B\x8A\xE7\x8D\x31\xDB\xF6\x8A\xEF\x14\x9B\x8E\x45\x62\xAF\x1E\x9C\xA8\xD3\x12\xEC\x85\x47\x3E\xF0\xDC\x58\x9B\x54\xD5\xBF\xA2\x99\x0F\x1B\xAC\x13\x88\x71\x3B\x91\x7E\x30\x08\x42\x4A\xAE\x37\x04\xD5\xDC\xC9\x4C\x8A\x82\xB8\xDC\x19\x80\xC8\xEC\xA2\xBA\
x39\xC7\x8B\xE6\x4C\x76\x1B\x42\xB0\xA4\x9D\x38\xD1\x25\x17\x1B\x14\xA9\x19\x87\x79\x53\x32\xBA\x92\x88\xE2\x01\x1F\x2A\xFF\x25\xD5\xE2\x6D\x32\xBB\xDE\x71\x24\x33\x21\xAA\x71\x19\x3D\x88\x13\xCD\x4E\xD7\x4F\x5D\x27\x46\xCC\x08\x6A\xB1\x22\x66\xC4\x93\x15\xE7\x1E\x08\x2E\x9F\x47\xCC\x18\xCF\x31\x63\x60\x40\x62\xC7\x6B\x27\xCD\xEB\x0B\x88\x8A\xDF\x32\xD6\x50\x6A\xB3\x1F\x78\x29\x68\x9F\x91\xAD\xC1\x5D\xF7\xEF\xFA\xC4\xAF\x7E\x26\xBA\xD0\x68\xBF\x7A\x19\xF4\xE3\xF2\xD1\xFF\xF0\x77\xFF\xC2\x97\x9F\x0C\x9F\x80\x92\x9E\x17\x3F\xF9\x78\xA3\xFD\x64\xF0\xB9\xD1\x7E\x8A\x1F\x85\xB7\xEC\xB7\x34\x51\x80\x79\xB6\x7E\xBB\xDD\x62\x17\xAF\xAE\x63\xFC\xE2\x3C\x7F\xFF\x58\xCD\x05\xD7\x54\x67\xFB\x69\x54\x17\x62\xA8\x42\x90\x67\xE4\x0D\x57\xFC\xD8\x6E\xBB\x34\xF9\x10\xCD\x72\x96\x94\x63\xBB\x05\xF1\xC6\x84\xA3\xDE\xEE\xB0\x29\xB8\xBF\x4D\x28\xE9\xD4\xCA\x44\x95\xB6\x28\xE6\x5F\xE3\x18\x3A\x0E\xF2\xB3\x8F\x49\x24\xE5\xC6\xC4\x0D\x1B\x31\x83\x46\x28\x74\x9C\x58\xBD\xF8\xF7\x91\xD5\x57\xA2\x6D\xF3\x24\x18\xA4\x65\xDC\x51\xC8\x20\x85\xCC\x2E\xB4\x8C\x87\xB4\x1C\x3F\xF9\x78\x63\x90\x96\xDD\xE7\xC6\x20\x2D\xCD\xE3\x7C\xAC\x86\x48\xE9\x74\xD2\x39\x1B\xD2\x6F\x69\x32\x36\xFB\x25\x04\x5A\x90\x50\x2E\x3E\xFE\xF3\x58\x9D\x17\x7C\x5F\xCA\x03\x49\x0D\xD5\x42\xC0\xC7\x29\x37\x4A\xBE\xB2\x41\x60\x64\x90\x30\x39\x32\x6E\x24\xC3\x46\xB2\x02\x32\xC8\x37\x26\x0E\x45\xAD\x4D\x21\xFE\x3B\x13\xC2\x6E\x48\x69\x3F\xAC\x73\x68\x0C\x2E\x6B\x33\xEC\x86\x2A\x56\x6F\xB7\x75\x31\xE8\x47\x8F\xFB\x81\x0C\x8A\x2D\x6C\xD9\xCC\xB5\xAC\x0B\xBF\xCA\xD7\x9F\xC6\x78\xAE\x51\xC3\x71\x1D\x86\xEF\x9F\x9C\x76\x76\x87\x55\x27\x6D\x0A\xFA\x9B\x4C\x55\x73\x15\x4A\xAF\xDB\x9F\xAE\x27\x4C\x17\xF2\x58\x43\xFA\xD3\xEF\x84\xC9\xDF\xF7\xFA\x71\x1E\x91\xBE\x04\xA9\xB7\xE7\x21\xE5\x21\x7B\xB5\x71\xF1\xA2\xEC\xD7\x09\x94\xEF\x7A\x5B\x53\xFE\xF7\x4A\xED\x3C\xF4\xF7\x01\xFF\x55\xEA\x11\x8A\x37\x2B\x7F\x84\xD0\x75\x3C\x95\xA9\xD7\x90\xE3\xDB\x90\x9F\x5F\xAB\x7E\xB9\xB7\x4E\xBA\x46\x9A\x64\x68\x94\x16\x92\x2D\x7A\x89\x57\xE4\x21\x5E\x1E\x2A\
x34\xC1\x0B\xD3\xAF\xA6\x2C\x4F\xC3\x31\x10\xC1\x50\x90\x8A\xDB\x04\x95\xF6\x9C\xC2\xE6\x83\xFF\xB8\x49\x70\x11\x13\x94\x4E\xAC\x6A\x71\xC7\x4D\xBE\xC5\x35\x04\xB7\x58\x7E\x0E\xF9\x20\x09\x0B\x45\x51\x25\x0C\x75\x43\x91\x4B\x24\x36\x12\xC6\x15\xF3\xBA\x4E\xB9\x7C\x72\x49\xC6\xF5\x92\x42\x9D\xEE\x51\xE1\x40\x2F\xDE\x92\xDA\x84\xDC\x15\x7D\xF9\xA3\x01\x44\x02\x0B\x6C\x2D\x9A\xB7\x4F\xB6\x02\x10\xD3\xB5\xDF\x1A\xC1\x83\x06\x30\xE7\xEA\xC6\xD5\xF0\xA6\xC5\x1B\x83\x0B\x90\x07\xC3\xF3\xE0\x1C\x47\xAB\x0D\x0E\x02\x39\xFF\x63\x0E\x6C\xD5\x5C\xD2\x95\xD0\x53\x45\x5A\xDC\x45\x81\x92\x3C\x0A\xFA\x35\xD9\x5B\xCC\x77\x4F\x4B\xE4\x09\x8A\x73\x91\x45\xDC\x6D\x93\xB6\x4D\xC4\x09\xF8\xD1\x5F\x53\xAE\xCE\x21\xF2\x51\xFB\xD7\x94\x22\xA4\x1B\xD1\xB1\x92\x2D\x4F\xD9\xCB\x7F\xF0\x5B\x23\x5C\xB0\xBB\x20\x65\x94\x28\x3C\x31\x8B\xD3\xFA\xAD\x90\x9F\xCE\xDE\x7A\x0D\xB2\xFF\x56\x9B\xD3\xFA\xEA\x35\xD4\xA2\x2F\xDF\x38\xBD\xFC\xD6\x6B\x27\xED\xAA\x9C\x1A\xCF\x28\x2A\xD6\xE3\x57\xDB\xEA\x47\x05\x39\x23\x66\x44\xCD\x31\x79\xB2\x9E\x3C\x4D\x44\x25\x7C\xDA\xBA\x1C\x04\x05\xD3\xD8\x27\x6D\xB3\x84\x23\x9C\x8A\x9B\x42\xC1\xD2\x31\xE5\x08\x71\x6E\x34\x56\xFE\x36\x34\x36\x69\x87\xEF\x68\x88\x44\x26\x92\xE2\x17\xB5\xC7\x94\xAA\x27\xB3\x2D\x48\x87\x55\xF7\x40\x68\xAC\x1A\x34\xB6\x90\x44\xF4\x40\x3D\x2D\xA8\x8C\xC3\xA4\xEB\x87\xBC\x1F\x5E\x6F\xC1\xD4\xBB\xCB\x7C\xF6\xE3\x6A\x4E\x08\x24\x36\x68\x08\xC3\x15\x06\xB9\xF8\x51\xB1\xEE\xA0\x30\x92\x33\x8C\xBD\xA5\x74\x72\x67\x74\x66\xC6\xFD\x99\x19\x34\xDA\x66\x8A\xAA\x14\xC1\x5B\xC5\xBC\xCC\x71\x58\x66\x71\x50\x53\x29\x20\xEA\xD0\xED\x3E\x99\x69\xCB\xFE\x02\x64\xA1\xD1\x92\x47\x61\xC9\xE3\x7E\xBD\x61\xBC\xCE\xBC\x89\xEE\x45\xE9\x41\x8D\xFF\xFA\x6F\x8D\xFD\x1C\xA2\x7A\xB3\x48\xEC\x64\xA1\x0A\x2F\x9D\x30\xF7\x16\xC1\x5C\x78\x17\x31\x44\x63\x64\x34\x90\x54\xEF\xBD\x3A\x4E\x9E\x97\x06\xA9\x3C\x19\x1D\x6C\xBD\x46\xA1\x3A\x95\x8E\x91\x2D\xA0\x47\xB6\xF8\x79\xBC\x36\xF0\xA5\x0B\x57\xD7\x05\xFF\xDD\x60\x0F\x70\x02\x2F\x85\x55\x25\x5B\x24\xF3\xA3\x05\xFB\x8E\x0C\x0A\x8E\
x79\x26\xC2\xDD\x07\xB6\xFA\xF1\x10\x68\x24\xD1\x30\x82\x56\x82\x57\x9C\x04\xD5\xEB\xE4\x5A\xB8\x86\xE2\xFD\x78\x17\x75\xB9\x21\xDF\xBC\xCC\xBB\x19\x0D\xEF\x8C\x40\x6D\x10\x5A\xFB\x82\xCE\xC6\x9A\xEA\x74\xCE\xC0\xB0\x48\x83\x9F\x69\xAB\x43\xB4\x5F\xF0\x42\xF1\x05\x76\x79\xF3\xC6\xB0\xA2\xD0\xB9\xE0\x43\x5C\xC4\x4E\x78\xBF\x71\xB8\x35\x23\xC6\x9B\xEE\xA5\xEB\xDC\xF3\x01\x26\x2C\xF2\x37\xF5\x56\x78\xF6\x98\x72\x1B\x41\x2C\x5A\x69\x44\xEF\xD1\x08\x77\x1A\x15\xE1\x31\xD9\xCB\x96\xE1\x19\xF8\x5A\xB9\x5B\xFF\x60\xA9\xE7\x88\xDF\xD9\x08\xEC\xDC\xCF\x54\xEF\x37\x53\xF1\x78\x72\x85\xA3\xBD\x27\x1B\x7E\xE7\x37\x80\xCA\x4D\x9E\x61\x24\x9F\x9F\x5F\x32\xF1\xB6\xBD\xB2\x00\xEE\x9E\x4F\x53\x42\xBF\x2B\x79\xED\xEE\x9B\xA8\x59\x97\xB1\x58\x2F\x76\xD8\x66\xEF\x6F\xDE\xFC\x65\x75\x76\xA2\x0B\x7F\x53\x92\x4E\xFC\x4D\x8A\x6A\x35\xF4\x6F\x56\xF8\x9B\xD3\xF0\xFD\x94\xBF\xDF\xA9\x1E\xA4\xEF\x6F\xDA\xF0\xC3\x4D\x2B\x6F\xE0\x1F\x19\xE1\x64\xE8\x73\x93\x04\xB4\x3F\x85\x37\xCA\x53\x5B\x4D\x74\x76\x62\xFC\x6B\x50\x21\xF6\xAF\xD9\x3A\xC3\x57\x0D\x55\xFD\xE4\x55\x2E\x91\x0B\xC6\xBF\x66\xCB\xBF\x06\x55\x30\x1E\xDB\x23\xF8\xDA\xCE\x23\xF4\x9E\xF2\x37\x5F\x45\x2F\xDE\x7C\xD5\x16\xB9\x03\x6E\xBE\x6A\xCB\xDF\x7C\x55\xFF\xF0\xCE\x63\xF4\xF4\xCE\x63\xE1\xF1\x9B\x53\x7E\xFE\xE6\x54\x5E\xB8\x39\xDD\xC2\xFF\x90\x8A\x47\x61\x0D\xCE\x97\x6D\xF5\x2F\xBE\x9D\xD4\x2F\x70\x04\xE1\xCF\x1F\x37\xD8\x3D\x48\x91\x5F\xA0\x2F\x36\x11\xD8\xEA\x27\xC4\xCE\xC8\x8E\xD3\xF0\x45\x19\x15\xFE\xF3\xBF\x35\x03\xE5\xA1\x3A\xB3\x96\xF1\xFA\x41\x12\x45\xEC\x6E\x24\x26\x6F\xEC\x8D\xD3\x37\xF5\xD5\xD3\xD9\x5B\xF1\xA3\x6A\x22\xFE\xA8\x51\x58\x56\x83\xC3\x31\x21\x88\x08\x16\x9A\x62\x42\x5C\x37\x95\x9C\x09\x1D\x17\xDE\x94\x6A\x34\x3F\x42\x46\x74\x4D\xD3\xA8\xDA\x13\x06\xC0\xF9\xF5\x16\xB8\x11\xFF\xEE\x4F\x73\x59\x02\xC0\x49\xAF\xB6\xD5\x8F\x5D\x15\x7F\xF4\xBA\xAF\x5A\x8E\xD7\xC0\xB1\x9E\x9D\x70\xFC\x01\xE5\x69\xB8\xD6\x0B\xA0\x87\x6E\x19\x50\x8B\x4B\xCA\x44\x54\x7A\x34\x5C\xA7\x29\xE7\x02\x47\xD1\x64\x90\xE2\xBF\x8C\x10\x1F\xD3\xA4\x1A\
x3C\x13\x40\xBF\x17\x4F\x02\xC8\xBB\x29\xC2\x68\x6A\xEC\x8A\xE0\xA9\x11\xD1\x33\xC8\xBC\x02\xFD\x82\xA7\xC4\x84\xA2\xC1\xF1\xD0\x86\xAD\x92\xC3\x94\x42\x85\x38\x05\x2B\x54\x43\xE0\x5A\xD6\xC3\x85\x63\xB8\x7C\x0A\x37\x22\x03\x78\x4B\x19\x4C\xFD\x1A\x02\x7F\xB3\xDB\x32\x66\x2F\xC6\x32\x66\xB7\xB2\x8C\x14\xCD\x45\x78\xCB\x54\x0C\x02\x6C\x63\x58\xBC\xC9\x2C\xE8\x03\x5B\x25\x0C\x9B\x38\x71\xF9\x62\x1E\x4B\x43\x4A\xBC\x83\xA8\x9B\xA0\x2C\x9F\x1D\xCF\x0F\x46\xF3\xB2\x62\xB1\xEE\xD6\x30\x86\x74\xAF\x35\x3C\xF0\x7C\x24\x9E\x8C\x85\xFD\xFC\x1A\x1A\xCE\xC8\xEB\x9C\xE0\xE4\xB0\xB2\x41\x4F\x7F\x94\xEC\x60\x9A\x9C\x20\xFC\xCD\x9D\x3E\x20\x85\x81\x66\x55\x81\x7E\xF4\x8A\xF3\x47\x44\xE5\xB4\xEC\xFC\x24\xCD\xC1\x92\xAA\x85\x8D\x81\x16\xA5\xDC\x76\xC5\x79\x1F\x2D\xD9\xFF\x01\x11\x2A\x33\x03\x19\x31\x10\x10\x54\x55\x2D\x9E\xB1\x01\x45\xC1\x06\x54\x74\x2A\x0A\x9B\xBF\x34\xCD\xD4\xC8\x7C\x83\x2D\x28\xA2\x23\x3B\x62\x5B\x50\x01\x11\x6A\x5D\xC5\x69\x78\x8A\x15\xE1\xDE\x04\x93\x5F\x67\x1C\x6A\x9E\x6F\x71\x9D\x5C\x41\x39\x3D\xD8\x69\x92\x06\x72\xB2\xC4\x58\x32\x09\x0E\x2D\x31\x3C\x55\xDD\x59\x62\x2C\x6B\x61\x72\xAF\x45\xBD\x49\x93\xDE\x64\x06\x96\x98\x3C\x58\x62\x4A\xB1\xC4\xF0\x12\xE8\xB0\x00\x3A\x68\x1E\x8F\x0A\x84\xFA\x60\x0D\x18\x06\x98\x56\x81\x7F\xF7\x9C\x67\xA4\x06\xE6\xD9\x7E\x21\x4C\xBF\x10\xB1\x2C\x84\xA9\x03\xFC\xF3\xA3\x01\x56\x78\x77\x49\x5D\x0C\xAB\xB2\x5C\xB3\x64\x9D\xE3\x73\x8B\x2A\x28\xF0\x29\xC9\xCA\xC5\xE2\xA5\x8A\x77\x5D\x2A\xFA\xCF\xB9\xB5\xBD\x16\x2D\x86\x08\x8F\xED\x18\xD7\x22\x19\x2F\x5A\x7A\x9D\xAE\x3E\xA9\xD4\xD8\xE2\x45\x4B\xE9\xC1\xC1\xA2\xA5\xB4\x68\x78\xD2\xA6\xA3\x45\x8B\x66\x16\x2D\x3A\xC0\xA2\xA5\x61\xD1\x92\xD1\xA2\xA9\xF1\xA2\xB9\x13\xE6\xD1\xE9\xA2\x35\x73\xDD\x9A\x4D\x0F\xBC\x64\x6E\x7E\xC9\x8A\xE2\xC7\x96\x2C\xAD\xC2\x36\x9D\x98\xE4\xF8\x94\x66\x02\x60\x2E\x87\x9A\x04\xDB\xAD\x2C\x84\xED\xD5\x7A\xD2\x8D\x7E\x49\xFB\xCF\x6A\x92\x9E\x4C\xF8\xA1\xA1\x1E\x85\x03\x09\x7B\xA4\xA7\x1B\x13\x3E\x42\xC2\xDB\x00\xB1\x65\xD9\x6A\x1E\xD1\x83\xFD\xBD\x0B\x22\x22\x7C\
x82\xAA\xE1\x80\xF0\x31\x24\x23\x23\xBA\x7C\x1E\xDD\xC1\xCC\xEC\x1D\x0C\xA2\x40\x78\x27\x84\xF7\x54\xEF\xC3\xEF\x68\xFF\x8B\x7A\x20\xD9\x4C\xE7\x7B\xEF\x45\x99\x9E\x84\x52\x6B\x33\x4B\xA2\xC1\x88\x28\xD3\x2C\xCA\x06\x74\x0C\x25\x52\x88\x5A\x49\xBF\x1C\xC1\xBE\x90\x0C\x96\x63\x86\xEF\x77\x25\xF7\x73\xDA\xFF\xE1\x9E\xE4\x8E\xC7\xE4\xBE\xC3\xBC\xFE\x40\xA4\xD6\xBB\x90\x5A\xCF\x90\x5A\x3E\xEF\xE7\xAF\x58\x40\x6A\x3D\x20\x75\xFF\x57\xCC\x44\xDF\x83\xF4\x31\x91\x3E\x9E\x27\x7D\xDC\x91\x3E\x66\xD2\xC7\xB7\x42\xFA\x58\x42\xA5\x0E\x4E\xFA\xE7\xB5\xFF\xF2\xAD\x70\xFA\x41\x49\xFF\xE7\xCC\xE5\x0B\xC9\x3E\xC7\xF5\xE3\x65\x58\x78\x8E\x98\xDD\x44\x92\xD9\x4B\x24\xC5\x83\x85\x88\xE7\x17\x22\xEE\xF7\x40\x78\x79\x2F\x69\x23\xA3\xFE\xCB\x28\x6D\x40\x05\x59\xB9\xA7\xA4\xF9\x73\x67\x77\x50\x41\x88\xCC\x73\x41\x63\x82\x76\xB7\xCB\xC8\xF4\xE2\x91\xE9\x5B\x1E\x99\x5E\x3C\x32\xD9\x63\x83\x91\x99\x45\x9C\xA9\xBE\xBE\x9C\x59\x14\x2F\x9B\xB1\x03\x81\xA6\x00\xAC\x5D\xBF\x5F\x53\x6C\x40\x5A\xE7\xA0\x0A\xCD\x38\x00\x14\x0A\x57\xBC\x25\x22\xF3\x96\xD8\x0E\x9E\x53\x33\xC6\x83\xBF\xD7\x1B\x08\x70\x36\x21\x54\x66\x5F\x47\xB7\x78\x4A\x82\x82\x64\x46\xFE\xCC\xB1\x42\x74\xB2\xCF\x74\x15\x08\x0A\xB2\x37\xFC\xB9\x75\x01\xCE\x7F\x4A\xE1\x2D\xE3\xD3\x4A\xEE\xF8\x75\x84\xD7\xEF\xD7\x11\x4E\x46\x80\xE7\x1F\x5C\x00\x06\x2E\x5A\xDD\xDF\x03\x62\x88\x07\x5B\xDF\x52\x6F\x36\xB8\x68\x2D\xA3\x69\x44\x7C\x01\xC3\x2B\x01\xC5\x5A\x7C\x4A\xF1\x45\x9A\xEE\x62\xC9\x49\x73\x2F\x13\xF9\x6E\xBE\x4F\x47\x33\x5D\x4B\x44\x82\xA3\x40\xBD\xD1\x08\x58\x9B\x8A\x20\x1A\x8C\x80\x5A\x5A\x1F\xCB\xA0\xA4\xA0\xCB\x94\x4F\x71\xBA\x77\xB7\x3D\x12\x11\xF5\x7E\x67\x57\xD3\x10\x2F\x62\xC3\x1F\xDD\x1D\xF6\xEE\x93\x76\x95\xFF\x5A\x3F\x69\x53\x70\x14\xC8\xA8\xA9\x39\xB6\x8E\xFC\xE0\x55\x46\x12\x66\xC3\xC0\x1D\xE6\x81\x3A\xC1\xEB\xDA\xB9\xDA\x0C\x0A\xCE\x81\xC2\x8E\x82\x79\xB0\xD4\x1D\xDB\x81\xF3\xCF\x85\x38\xC3\xE2\x43\xAF\x88\xCC\xB6\xB9\x32\x42\xA3\x7E\x8F\x19\x94\xF7\x1A\xB0\xEF\x98\x05\x86\xC5\xA7\x0E\xF6\xFF\x5C\xB7\x77\xBB\x5D\x3A\xA2\x74\
x9A\x59\x17\xC5\xC9\xCA\x24\x2F\x96\xA6\x87\x0E\x57\xCB\xA5\x59\x99\xB1\x60\x96\x2F\x2B\xF6\x8D\x12\x29\x5F\x76\x4B\x61\x22\xE5\x6D\x84\x43\x59\x05\x76\x1B\xB1\x39\x97\x28\xEE\xB9\x3A\xAE\x7E\x86\x6F\x10\x74\xD1\x1C\xDA\x5A\xF1\xA9\xC7\xBC\xD9\x98\x1C\x59\xCC\x36\x14\xB0\xFA\x65\xCD\x71\x9E\xCF\xEB\x31\xEF\xF8\xD4\xAB\xEA\x03\x2C\xF3\x66\x86\x89\xAF\x60\x07\xCF\x6B\xAE\xB7\x82\xFC\x91\xF8\xE7\xF5\x80\x41\x12\x48\xEE\xB0\xCF\xEB\x93\xB6\xE2\xD8\x57\xE5\x53\x4E\x1D\xE4\x1F\x95\x94\xC7\x4C\xFC\x3B\x4C\x4B\x3A\x5A\xD5\x42\x22\x0C\x63\xF8\xEB\x01\x28\x16\x36\xF6\x0E\x73\xD2\xA8\xF2\x08\x1D\x55\x83\x20\xB1\x26\xA5\xCA\x70\xF2\x64\x8C\xBB\x69\x05\x22\x5F\xB6\xC2\x64\x64\x8B\x4A\xFC\x71\x1A\x64\x3A\x33\x80\xE3\x90\x78\xE8\x4A\x76\x76\x3F\x72\xAD\x4E\x18\x8C\xEE\x78\x4B\x4F\xCA\x00\xD3\xDD\x07\x08\x86\xBB\xC6\xFD\x4C\xA5\x36\x8E\x14\xB3\x23\x9E\x1B\xEF\x61\xAA\x0A\x1F\xC6\x9B\xE2\xD8\x1C\x17\x2F\x62\x68\x7A\xD3\x15\xEA\xC3\x81\x85\xFC\xF7\xA6\xA0\xA8\x9D\x4C\xBC\x7E\x78\x19\xA4\xEB\x7C\xC0\x00\x9E\xBF\x92\x66\xC1\xC9\x31\x60\x9E\xA2\xBB\x1E\xE7\xBD\x85\x80\x62\x33\xF2\x10\x9B\x91\x0F\x62\x33\xCA\x2E\x46\x28\x84\x68\x4C\x66\x42\x34\x0A\xAA\x44\xDE\x87\x68\xC8\xE7\xA6\x80\x82\x32\x1C\x7A\x87\x4A\xD1\x29\x1F\xE5\x28\x4C\xA8\xCE\xC8\xAC\xD7\x0B\xE4\x40\x53\x72\xE8\xA6\x13\x55\xAE\x50\xFD\xCA\xE7\x88\xDF\x02\x35\x1B\x4E\x15\x7E\xB5\x36\x48\x7D\xA4\x53\x26\xB1\xD9\x69\x2B\xA0\x14\x90\x9D\xB4\xCF\x69\xFE\x97\x80\xDA\xF8\x01\x2E\xFA\xEB\xE3\xCB\xD5\x0F\x91\x09\xE6\x10\x45\x38\x2F\x68\x3C\xA1\x76\x9B\xEC\xA4\x7D\x46\x87\x9E\x12\xBF\x63\x06\x5D\xF8\x67\x66\x3B\xDC\x31\xFC\xEF\xC3\x03\x6E\x7A\x66\xAE\x53\xE2\x20\xF3\x4E\xC3\x93\x4E\xD6\xCD\x0E\x89\x81\x65\x1C\xCA\xD3\x07\x98\xE7\x73\xB3\xDD\x3E\x2D\xF3\x7C\x44\xE6\xF9\xDC\xB0\xCB\xEB\x34\xCF\x8A\x6D\x20\x28\x67\xA8\x3D\xAF\xB7\xAA\x7F\x45\xBF\x4C\x59\xFA\x9C\x99\x4C\x90\x63\xBC\xA3\xD2\x95\xF8\xF7\x12\xEA\x4A\x14\x20\x5C\x90\x75\xCD\x9D\x9D\x28\xFF\xA1\xB1\x03\x2B\xA6\x77\x19\x9A\x61\xC2\xE1\x7A\x4B\x27\x02\xEB\x53\xE6\x79\x55\x04\x19\
x3B\xF7\x7E\x47\x24\xB3\x6E\x56\xD8\x61\x35\x1E\x1B\xF3\xFE\x84\x68\xF4\x6E\x53\xA7\xF8\xEF\x3B\x4D\x3D\xC1\x7F\xDF\x61\xB8\x68\x58\xE2\x77\x98\x64\x29\xAF\xC9\xF3\x4A\x24\xD4\x68\xCF\x26\x38\xBA\xF7\x23\x99\x96\x4E\x98\xF7\x69\x06\x28\xF9\x19\xCD\x92\xEB\x1D\xE6\xA4\x7D\x8C\xFF\x7C\x9F\x3E\x69\x77\x34\x2F\x34\x91\x72\x47\xB7\xDC\xA8\x48\x83\x64\xDD\x3C\x25\x23\xB9\x26\x23\xD9\x31\xF5\x52\x11\xEA\x02\xC5\x92\x8E\x43\x35\xB5\x4A\x8E\x70\xD4\xA8\x59\x50\x1D\x57\x56\x62\x24\x1E\xDC\x70\x3C\x38\x25\xC4\x47\x21\x49\x03\x05\x3B\x48\xBD\x27\x14\xD4\xAF\xA3\x37\x9E\xE5\x37\x22\xB6\x79\x18\x8E\x40\x7E\x98\xB0\x74\x78\xCB\xB8\xA1\x18\x0A\xEF\xFE\x3D\xF9\xF7\x51\xD0\x78\xD3\x90\x14\x9A\xA0\xBA\x19\xA9\x41\x4F\xFE\x1D\xAF\xC4\x24\x49\x18\x63\xDD\x77\xEF\x93\xB0\x64\xD0\x77\x98\x47\x07\x08\x86\xBF\xAF\x84\xB0\xE5\xE4\x00\xC7\xE1\x64\x7C\x1C\xFA\x9B\xC3\x3B\xD2\xAE\xA7\x62\x29\xC1\xB6\x55\x13\xAD\x9B\xB9\xE0\x41\x51\x65\xCB\xB2\xE8\x9C\x0E\x07\xA1\x73\xDC\xD1\x39\x46\x11\xBC\x0B\x9D\xE3\xBD\xE9\x3C\x50\x67\x7A\x3A\x07\x02\x07\x92\x47\xEB\xA6\x3C\x33\xC9\xF0\xDF\x94\xC4\xD9\x4D\xBD\x35\x49\x2D\xE7\x75\xCA\x50\x28\x1F\x69\x76\x45\x20\x45\x45\x72\xB0\x1C\xF2\xC5\xFB\x42\x1A\x23\x9E\xAD\xD4\x2C\xF5\x81\x34\xDD\x98\xA8\x32\x2F\x3A\x7A\x85\x45\xD8\xED\x20\x2A\x49\xA2\xBE\x67\x81\xA4\xE9\x44\xCC\x3B\xF4\x8C\x88\x79\x8F\x88\x98\x77\x68\x91\x31\xEF\xA0\x1C\xC1\x1F\xA6\x51\xE6\x94\x90\x76\x5C\x9C\x7D\x7B\xF2\x42\x3E\xE2\x05\xE3\x35\x1B\xF0\xD7\x3A\xF5\x63\x57\x7E\xC8\x0E\xDE\x49\x36\xEA\x44\x53\x8D\xDD\x83\x75\x92\x1E\xBC\x93\x74\x46\x7F\x72\x07\xEE\x24\x39\x78\x27\xC9\xA8\x13\xE7\xD3\x03\x77\x12\x1F\x60\x63\xC6\xA3\xD6\xAB\xFD\x1B\x8D\x0E\xD0\x68\x34\x6A\x14\xF6\x6F\xD4\x15\x8B\xB7\xB6\x2D\xEC\x48\x4F\x67\x73\xC6\xFF\xC6\xC9\x67\xDE\x71\x5C\xBA\xFF\x6D\x75\x6E\xE2\x20\xF6\xBF\x44\xDF\x53\xA8\x7A\xCA\x30\x86\xB4\xCD\x9A\xD4\xFF\x67\xAA\xDD\x0C\xB1\xFF\x2F\xE4\xB6\x4E\xB9\x09\xC2\xE7\x89\xE9\x75\x0D\xA9\x7F\x8E\x0B\x3C\x87\xD2\x85\x83\xED\x18\x53\xA9\x9C\xC1\x76\xE4\x2F\xF6\xDE\x8E\xC5\x9E\
x42\x53\x1F\x80\x8C\xFA\x96\xEE\x10\xB4\xA1\xBB\xCA\xFD\x3F\x95\xF7\x85\xCC\xF1\xDE\x24\x31\x6A\x60\xFD\xFB\x03\x8A\xA8\xF5\xEF\xD5\x22\xB0\xAC\xFF\x97\xFD\x9F\xCF\x6B\x49\x75\xB3\xA4\xEB\x48\xD5\x9B\xAF\xE8\x16\xB5\xBE\x3F\xB1\xF4\x0B\x3E\xFB\xBD\x9A\xFE\xB5\x5F\xD2\x60\x43\x22\xD4\xBB\xB5\xFF\x8A\x91\x98\x45\x57\xFD\xFC\x35\x06\x58\xC4\xA7\x9E\xD3\x0C\x37\xF8\x11\xFC\x57\x9F\x30\x1F\x96\xCF\x4F\x6B\xB0\xE0\x4E\x98\x0F\x6A\xFF\xB4\x0A\x6F\x86\xA1\xBC\x5B\xB7\xFE\xEE\xF6\xF4\x53\xA8\x6D\x72\xC1\xFD\xBB\x65\x98\x8E\x53\xF3\xE8\xAF\xF5\xEE\x2F\xE8\xFE\x5A\xED\xFE\xAA\xBA\xBF\xD2\x30\x45\xEC\xEF\xA3\x1A\x5C\xF5\x73\x7C\x51\xC7\x39\x7E\x4A\x2C\x4A\x46\x8A\x53\x5A\x6F\x70\x90\x14\x63\x81\xA3\x67\x06\x3C\xAD\xFF\xEE\x44\xF9\xF7\xBC\x8D\x75\x70\x8A\x45\x33\xD7\xFD\x7F\x36\xE7\x27\xCA\xEF\xEC\xEC\xA4\x60\x4E\x3F\xF5\xE4\x13\xA7\xD3\xED\xEB\x60\x4E\xFF\xF1\x5B\x76\x9E\x56\xDF\xBC\x56\x47\x14\x95\xC2\xDD\x3C\x65\x18\x62\xE2\x53\x38\x6D\xCD\xE4\x00\x55\xBD\x95\xC2\x0C\xA2\x13\xE6\x9D\xDC\x91\x1C\xF3\xF8\xD2\x3B\x71\xBD\xAA\x9F\x61\xF3\x55\xF7\xF9\x67\xE9\x33\x81\xCC\xBC\x53\xD7\x11\xFE\xFB\x29\x4D\x85\x08\x3F\xC2\xDA\x8F\x1D\x6B\x3F\x96\xF3\x1D\xE9\x9F\x77\xE0\x18\x52\xAF\x2F\x91\xFE\x63\xFD\x53\xBA\x65\x5A\x43\x24\xEF\xAB\xEA\xE7\x58\x13\xA9\xBE\xC0\x14\x3A\xA6\x9E\xD6\x44\x94\xAF\x84\xD6\x9F\x1A\xDE\xFE\xBA\x95\x05\x7B\x87\x7D\x4A\x9F\xB4\x6F\xEA\xF6\xAC\x5D\x37\xCF\xEA\x26\xE5\x01\x0A\xE2\x04\xAE\x7A\xC9\x91\xA8\xA1\x32\xBA\x5D\x37\xCF\xE8\x80\x79\x31\x9F\x1C\xE8\x20\x7D\x60\x94\x1C\x18\xCD\x27\x07\xDA\x75\xF3\x51\xDD\xE4\x14\xC0\x9C\x76\xD9\x81\xE9\x30\xD9\x8F\x48\xFF\x51\xCD\xE9\x81\x39\x67\xF9\x66\x5D\x7A\x60\xB6\xFB\x1B\x5D\xB2\xAF\x4C\x87\x06\x5E\x30\x27\x3D\x43\x4C\x92\x9D\x30\xCF\xA2\x14\xE1\x0A\xFA\xB4\xD6\xAF\xA7\x1C\x01\x1C\x55\x18\x8F\xDC\xDC\x2D\xAA\x9E\xAF\xC7\xA7\xEE\x16\x6A\x32\x59\x07\x6B\x65\x2E\x80\x65\x0B\x0C\x58\x54\x2C\x06\xBF\x22\x8D\xDF\x74\xD2\xDE\xCD\x7F\xBD\xFE\xA4\x5D\xE7\xC5\x96\x35\xC4\x46\xE9\xDD\xCE\x1C\xC3\x4B\x45\xE1\x94\xCC\x13\x4D\x36\x6C\
x0B\x75\xDF\xA7\x34\x0E\xF4\x83\x9A\xE0\x48\x13\xD0\x6B\xF7\x28\x4D\x70\xA3\x31\xE8\xB5\x26\x46\x71\xE1\x90\x5D\x0D\x38\x7C\xF0\x29\x8D\xBC\x9A\xCA\x1D\xD4\x92\xF1\x8C\x7A\xAF\x7E\x5E\xFA\x5C\x95\xA1\x7F\x6A\x34\x74\xB1\x2D\xA5\xC3\xEE\x5F\x77\xD2\xAE\xF6\x8C\x93\xCE\xCC\x65\x95\xB3\xC2\xBA\x8B\xF8\xCC\xC8\x1B\x1E\xBB\xB9\x4E\xE1\x5A\x38\x03\x5C\xAF\x52\x5E\x57\xD5\x07\x89\x3F\x5C\x21\xA3\xFB\x01\xCE\x90\xB4\xBD\xA0\x04\xD7\x27\x75\xFE\x43\xA7\x87\x95\xE4\xA5\x5C\x95\xE7\xC8\x2F\xED\xED\x7D\x21\x12\xDF\x9D\x19\x46\x4C\x51\x9C\x8E\xFF\x9F\xDB\xA5\x25\x15\xE1\xFF\x69\xFA\x6F\x64\xA2\x02\xDC\xAB\x75\x8C\x2A\x3B\x47\x0F\x89\x9F\x3F\x62\x03\xB0\x04\xEF\xC6\x12\x19\x4B\x07\x88\xAA\x3E\x7F\x8D\xE4\x68\x49\x46\x4F\x42\xEB\xD1\x2D\x6F\x7E\x32\xD4\xBE\x5A\xC7\xDE\x08\xDB\x33\x82\x54\x29\x28\x3A\xD4\x13\x85\xC0\x7B\x47\x78\xBF\xA8\x68\xC6\x35\x47\x1E\xDE\xBC\x69\xB7\x38\x3C\x9E\xBA\xA3\xC0\x68\x02\x8F\xB5\xED\xA0\x53\xAE\x7D\x57\xDB\x90\x36\xCF\x6D\x9E\x0F\x46\x3B\x88\xF0\x1B\x53\xFD\x41\x78\xDA\x89\x74\x48\x78\x6F\x10\xCE\xE8\xBA\x29\xF9\xDB\x92\xC2\xA8\xD2\x96\x54\x1C\x1A\xB7\x7E\x2D\x57\xFE\x5F\x34\xDD\xD9\xC9\xD6\x74\x23\x2C\x59\x3F\x92\x99\x1B\x46\xF9\x46\x62\x17\x78\x8F\xFC\xCC\xF0\x1E\x49\x59\x0B\xE0\xD6\x4D\x55\xFD\xA1\x8C\x8F\x20\xD0\x64\x30\x96\x12\x46\x3D\xB7\x53\xFD\x91\x3C\x51\x84\x29\x10\xC4\x02\x9E\xE4\xA1\x96\x30\xD8\xE2\x07\x5E\x20\x37\x4C\x47\xDC\x70\x00\x76\x48\x3A\x76\x48\xD8\x55\x6F\x2E\x41\x22\xC5\xA6\x17\xF3\xC5\xCF\x2E\xE6\x8B\xB4\xE3\x86\x4B\x2D\x81\x3D\x75\x5C\x22\x0C\xA2\x6A\xEB\x19\x5F\x6E\x77\x06\x49\xF7\x62\x10\x86\x47\x0A\xDD\x6C\xB7\x02\xAE\x00\xE9\xAE\x3C\x92\xBE\xA8\x3C\xF2\xB3\xDF\x20\x3C\xF2\x81\xA9\xA3\xEC\xCB\xDE\x5B\xF2\x3E\xBD\xD9\x44\x7D\x6D\x2F\xC9\xB9\x8E\xE9\xFE\xC9\x5A\x65\xDA\x24\x72\x7D\xC4\x4F\x2B\x6D\x93\x52\x9E\x75\x16\xF2\xAC\x53\xC8\x46\x79\xD6\x54\x97\x33\x93\x3C\x6B\x2D\x79\xD6\xF2\x6E\x9D\x72\xF1\x80\x44\x50\xF8\x56\xB9\x8B\xB2\xC9\x20\xD9\x6C\x92\x51\x9E\x75\xB2\x86\x27\x1B\xE7\x59\x67\xA3\x3C\x6B\x49\x77\x78\xA2\x45\x22\
x63\x0F\x84\x5C\xD3\x95\x9A\x63\x48\x90\x5D\x12\x75\xE9\x28\x0D\x17\xC9\xFE\x6B\xA4\x34\x19\x55\xDF\xA3\xC9\xDD\x46\x88\x39\x5C\x4D\x94\x32\xCF\x9B\x2C\x14\x41\x68\xD2\x05\x03\xD5\x7E\x85\x33\x97\x37\x08\x67\xE3\x7F\x0F\x73\x5F\xA1\xE1\xB5\x35\x61\xE2\x95\x8C\x34\x98\xD6\x69\x5F\x0C\x96\x4C\xB5\x48\xCE\x90\x3F\x45\x30\x89\x81\x9C\x8C\x2B\xB4\xCD\xA0\xB2\x86\x21\x5C\xA2\x3B\xEC\x7B\xF4\x49\x43\x17\xEC\x13\xE6\x4E\x88\xF1\xF0\x05\xFE\x67\x05\x62\x5F\xB5\xDD\xFC\x9A\x50\xBF\x4E\xF3\x3A\xE8\x16\xAF\xCC\xAB\xF6\x94\x78\x30\xB8\x54\x82\x2C\x0D\xAD\x12\x0D\x25\x0D\x43\x89\x21\xED\x86\xC2\x51\xF6\x09\x6F\xB7\x54\x44\x47\xBC\x6E\x1C\x15\x81\xE6\x1E\xA0\xC9\x28\xA0\x1D\x28\xD7\x96\x6E\x2E\x74\x2F\x3A\x0A\x51\x77\x3B\xF7\x9C\x58\x40\x00\x7F\xF1\x1D\x26\xAD\x39\xC9\x65\xDD\xAC\x50\xEC\x79\xDA\x32\xF6\x5F\x04\x05\x5D\xDC\xE9\x5F\xC5\x33\x22\xB3\x42\x04\xF1\xF2\xD5\x80\x49\x86\x4B\xD8\x59\xB8\x85\x30\xBC\x45\x50\xBF\x7B\x3B\x5E\xD4\x77\xC6\xFD\x66\x73\xFD\xA6\x64\x85\x12\x3B\x6C\xD6\xF5\xBD\x23\x7D\x9F\x1A\xF6\x4D\xD6\xD8\xC8\xBF\x5D\xB7\x60\x96\xDF\xCA\xA3\x88\xFD\xD1\x7E\x14\xDD\xF2\xAC\xA0\x94\x3A\xA6\xDE\xAE\x25\x56\xDD\xAF\xB4\x4D\x3C\xE4\xC8\xB8\xE7\x48\x53\xB0\x83\x43\x75\x4B\xEA\xEF\x94\xA4\x10\x48\x51\x62\x65\x4B\xC6\x28\x5D\x40\x49\x20\x53\xC2\xC4\x99\x2F\x2F\x34\x59\xF5\x1D\x24\x41\x27\x90\xB5\xB5\x81\x49\x9D\xD9\x53\x90\xF1\xE2\x66\x8C\x90\x6B\x28\xCB\xBF\x84\x09\x97\x22\x90\x16\xB9\xE8\x72\xDA\x92\xF3\x26\xE3\xB8\x57\xB2\xED\x98\x82\xC1\x91\x4B\xC8\x1E\x14\x5F\x75\xC9\x78\x36\x6E\x42\x11\x59\xA5\x70\x47\x9D\xA1\x00\xC2\xCF\xDB\x6D\xCF\x32\xFD\xEF\x4D\xB6\x41\x50\x3F\x62\xDC\x8C\xD7\xCD\x9D\x90\x12\x16\x59\x2A\x1B\x89\x88\x9D\xDE\x61\x54\xB7\x16\x15\x95\x69\x3F\x3E\xB3\x14\x8F\xF1\x3F\xC7\xBB\x95\xC0\x47\x20\x5D\xFE\x0E\x59\x00\x6C\x19\xC7\x4C\x8D\x77\x2B\xC0\xE8\x44\xDD\xCA\x24\xFC\x08\x01\xBF\x89\xCC\x20\xF4\xD6\xCE\x32\x3D\x64\x90\x88\x58\xA4\x29\x4E\xDA\x47\x20\xEA\x4C\xE6\xFE\x28\xDB\xE6\xBA\x27\x56\x9A\xA5\x93\xF6\x61\x1A\x00\x67\xB2\x29\xDC\xE4\x64\x60\xC5\xAB\
x4E\x24\xAE\x1B\x6E\x52\xDC\x50\xA6\x9B\x16\x40\x04\x4B\x27\x6D\x35\x62\x30\x68\xE9\x49\x28\x21\x81\xC9\xF2\x53\x7B\x32\x3A\x2D\xA5\x1C\x82\xFE\xCE\x4E\x6E\x43\xCC\xB2\xB7\x08\xDB\x7E\x9B\xB7\x7D\x10\xB5\x24\x8B\xD2\x4D\x5C\xB3\x81\x10\x23\x28\x17\x12\x04\x41\xE6\x26\x07\x90\xB9\xA1\x72\xC1\x2D\x09\x12\x3C\x24\x52\x46\xDB\x21\x01\x52\x5E\x68\xD9\xFE\x81\xDC\x4D\x75\x04\xD8\xFB\x65\x98\x45\x03\x5C\x06\xEF\xAF\x98\x81\x90\x4E\x70\xD6\xFC\xAA\xE0\x0E\x87\xC5\xE9\x97\x06\x25\xB9\xCC\x98\x0F\xB1\x74\x13\x65\xEC\x60\xC6\x31\x55\x87\x59\x95\x43\x2D\xDE\x45\x72\xC7\x41\x64\x93\x49\x13\x67\x95\xF4\xB3\x4A\x86\xE2\x51\xC4\x74\x76\x60\x31\x3D\x27\x9D\x4F\xF0\x68\x56\x05\x9E\x31\xF2\xEF\xD3\xC1\x3C\x72\xDD\xE8\x94\x32\x81\x3B\x7D\xAE\x07\x4F\xE8\xFD\xAD\x6E\x91\xB5\x9A\x6E\xA4\x06\x1C\xE3\xF5\x1F\x3C\xB7\xBF\x89\x07\xB8\x0B\xEC\x53\x4B\xFA\x18\x6B\x77\x89\x14\xF6\xA4\xCF\x4B\x27\x40\x6C\x37\x93\x97\xEE\x16\x25\xEF\x65\x9C\x97\x9E\x75\x30\x12\x19\x01\xB9\x41\x7A\x91\x72\xB4\xB8\xE4\xC3\x20\x2F\x5D\x3A\x2F\x7A\x50\xF7\xB8\xE5\x7A\x17\x73\x21\x12\xC5\x6F\x58\xE3\x7A\xED\x06\x66\xF5\xE0\x61\xB0\xCB\xED\x78\x3E\xA6\x1C\xE9\x5D\x06\xD4\xCB\x7B\x25\x76\x49\x28\x72\x17\xFE\xF8\x30\xA3\xFD\x30\xC4\x0D\x21\x4F\x44\x01\xFD\xC4\xFA\x67\x55\x1B\xB0\x82\x04\xFB\xF4\x76\xC6\x15\xBA\x97\xCA\x23\x57\xBF\x70\x4D\x92\x65\x6E\xAF\x3E\xDC\xFF\xF9\xCB\x92\x18\x87\x1F\x8E\x29\x43\xE6\xC1\xD5\x60\xB2\x3F\xAD\x50\x2C\x68\x52\xDD\xC0\xF8\x69\x5B\x7D\xF5\x9A\xE8\x8F\xC7\x54\x45\x45\x04\x98\x09\xCE\x30\xFE\xE0\x94\x0C\xF5\xAA\x47\x62\x60\x68\x1F\xF2\x51\x35\x0E\xB9\xCA\x20\x57\x85\xFC\x31\xFC\x89\x4A\x0F\xF0\x4F\x28\x98\xF4\x45\xD4\x5F\x35\x44\x77\x2A\x75\xC2\x1C\xA5\xC8\x63\x87\x3B\x99\x3E\x43\xC1\xCD\x62\x2B\x1D\x48\x10\x05\xE4\xD9\x73\x4D\x7C\x5A\x7D\xB3\x74\x59\xDB\xD3\x3B\x3B\x4F\x4B\x68\x71\xDC\x91\xB8\x6A\xB4\xB7\x97\x6E\x3C\x49\x58\x98\x37\x4E\x13\xBD\xCF\xAD\x3D\x79\xFA\xA9\x27\x9F\xC0\xB7\x71\x4F\x7C\xF3\x1A\x63\xBA\xAB\x13\xE6\x76\xDC\xCD\x27\xED\x23\xA8\xB9\x42\xAF\xB9\x2E\xE1\xFA\x28\xD0\xD5\
x07\xAE\x72\x49\x2E\x5C\xAE\xEE\xA3\x53\x4A\x15\x47\xF7\x50\xFB\xF8\x47\x37\xFC\xD1\xCD\xFC\x68\x86\x3F\xF6\xEE\xF6\xE2\xB3\xDA\x9A\x6D\xFD\xA4\xDE\xF6\x26\x84\x3A\x70\xBA\xB5\x26\xF8\xA1\xAD\x2E\x26\x1F\x74\x75\x8D\x63\xAA\x18\xF1\x8F\xC1\x72\xA4\x8E\x54\xF5\xE1\xAB\x84\x49\xC6\x05\xC5\x1F\x22\xD5\xC8\x9E\xAF\xBB\xE0\xFE\xF2\xA7\x41\x7D\x93\xA9\xDE\xF5\xB6\xC6\x49\x2E\xB3\x1B\xE4\x32\xBB\x1F\xE9\xEC\x77\x45\x07\x97\xA1\xBF\x85\xF3\xF3\x23\xAF\x3D\x90\x27\xED\x7F\xDA\xBC\x44\x57\xA6\x35\x5E\xEB\x48\xC6\x43\x69\xA0\xA1\x02\x89\xF4\xDE\x0D\xA9\x28\x3E\xE3\xA8\x5E\x50\x88\x91\xA2\x9C\x8E\x1E\xF4\x4D\x77\xF7\x34\x86\x7C\xE3\x52\x5C\x01\xAA\x96\x2C\x8F\x1D\x14\x10\x05\x6E\xCB\x6D\xCD\x30\xCA\x1D\x41\xD5\x0E\xAF\x62\x82\x7C\x26\x05\x89\x29\x87\xF8\x0C\x95\xF9\xA0\x3A\xC6\x84\x5A\x28\x41\x44\xDF\xD2\x18\x99\x20\x13\x5A\x26\x45\xAF\xE2\x3F\x8F\x09\x86\x84\xA2\xAB\x27\x49\x04\x06\xA5\x6C\x2C\xB6\x68\xF0\x68\x34\xF6\x54\x80\xE3\x89\xF9\xC5\x18\x5F\x8C\xBB\x6A\x0C\x1B\x14\xC6\xC8\xE8\x18\x1D\x12\x2A\xA1\x19\x34\x09\x9B\xEB\xE9\xA0\x22\xF0\x16\x5B\xBB\x2E\x98\xC1\x9E\x02\xC3\xC8\x8C\x84\xE7\x66\xEF\xB0\x29\xA5\xC3\xFF\x6D\xBA\x59\xBA\xA0\xAD\x53\x6B\x49\x4B\xAA\x41\x90\x0F\xF2\x05\x8D\x57\xF0\xF6\xED\x04\xEF\x96\x45\x47\x5F\xAF\x16\xD2\x97\x32\x48\x87\xE5\x5E\x90\x2E\x78\x2F\x0E\xF6\x44\x07\xC9\x16\xEE\xEE\xC5\xB3\xE2\x0A\xE5\x3C\xAB\x33\x21\xE5\x53\xF0\x5A\x77\x9F\x84\x16\x92\xCC\x4E\x22\xEE\x26\xC1\xA8\x39\x1D\x89\xB8\xCA\xAE\x4C\xAB\x28\x8A\x0F\x3A\x3D\xCC\x3E\xD7\xC3\x02\xFD\xD8\x54\xD5\x18\xFF\x49\xC5\x10\xA8\xC6\xFF\x18\xDE\x20\x2A\xDE\x3C\xA6\x63\x37\xEF\xF8\xC8\xE7\x92\x6F\x8A\x2E\x63\xD5\xAF\x88\x83\xC3\x0C\x10\xC1\x08\xA7\x93\x60\xB2\x18\xD9\x8A\xB1\xE9\x37\x1B\xE3\xCD\x1B\x1F\x62\xAC\x88\xB3\xC4\xD5\xCF\xBF\x8A\xB9\x7A\xD0\xDE\xAF\x4A\x86\x0B\x5D\x83\x09\x73\xEC\x44\x70\x8E\xAE\x20\xDF\xBE\xD1\x6B\x30\x6B\x8D\x09\x9B\xC3\x90\x8C\x36\x97\x1A\x1B\x50\x0F\xB0\x31\x0D\x86\xEA\xBF\xB7\x28\x5F\x2B\xCE\x35\x6D\xB1\x05\xC2\x24\x29\x47\x5D\x3B\xEA\x98\x6E\x9B\xD5\x47\xAE\x76\
x47\x42\x87\x9E\x6D\x05\xBB\x8B\x45\xD2\x13\x6D\x63\xAA\xFF\x43\x42\x90\xF8\xF7\x00\xE1\x95\x72\x12\x8F\xBE\xC3\x94\x27\x19\xDF\x53\xD5\x4E\x8A\xE1\xC9\x8A\x95\x54\xC1\x19\x55\x1B\x1A\x05\x43\x72\x93\xF9\x71\x85\x21\x0B\x2A\x26\x6F\xC9\x79\x9C\xEC\x49\xA2\x88\xC4\x59\x28\xB0\xAA\xE0\x3A\xEE\x82\xD9\xCC\x0E\xF8\x05\xF4\xFC\x3F\xAF\xCA\x79\x77\xB0\x95\xC4\x56\x82\xC4\xEF\x8D\x9C\x7F\x68\xA8\x7E\xF2\x88\x30\x25\x4D\x7C\x06\xBA\x3D\x7A\x2D\x65\x7A\x39\xAA\xCB\xFD\x46\xAF\x5B\x7F\xE5\x5B\x99\xE1\xE9\xB0\x34\xE0\x36\x19\x36\xCA\xE2\x33\x52\xAD\x08\xB8\xFA\x19\xE5\xAE\x41\xD4\x36\x01\x2C\x1C\xE2\xB6\xC3\xCC\x89\x36\x1B\xCB\x8B\x6F\xD7\x1A\xDB\x4B\x46\x4B\xAD\x77\xB6\xCD\xB8\xAD\x53\x48\xF0\xDB\xCB\xD2\x2D\xE3\x4F\x51\xC7\x96\x12\xE0\x0D\x2B\x3B\x16\xEF\x6A\x04\x2A\x67\x2E\xD5\x11\xA9\x33\x2C\x1B\x33\x02\x62\x65\xC8\x56\x51\x82\xDA\xDA\x81\x95\x32\xC9\x94\xDE\xF7\x04\xC7\x4B\x48\xF4\x95\x14\xB6\x29\x7B\x9C\xF3\x8C\xF5\x22\xBE\x45\x0F\x74\x95\x8E\x4B\x08\xA8\xA2\xCF\x41\x4F\x69\x41\xD2\x97\x08\xFE\x12\xC1\xFF\x6B\x21\xF8\x9F\x05\x82\xCF\x14\x10\xB0\x64\x05\xA1\x63\x33\x2C\x46\xC4\x8B\x11\x3F\x24\xB5\x04\x3B\xCA\x9B\x40\x79\xC7\x59\xF6\x9B\x8D\x54\xBC\xB3\x9B\x54\xC6\xD3\x0E\xC8\x9F\xF2\x2C\xA4\xD2\x41\x44\x94\xE7\x0B\x2F\xA3\x4B\xDE\x02\xE5\x6D\xDB\x13\xDF\x09\x64\x88\xD9\x44\x7A\x81\x63\xF4\x34\x07\x11\x81\x9E\x08\xF1\x6D\x47\xFC\x48\x4A\xB6\xC4\x10\xC2\x5C\x43\x0C\x16\x59\xDE\x1C\x57\xD7\x7F\xA2\xA5\x53\x9B\x89\x6F\x06\xC4\xCF\x46\x25\x12\xB9\x9E\xC7\x00\xA9\xB7\x63\x33\xE7\x9F\x68\xC7\xF4\x77\x64\x35\x70\x2F\xD1\xFF\x1B\x85\xFE\xDF\xE5\x74\xBC\xCD\x17\xFF\x92\xFF\x39\x0E\xAA\x03\x16\x56\x5D\xCC\x82\xF2\x1F\x56\xDD\x9F\x1F\xED\xFF\x7C\xB6\xFF\xF3\xD3\xFD\x9F\xEA\x1E\x95\x8E\x8A\xE8\xE1\x97\x4F\xAB\xD6\x53\x14\x86\xA0\x40\x52\xB6\x78\xE3\xC0\x79\xAA\x01\x78\x76\xAD\xA1\x62\xCD\x0E\xAF\x87\x5C\x83\x7E\xDD\x28\x54\x88\xEE\x0E\x7F\x3C\x4D\x21\x28\x2D\x63\xE6\xA1\x56\x7E\xE3\xC9\xD3\xEF\xDE\x7E\xA2\xB1\x27\x2D\xA0\xA6\x72\x7A\xFB\x9B\xF9\xEE\x4E\xDD\x73\x4D\x87\x93\x76\x9D\x41\
x33\xD6\x51\x71\x23\x72\xDC\xDE\x7A\x73\xC9\x57\xED\x8D\xD3\x2B\x57\xFE\xEE\xE8\x05\x7F\xBB\x2C\xB3\x0E\x5A\x77\x14\xF8\x80\x6F\x16\x4D\x2A\xF7\x9F\x8C\xD5\x25\xD4\x9A\x48\x06\x0D\xBE\xC7\xD6\x80\x95\xAA\xA3\xAC\x4D\xAD\xF2\xA7\x15\x7E\xA7\xE2\x77\x4A\xA6\x4C\x39\xD3\x27\xEC\xD7\x43\x7A\xB0\x1E\xD2\xAE\x87\x0A\xE2\x7E\x12\x89\xB4\x22\x75\x8D\x56\xF9\x91\x95\x61\x3D\xC3\xDF\x71\x0C\x7B\xC8\x68\xB0\x12\x8D\xC8\x39\xB7\x54\x28\xE2\x3D\xBF\xD3\x03\x7F\x30\x1A\x70\x11\x0C\x7F\x25\x32\xAC\x57\x7C\xDB\xD0\x6C\x46\xA5\xE2\x0E\x78\x2E\x3C\x30\x51\xFE\xC7\xC6\xEF\x96\x0C\x1A\x22\xCE\xC1\xF1\x0D\x87\xD0\xFB\x2C\xFF\xEA\xE6\x7F\x1D\x20\x14\x93\x59\xE1\x2F\xC1\x38\x05\x45\x7A\x08\xA4\x4C\x7C\x99\x86\xF8\x8C\x5A\x83\x15\x58\x51\xE5\xEF\xED\x63\x41\x09\x0E\x7F\xE6\xB5\xD9\x77\xBA\xEA\x23\x5D\x11\x16\x2B\x29\xC6\x7C\xF9\x3E\x4E\xF5\x81\x05\xE6\xB1\xAD\x19\xC8\xAF\x19\xBC\x7A\x6F\x71\xE0\x9E\xD6\xCD\xF1\x80\xE5\x7C\x7C\x11\xD2\xF3\x4A\xF8\x75\x65\xFE\xD7\xE2\x3F\xA5\x7A\x79\xE0\x9B\x76\xC3\x2B\x20\xAF\x0E\xDB\x61\x9B\x08\x77\x79\xC7\xB8\xBD\x8F\x98\x4C\x8F\xB4\xD0\x29\x57\xF5\xF2\xAA\xCE\x25\x7B\xAA\xA4\x89\xED\x98\xAD\x33\x82\x51\x4F\xE1\xE0\x25\x3E\x21\x4F\x52\x68\x38\xCE\x0E\x18\x46\x3A\x35\xA7\x60\x32\x28\x04\x7B\x66\xE2\x20\x82\xF4\x01\x42\x5F\x21\x99\x9E\xDE\x37\x51\x90\x53\x3D\x05\x02\x7A\x83\xA4\x6D\x52\x36\x88\x73\x35\xCE\x94\x0A\xE2\x78\x5D\x2F\xE1\x63\x6D\x00\x29\xA0\x73\x6C\xBA\x6E\x1E\x6E\x2A\x8E\xA1\x69\x96\xF1\x9B\x43\x3E\xD9\x22\xB7\xF9\xF0\xD4\x3B\xBC\xE4\x94\xD6\x56\x17\x50\x4E\x8C\x37\xF5\x61\xA8\xE0\xD0\xB8\x4C\x49\xB3\x12\x2A\x99\xA4\xC6\x18\x6D\x0A\x58\xF2\xDB\x8F\xC3\x61\xAF\x37\x2E\xD2\xF8\x35\xBE\x57\x26\x85\xBC\xBE\xB2\xA0\xCA\xC9\xD9\x09\xC5\x55\x2E\x77\x85\x2F\xCE\x30\xE2\x1C\xD9\x4C\xE8\x72\xC8\x13\x00\x1C\x69\x7D\x88\x29\x40\x03\x7E\xA2\x5D\x32\x0A\x3B\x5D\x5E\x37\x0E\xB2\x8B\x7C\xC0\x4D\x60\x7A\x4C\xE9\x33\x17\xEB\x09\xD7\x55\x71\x6D\x53\x6E\x30\x48\x63\x67\x97\xA0\x55\x18\xAD\x40\xE0\xAD\x79\xD2\x1B\x26\xBD\xB9\x45\xD2\xCF\x93\xFD\xD0\xBA\x79\x38\x10\
x7D\xBF\xC9\x72\x67\xCB\x42\xA3\xDD\x26\x7A\xA8\x9B\xA8\xEC\x1F\x02\xF7\x2E\x65\xBB\xE6\x10\xA3\xD8\xF8\xEE\x4F\x0F\xD0\x9C\x74\x7D\x18\x7F\x61\x8F\x59\xCE\x08\xE3\x39\x24\x64\x7A\x91\x19\xAC\xD5\x79\x01\x39\x58\xE6\x59\xC8\xD6\x9A\xC9\xC5\x33\x7D\x5A\xA8\x6B\x34\x07\x58\xC8\xA8\x9B\x14\x92\xD7\x4E\x34\x24\x75\x64\x4F\x21\x5F\x07\x32\x91\x15\x8C\x81\x57\x22\xDC\xF0\xA1\xEA\xC9\x36\x57\xE3\x2E\xD9\x90\xE1\x70\x04\x29\xC7\xC4\x44\x94\xFC\x96\xB6\x52\xCF\x4F\x73\x79\xBF\x72\xAD\xE1\xD6\xB8\xA6\x3B\x61\x37\x7B\xCB\x23\x08\x94\x73\xFC\x4A\x57\xF0\x57\xBE\xF7\x57\x06\xEC\x63\x82\x9C\xA3\xC9\xD6\x29\x07\x3B\xB0\xF5\x68\xDD\x3C\x5C\x97\x34\x44\xF6\x93\x8D\x30\xA9\xA2\xC1\x6C\x21\x5A\x37\x6E\xB0\x4C\x81\xF0\x29\x15\x7B\xEC\x22\x7B\x72\x98\x40\xE6\xAB\x4B\x17\xAB\x5F\x23\xE9\x70\xB8\x08\x41\x3D\x14\x40\x29\x56\xE3\xC3\xC5\xEF\x1A\x67\xB7\xF5\x95\xB1\x47\xA0\x4F\x0E\xB5\x78\x50\x92\x99\xAB\x64\x80\xA9\x54\x70\x93\xC8\x5D\x46\x62\xF5\x78\x4B\xC8\x84\xAB\x6D\xF5\xEB\x57\x87\xF0\x3E\x69\x2D\x31\xB1\xEA\x0E\x0B\x4D\xC2\x76\xA0\x54\x26\xBB\xD2\x64\xA8\xAF\xE0\x8C\x9A\x02\x25\xDA\x5D\xED\x7B\x9B\x92\xD4\x8E\xD3\xDB\x52\x74\x41\x0A\xF2\x41\xF9\xE4\x49\x0B\x84\xA9\x56\xB6\xA2\x14\xC4\x7C\xA8\xE4\x67\xA8\x64\x88\xA2\x9F\xBC\x82\x22\xC4\xF8\x65\xEC\xF2\xC9\xC9\xDB\x1B\x43\x28\xEF\x40\xE5\x36\xB9\x04\x56\x1D\x43\x86\xCC\x97\x11\xC5\x62\x88\x65\xBD\xD8\xA2\x85\x4F\xAC\x73\x89\xC2\xA0\x25\x53\xDD\x2C\x15\x6C\x32\xA4\xC3\x51\x9A\x64\x14\xA0\xA5\x5C\x5B\x3D\x73\x55\xE0\x6C\x1C\x55\x1A\xEE\xAD\xF3\xAA\xF8\x27\x91\x94\x9C\xA7\xA3\x78\xA2\xFC\xC7\x3E\x33\xF0\xF7\x18\xBC\x83\x7D\x42\xC9\x2D\x95\x2E\xB4\xBC\x41\xD8\xE3\x6C\x4F\x81\xF5\xFF\xE4\xBB\x6F\x9C\xB9\xE0\x27\x97\x71\x57\xEC\x3C\xF7\x2F\xE3\x0B\x17\xE9\x57\xB6\x18\x3B\x86\x70\xDA\x6E\x1B\x17\x8E\xCA\x0E\x04\x7C\xD1\xAB\x21\x52\xA9\xB3\x20\xC7\x64\x1E\x17\x8C\xCD\xB8\x4E\xF0\x0B\x36\x06\x47\x84\xB3\xA9\x3A\x9C\xCD\x70\x88\x32\x60\xBD\x0D\xB8\x57\xE6\x3C\x07\xC4\x04\x63\xB2\x81\x98\x9D\x00\x86\x1B\x32\xD8\x10\x5D\x3D\x93\xB3\x5C\xC1\xBA\xAB\
x56\x42\x38\x99\xE9\x0C\x4D\x38\x4C\xC5\xFF\xE6\xF8\xAB\x94\x8B\x94\xD9\x29\xC1\x92\xD3\x0E\x4A\x37\x38\x44\x1C\x57\x07\x49\x27\x56\x01\xC2\x73\xA2\x07\x0D\x9B\xFA\xD2\x40\x4A\xB3\x2E\x56\xF0\x0D\x42\xBF\x72\x01\xFD\x8A\x3C\x5A\xDB\x6D\xA3\x70\xA3\xE3\x8B\x36\x18\x9E\x0D\xF9\x1A\x39\x13\x71\x76\x86\x38\xB1\x2D\x9E\x93\x9D\x9D\x93\x3B\xD0\x9C\x5C\x3F\x27\x37\x9C\x93\xDB\x6F\x4E\x8C\x16\x4B\xE8\xFE\xC4\xAD\x46\xE6\x24\x93\x91\xA9\x2D\x9E\x53\x51\x78\x55\xFC\xC3\xD4\x64\xDB\xE6\xC9\x01\x98\xF7\x98\x2D\x5D\x60\xCB\x88\x6B\x60\xBA\x46\x2A\xF7\x74\xFD\x46\x3D\x6F\x45\x1D\x5B\x3A\x62\xCB\x28\x8C\x63\x90\x59\xB3\xE8\xF1\x31\x02\xB0\xEB\x11\x80\xDD\xB7\x60\x77\xF7\x93\xE8\x73\xC4\x9C\x74\x25\x4D\xE8\xA2\xE9\x18\x04\xD6\x3D\x26\xB5\x7F\x14\x04\x5F\x3A\x33\x7D\x14\xFE\x22\x7C\xD4\xC0\x9C\x24\x3C\x9B\x38\xC0\x01\xC7\xDC\x4C\x8C\xCD\x50\x69\xAF\x74\x63\x62\x8B\xAE\x5C\x2A\x25\x00\xA0\x84\x98\x5D\xC4\x78\x66\x11\xA9\x8C\x4E\x86\xEC\x60\xB9\xC8\x32\xF9\xED\x69\x11\x33\xEC\x90\x6B\x51\xE1\xC7\x02\xE9\xE7\x38\x46\x28\xE6\x0A\xC9\x14\x90\x42\xEE\x02\x59\x3E\xAE\xEA\x49\xB5\x07\xEB\x18\x0A\xA4\x5E\x71\x66\x12\xB1\xC7\x9B\x8B\x44\x30\x4B\x22\x51\xE4\xD0\x18\xCC\x0B\x62\x28\xB7\x70\x26\xE6\x05\xCD\xC4\x1C\x64\x26\xF6\xD6\x67\x22\x7E\x9C\x30\x91\xA2\xF0\xA5\x08\xF5\x88\xA1\x90\x23\x1F\xC2\xCD\xE8\x8A\x2E\x38\xC8\x93\x1E\x07\x79\xC9\x74\x78\xE8\x13\x70\x3F\xFD\x4E\x58\x0A\x38\xC8\xCE\xEB\x4B\xE0\xBC\x3D\x4F\x87\x54\x87\x83\xCC\x66\x88\x25\x98\xBC\xEB\x6D\xCD\x44\x7C\x87\x93\x81\xEF\x70\xF2\x23\x1D\x8A\x04\xA7\x82\x38\x3C\x6D\xCE\xAF\x55\x7F\x78\x75\x80\x83\x2C\x4D\x72\x71\x7A\xC2\x41\x4E\x03\x0E\x32\xDF\x21\x85\x3D\x99\x27\xCD\x00\x07\x99\x38\x93\xBC\xFF\x52\xE9\xA6\xC7\x41\xA6\xAB\x6C\xD6\xE1\x20\x47\x12\xA2\x62\x88\xCC\x22\xA4\xB9\xE3\x26\xDB\xA2\xB5\xCD\xB6\x82\xF7\x78\xC4\xBB\xBA\x83\x18\xE7\x9F\x23\x5A\x04\xF2\x9F\xC5\x14\x13\x80\x9A\x57\x3E\x87\x83\x9C\x33\x0E\x32\x3D\xF0\x01\xDB\xC1\x78\xAA\xC6\xCE\xDF\x16\xD9\xF5\xD6\xF9\x92\xCA\x00\x6D\x4C\xE7\xB4\xDC\x16\x29\x97\xC8\x42\x34\x7B\
x5B\x24\xEF\x8A\xDC\x1D\x68\xE5\xE4\x05\x30\xF7\x4F\x74\xE1\x3F\x3B\xF3\xA8\xD4\x18\x92\x2B\xEB\x07\x3E\x33\xFE\x55\x3A\x26\x3F\xAA\x78\xEC\x2B\x8E\xDD\xB0\x0C\xEA\xF5\xA1\xCF\xCC\xF5\x2C\xD1\x0D\x4E\xB4\xEA\x0A\xCC\x6B\x45\xBF\x06\xD3\x09\x0E\x9A\xA2\xC0\xE1\x75\xF1\x5A\x67\x65\xCE\xB8\x9A\x28\x7D\xF1\x4A\xF1\x95\x7E\xBC\x64\x1F\xE6\x2A\x68\xA6\xFA\x0F\x83\xDB\xF7\xAD\xD3\x10\xA7\xB1\x0F\x01\xC1\x8D\xC7\x8C\xFF\x14\x1F\x65\x6B\x15\xC5\x32\xB0\x47\xD2\xFA\xED\xC7\xEB\xC8\xEB\xDA\x48\x29\x2F\xB5\x6E\x56\xB1\xC9\xF7\x7E\x7A\x28\xDE\x05\x04\x31\xC4\xAB\x59\xAE\xC1\x8A\x03\x20\x63\xD2\x92\x51\x78\xF7\xD2\xC7\x14\x50\x89\x44\xC5\xF6\x15\xBD\x6E\x8E\x36\x5C\x15\xF4\x28\x7E\x0B\xB6\xFA\xBF\x28\x0A\x9F\x91\x3D\xEC\xDC\xA2\x0D\x0C\x0C\x09\x4D\x37\xD4\xA9\xA8\x38\xD0\x33\x05\x3B\xBB\x6A\x74\x69\xA5\xA2\xCF\x49\x40\x50\x04\xFB\x5A\xFE\xCB\x91\xED\x91\x23\x4D\x89\xEF\x1F\x62\x3C\xF0\xB0\x62\xF6\xEC\x84\x14\x7B\x1E\xEE\x60\x7C\x66\x5C\x3F\x13\x77\xE4\xC3\x02\x7A\x97\x76\x55\x75\xB6\xDB\xA5\xD8\x28\x6D\x95\x8E\x0B\xAA\xDD\xD2\x24\xDE\x5E\xF6\xFA\x71\xB0\x14\x27\x7D\x66\xE2\x38\x63\x2E\xE1\xCA\x8A\xB2\x16\x52\xE7\x9A\xCA\x51\xB2\xAA\x91\x14\x10\xCA\xF4\x18\x89\x5F\x3A\x37\x31\x60\x51\x44\x96\x1B\xE1\xAF\x94\x2A\x85\x73\x1B\x7C\x6B\x93\x92\x96\x26\xF4\x81\x3A\xEE\xAF\x0C\x97\x2D\xA6\x6A\x80\xFB\x8F\x00\x3B\xA7\xF4\xEC\x5F\xFC\x9D\x99\x6A\x2E\x10\x6D\x21\x89\x52\x81\x74\xB6\x74\xB4\x04\x4C\xC5\x5F\x8B\x75\xB2\xDD\x03\xD2\x1B\x72\x87\x1E\x15\x59\x26\x8B\x51\x32\x3E\xB9\x54\xEE\x9C\x70\x62\xD9\xD9\xCE\xCF\x4E\x56\x45\x72\xFD\xD3\x55\x35\x3A\xA6\xD4\xC4\x81\x41\x3E\x42\x8D\x8B\x16\x86\x5A\x3E\x4A\x79\x37\xC8\x47\xC6\x2B\x48\x03\x1F\xE9\x62\x66\x91\xF8\xCE\x9A\x73\xEA\x1F\xDF\xF7\x9D\xE2\xEB\x7E\x7E\x4C\x39\xE4\x1F\xA9\x81\x9F\x1D\x53\x1A\x2C\x01\x88\x83\x81\xD4\xBB\xCB\xE0\xAA\x8F\x8B\x1C\xCF\x87\xB0\xBB\xEF\x1E\x9F\x84\xD9\xAB\xB5\xC1\xD1\xDC\x4F\xD7\xD2\x37\x51\x15\x3C\x8A\xAE\xA3\xFA\x88\xBC\x0E\xA4\x22\x40\xBE\xE9\xF5\x1B\x5B\x2A\xC8\xBE\x47\x9B\xD4\x20\x45\xC5\xF9\x3F\xBB\x69\xB7\
xF0\x64\x8A\x5B\xAA\x5B\x2D\x7B\xDD\xD1\x4D\x9F\x70\xBF\x8F\x29\xCD\x45\x20\xF1\xAC\xB0\x84\x4C\x4E\xF8\x3D\x1C\x8A\x8D\x7F\x41\x7A\x06\x35\x93\x98\x2B\xF7\x87\x98\x3D\xDA\xFE\x1B\xA2\x0D\x58\x0F\x6D\x9D\xC8\x4A\x14\x7B\xAC\x84\xFD\x73\x5A\x89\x04\x2C\xE4\x3E\x26\xB3\x82\xDE\x5A\x6B\x32\x5A\x1B\x5C\x8A\x8C\xD7\xA6\xA4\x82\xAE\xB7\xBC\x32\xE5\x8B\xBD\x32\xE5\x81\x56\xA6\x38\xF8\xCA\x98\x5D\x57\xA6\x28\x8A\xEF\x8C\x75\x3E\xB0\xEC\x0D\x4B\x0B\x68\x2A\x54\x86\xFF\x9C\xF5\xFA\x52\x87\x0B\xD6\x07\x09\xEE\x18\xB9\x08\xAC\xF0\x8F\x0E\x4C\xEB\x9F\x57\x04\xE8\x8A\x37\x28\x0E\xA3\x90\x00\xFC\xAE\xD2\x00\xF9\x83\x7C\x1F\x0E\xDA\xEF\x5F\xBE\x55\xCA\xD9\x99\xD8\x53\x43\x2D\x25\x64\xD6\x9A\x76\xC9\x38\x85\x1A\x06\x73\x49\xCC\x5C\x42\x2D\xF5\x5C\x12\x8F\xB9\x84\x11\x60\xF1\xA8\x48\x79\x4D\x8B\xA0\xFD\x4B\x62\x09\x76\xCE\x39\x25\x14\x6B\xCA\x08\xC2\x89\x37\x17\x29\x40\x32\x3F\xDB\x64\x6B\x8D\xF3\xF9\x45\x92\x66\x3B\xE6\x6C\x93\xAE\x11\x54\x7D\x75\x99\xCA\x11\xA4\x64\xB1\x89\xC0\xF8\xCF\xEA\x2D\x12\x3B\x37\xF5\x16\x8B\x1E\xC2\x4E\xF5\x9F\x55\x67\xC1\xF8\x87\xF9\xFB\xFC\xFC\x45\x89\x7C\xAA\xB8\x7C\x66\x29\x14\x4D\xFC\xF6\xE3\x50\x6C\x79\xF0\x64\xFE\x77\x5B\x67\x20\x7A\x7C\x0D\xF2\x8B\x5C\x65\x22\xAD\x63\x70\x7C\x3A\x51\xC9\x68\xD6\x6B\xEA\x12\x52\x71\x42\xA0\xC8\x64\x6A\xF2\x85\x48\x91\x57\x8B\xF8\xC6\x91\x54\x24\x54\x12\x2E\x23\xDE\x24\x7E\x9B\x96\x0E\x92\xFB\xD6\x20\x67\xB2\x04\x3E\xC1\xA7\xBB\xEA\xBE\x1C\x7F\x4E\xB6\x3F\x82\x58\x41\x7D\xAE\x64\x71\x5E\x90\x49\xAE\xE0\x74\xCC\x29\xB7\x0D\x06\x32\xB0\x7E\xDA\x56\x9F\x08\x7E\xAD\x8A\x42\x59\x78\xE1\x29\x36\x88\x03\x69\x32\xBF\x63\x2E\xE2\xC3\xC7\xD4\xB4\x49\xA8\x52\xA9\xBB\x08\x29\x44\x6B\x82\x50\x10\x8A\x4B\x38\x6F\xCF\x4E\x82\x06\x4C\xAE\x6F\x48\xAB\x8F\x71\xA6\x1D\x73\x64\x3C\xE2\xC8\x98\xB2\x11\x58\x0D\xED\x39\x92\xB5\x67\xE1\x48\x8A\x76\xC2\x81\x77\xC6\x26\x57\xFC\x94\xD1\x66\x3E\x01\x8B\x71\x9F\x5C\xC8\x85\x70\x45\xF5\xE9\x10\xCC\x09\x8A\x90\xA9\x28\x9A\x87\x4D\xFF\xA8\x2F\x1B\xD0\x7D\xD5\xF5\x4F\x2F\xF4\xBA\x0D\x7C\x71\x1F\xEE\
x40\x32\x6E\xD1\xD7\xB0\x6E\xEE\xFD\xBA\x3A\x1A\x70\x05\x69\xBA\x30\x48\x49\x2A\x4A\x82\x95\xAB\xFE\x2D\x6D\xAA\xEF\x2A\xF4\xA1\x6D\xB1\x21\xF1\xAD\xC4\xB6\x61\x85\x22\x5C\x3D\xDF\xF9\x01\x56\x9A\xD4\x67\x22\x8B\x83\xF1\x54\xAF\x9B\xDB\xA9\xC8\x2A\xF1\x79\x22\x2B\xBF\x31\x31\xDD\x37\x64\x47\xC9\x48\x53\xA1\xEF\x32\xB6\x5C\x66\xDE\x5C\x42\x09\x89\x37\xBC\x94\x8A\x96\x72\xB1\x63\x7F\xE5\x2C\x2B\x37\xB7\x7B\x0D\x85\x88\x03\xCA\x45\xAA\x33\x36\xEE\xE6\x5E\xDF\x27\x15\x8F\xE3\x02\xBB\x28\xFA\x3C\x62\xCA\x91\x93\x0A\x34\x25\xA4\xD8\x3A\x83\x17\x41\x04\xAE\x21\x02\x11\x60\x7A\x11\x38\x6C\x20\xEC\xB8\x5E\x6F\x00\x38\xA2\x9A\x3B\xD5\xB3\x74\x8D\x1C\xFF\x52\xD2\x2A\x1C\x53\x4F\xFF\xEB\xAB\x3B\x2A\x88\x72\x07\xE5\x26\xAD\xAF\xCF\x2F\x3C\x34\xD8\x01\x21\x1B\x07\x29\x59\x52\x8E\xB7\xD4\xDC\xF1\xF6\x21\x5A\xCC\x25\x31\xD8\x4F\xBD\xAA\x2B\x0A\xEA\x5B\x37\x69\x3D\xF5\xDB\x4C\xB8\x25\x6F\x2E\x35\x25\x2C\x51\x75\x7D\xBB\x05\x4B\x1B\x6B\x92\x7E\x92\x33\xD9\x4A\xB1\x26\xA2\x0E\xEC\x75\x5D\xF1\xE9\x9A\x71\xF3\xBA\x5E\xF6\xAA\x3E\xE4\x55\x7D\x18\x09\x42\x71\x22\xD8\xFC\x21\xBF\xCD\x97\xEF\xC3\xD8\xFC\x04\x0E\x87\xE6\x0F\x77\xCD\x97\x7C\xEF\x9E\xF4\xCD\x67\x5E\x93\x53\x67\xB9\xE0\x90\xB6\xD5\xEA\x93\x72\xD4\x56\x9C\x70\x93\xC3\x12\x4C\xC5\x38\x9C\x4A\xC9\x25\x66\xF7\x09\x3F\x50\xC2\x61\x38\x24\x0F\x64\x05\x64\xCC\x38\x4B\x23\x5A\x67\x13\xB2\x85\x0B\xCD\x90\x3A\x19\x79\x52\xA0\x3C\x73\x71\x82\xD3\x7D\x2A\x5C\x88\x68\x55\x2A\xBC\xA7\x2B\xC8\x43\xFA\x85\xAB\x4B\x58\x22\xC2\xC6\x25\xF9\x19\x33\x3E\x10\x29\xDD\xC6\xB2\x0F\x17\x90\x73\xC2\xAF\x03\x86\xF6\x5C\x23\x47\x58\xBA\x63\x65\x66\x6E\x22\x57\xE7\x63\xD0\x28\xC9\x73\x61\xE1\x3C\x18\x29\x72\xA9\xA4\x79\x85\xA2\x1D\x99\x85\x93\xC0\xC2\x39\x3D\x2A\x26\x0B\x66\x61\xE6\x60\x4B\x70\x7B\x92\x33\x64\x28\xDC\x35\x87\x2C\x50\x50\x4B\x22\x9C\x98\x4C\xC4\xD6\x5D\xA3\x1C\xEE\x90\xBF\x15\x19\x9E\x47\x7C\xCD\x54\x4B\x69\x9B\x05\x06\xCF\x68\x11\xD2\x01\xA9\x7B\x6F\x60\xCA\x64\x1A\x2E\xAC\x6C\x7E\xDB\x31\x65\x14\x98\x32\xA3\x60\x0C\xC8\x02\xD7\x64\x1D\xD7\
x58\x66\xCA\x98\xB9\xA6\x4E\xC7\x0D\x6A\xAA\x5C\x9D\xE2\x6E\x64\x2E\x68\xD2\x33\x5C\x9A\x38\x9A\x19\xD8\x90\x07\xA2\xC0\x03\x5C\xF0\x90\xBD\xBE\xBC\xDC\x94\x36\x18\x9D\x21\x3F\x96\xAC\xB4\x2C\x62\xC6\xFE\x1C\x0D\xC9\x09\x03\x90\x73\xE2\x11\xAF\x7E\x89\x2A\x3E\x09\xA2\x5D\x84\xD9\xAE\x2B\xAF\xE9\xF3\x01\x56\xDF\x1C\x64\xF5\x07\x02\x4C\xC6\x89\x3A\x9F\xDE\x0A\xF2\xF9\x37\x69\x8A\x9F\x0A\xF2\xD9\xD5\x26\x1C\x15\xAC\xD0\x09\x69\xE2\x20\x9C\x93\xF1\x7C\xBA\xD9\x58\xBA\xB6\xC7\xDD\xAC\x78\x16\xB6\x8E\xA8\x32\x16\xAA\x5F\x61\x66\x8C\x58\x40\xD7\x5E\x1C\x2E\x17\xED\x4E\xC8\xDE\xC4\x12\xD2\x5F\xD9\x10\x04\x5A\x36\x4C\xE3\x14\xF3\x30\xC5\x94\xDE\x11\x1F\x88\x4C\x31\x74\x4E\x08\x19\x6D\xDD\x4B\x6A\x33\xAC\xD5\x54\x42\x42\x1A\xB8\x6D\xE8\xAE\xC9\xF1\xF9\x29\xD0\xFD\x4E\x35\xF9\x02\x59\xCD\xF9\xAE\xC1\x5C\x86\xBC\x93\x07\xDE\x19\xFD\xB2\x50\x56\xDB\x5B\x94\xD5\x49\x90\xD5\x99\x48\xA4\x4E\x56\x27\x41\x56\x27\x7B\xCB\xEA\x8C\xA9\x38\x90\xD5\x49\x2F\xAB\xD3\x5D\x65\x75\x1A\x64\x75\xBA\x9F\xAC\x4E\x67\x64\x75\xBA\x9F\xAC\xCE\x06\xB2\x9A\x0F\x50\xD2\xDC\x16\xCB\x6A\xBC\xD8\xF4\xB2\xBA\xA7\x75\x3A\x27\xAB\xD3\x5D\x64\x75\x36\x90\xD5\x59\x90\xD5\x76\x56\x56\x8B\x10\x4A\x83\xAC\x2E\x46\xB2\x3A\xDD\x45\x56\x53\xFC\x5A\xD4\xF9\xA4\x65\x29\xF9\x2B\x79\x2C\xE8\x20\x0F\x76\x3F\x2C\x50\x43\xCC\x48\x11\x89\x98\xD7\xF1\x69\xD3\xF1\x7A\xB4\x87\x3E\x82\x8F\x06\x56\x2F\x48\x96\xC7\xB8\x97\xEA\x04\x8A\x60\x9E\xEB\x2B\xB2\xB3\x99\x29\x0D\x99\xCE\xED\x50\xB4\xA7\x28\xDA\xF3\x4E\xB4\xA7\xA3\x3D\xC0\x14\x16\xD3\x94\x6C\x86\x94\x16\x2C\x19\x2C\x4B\x2F\xD7\x93\x5D\xE5\xBA\xE9\x18\xD8\x05\x06\x4E\x59\xAE\xA7\x81\xC3\xD2\x8E\xC3\xB8\x06\x5E\x27\xD7\x93\x45\x72\x3D\xC1\x2D\x2B\x72\x3D\x11\xB9\xEE\x66\x06\x36\xE4\x17\x17\xF8\xC5\x74\x72\xDD\x0C\xE5\xBA\x13\xB9\x9E\x0E\xE5\x7A\x1A\xE4\x7A\x84\x72\x3D\x0B\x72\xBD\x10\xB9\x9E\x91\x14\x5B\x2C\x07\x47\x52\x7D\x86\x29\x2C\x2D\xF3\x2E\xFA\xE9\x42\xC6\x18\x0B\xC1\xBD\x18\x63\x20\x04\x79\xD8\x63\x31\xFF\xDB\x34\xE3\xCF\xE7\xBA\x9A\x11\xF3\x41\xAE\xBB\x6E\x3E\xC2\x24\
x2C\xF6\xF1\xE0\x18\x03\x5F\x49\xF6\x1F\x01\x18\x18\x4A\x09\x25\xDE\xC9\x76\xD3\x75\xE3\x3A\x85\xBC\x76\x52\x55\x03\x1C\x64\x43\x7D\x77\xF4\x6B\xBC\x48\x8E\xA6\x10\xEF\x23\x47\x09\xAF\xBA\x18\x0B\xD1\x52\xF8\x8F\xE2\x83\x3A\x1E\x9C\x04\x1E\x2C\x91\x07\x73\x28\x03\x0F\x96\x1D\x0F\xC6\xCC\x83\xF9\x58\x88\x2E\x89\x07\x20\x08\x51\x92\xCD\x28\xF9\x28\x58\x89\x6F\xD1\x69\x5D\xF1\xC5\xD9\x35\xCB\x14\x1E\x00\xCB\xA1\xF9\xE5\x81\x3E\x4D\x1E\xCF\xA2\x6F\xDE\x79\x4D\x51\x4D\xD3\x39\x21\xBA\x24\x19\xE6\x50\xC2\x64\xA1\x10\x2D\x82\x46\xBC\x0C\x95\x3C\xE0\x28\x81\x83\xA2\x44\x46\xC4\x76\xA3\x4D\x81\xD4\x21\x69\x37\x85\x9C\x84\x68\xDE\x0B\x51\xDA\x22\x4B\x2C\x44\xBB\x7C\xE3\x72\xA2\xBD\x9A\x0E\x74\x45\x33\xA7\x2B\xE2\x15\x5B\x4C\x10\xE4\x7E\x10\x5D\x31\xED\x64\x8A\x84\x01\x05\x9B\x31\xB3\x08\x23\x3A\x38\x48\x65\x9C\x66\x20\x55\xDC\x42\xA9\x92\x7B\xD5\x19\x2E\xD2\x3A\x0B\x24\x37\xBC\xA2\x26\x90\xDC\x0C\x56\xD4\x0D\x56\x34\xE4\xCE\xF4\x52\x25\xA7\x01\x18\xC8\x44\xAA\x38\x91\x2A\xD9\xCC\xC0\x86\x04\xC4\x33\xBA\x63\x59\xBE\x23\xC4\xBD\x54\xC9\xF1\x24\xD2\x8C\x16\x4E\x53\x30\xE1\xAC\xB1\x74\xD6\x10\xC3\xE6\x52\x1F\xCE\x1C\x48\x41\xCC\x07\xD1\x15\x9A\x3E\x53\xB5\xAB\x94\x0C\x69\x28\x0D\x5C\x17\x20\xC0\xE9\xE2\x23\x05\x31\x0E\x82\xC3\xD1\xA3\x02\x67\xDB\x2B\x88\x39\x4A\x0F\xB6\xCB\x91\xE4\xC8\xA7\x05\x53\xDF\x60\x37\x1B\x3C\x75\x85\x63\xA7\x3C\xE2\x6E\x1A\xA1\x06\x17\xD9\x9F\x86\xF3\x60\x4D\x8C\x64\xDC\xA2\x09\x85\x25\xE3\x50\x11\x9E\x88\x91\x89\x98\x2E\x2A\xA0\x9F\x88\x9E\x9B\x88\xA1\x47\xC7\x13\x01\x0D\x79\x18\x18\x29\x82\x7F\x11\x7D\x07\x81\xFB\x3B\xC4\x0C\x3B\x85\x5E\xDE\xEE\x83\x2B\x48\xD2\xDA\xA1\xA4\x15\x6B\x08\x85\x88\x2B\x01\xE2\xA4\xCC\x3F\x3C\xB6\xC0\x06\xD3\x42\xD8\x11\xBC\xEF\x2D\x25\xA8\xEB\xB6\xC9\x77\xB3\x32\x94\x24\x39\x6B\x03\x65\xF0\xB9\xA0\x42\x1A\x6E\x3E\x33\xBF\xE2\xF1\xCD\x70\x11\xE9\x22\x01\x6C\x20\x3D\x88\x00\x9E\x0C\x4D\x02\x8A\x26\x10\xB4\xD8\xDE\x24\x30\x0D\x26\x81\x25\x0E\xAA\x5A\xA0\xC5\x26\x7C\xCF\x49\xC7\x26\x81\x52\x92\x63\x83\x00\x0E\xC2\xF7\x50\xF0\xCA\
x59\x6C\x7E\xD9\x6F\x73\x88\xCB\x21\xD6\x62\x0F\x85\xE6\x0F\x75\xCD\x13\xBE\xDB\x48\x8B\xB5\xA2\xC5\x56\x73\x02\xB8\x3C\x23\x3A\x4F\xAF\xC5\x66\x62\xF9\x18\x69\xB1\x29\x1C\x82\x65\x79\xC0\x72\x46\xB5\x68\xB1\x3D\xCD\xED\x9C\x16\x4B\x8E\xDF\x0A\x52\x12\xC0\x69\x2F\x80\xC9\xB6\x59\xB2\x00\x4E\x82\x00\x5E\x9A\x11\xC0\xA2\xE0\xB9\xDD\x15\x3C\x87\xF2\x38\xE9\xE4\x31\x19\xA4\x92\x81\x11\x9E\xE9\x39\x6B\x88\xA7\x31\x27\xDD\xA1\xD1\x0B\x63\xBB\x50\x18\x9B\x11\xF9\xF3\x40\x7E\x6C\xB4\x41\xD9\x21\xE4\x77\xB3\xE4\x37\x22\x8C\xED\xAC\x30\x36\xC0\x39\x4E\xB9\x08\x63\x2B\xC2\x38\x9F\x19\xD8\x90\x98\x79\x38\xCD\x08\x7A\x82\xF4\xBC\xB4\x17\xC6\x06\xC5\x33\x09\x63\x39\x4F\x5C\x10\xC6\x51\x2F\x8C\xAD\x80\xDD\xBB\x03\x68\x75\x26\xC4\x14\x59\xD6\xEA\x22\xDA\x44\x34\x76\xAA\x37\x40\x69\x54\x24\x4E\xA8\x22\x3A\x0B\x14\x2B\x4E\x0A\xD2\xEA\xF0\x8D\x5E\xAB\x0B\xDE\x08\x4A\x11\x21\x44\xAB\xED\xB6\x49\x82\x5C\x89\x88\x3F\x4D\x90\xBA\x05\x18\x94\x6E\x2C\x9B\xED\x22\xD9\x1C\x66\xC5\x51\x36\x0E\xF2\x3A\x01\x43\x70\x8C\x43\xF9\xBC\x50\x32\xDA\x41\x00\x9F\xE3\xA9\x0C\x8F\x17\x3B\xF4\xB6\x0C\x25\x63\x3A\x73\xBC\x8C\x67\xD0\x4B\xE5\xAF\x6B\xA7\x66\x24\x8E\x3F\x4B\xEC\x70\xF3\xAF\x8C\x19\x1A\xC4\x4A\x4F\xE6\xE7\x2E\xFC\x41\x90\xA7\x1C\x99\xD3\x82\xFB\xB3\x58\x37\xAB\x4D\x3E\x51\xFE\x8B\x03\x57\x22\x7E\x89\xC7\xBE\x01\xB7\x29\xA6\x90\x9C\x2F\x3D\x0E\x7F\x3B\x0A\xB6\x7A\x1B\xEF\x50\x46\xD2\x77\x60\xAB\x8F\x5D\xEB\x05\x9C\x28\x73\x19\xE1\x74\xFE\x45\xDA\xBB\xF3\x32\x7E\xC9\xDE\xFD\xF5\xB3\x77\xE7\x1C\x43\xD2\x59\x41\x73\xB6\x79\xBB\x60\xF5\x8C\xBE\xA1\xEC\xDD\x06\x1C\xD9\x48\xB2\x3A\x25\xA3\xF7\xFE\xC6\xEE\x7C\xCE\xD8\x9D\x1F\xC4\xD8\x9D\x1F\xD8\xD8\x8D\x62\xA6\xE8\x38\xD2\x8E\x8C\xDD\xD1\x22\x63\x37\x87\xBB\x84\xA4\xDF\x79\x63\x77\x24\xC6\x6E\x3B\x63\xEC\xB6\x33\x03\x1B\x32\x80\x0D\x0C\x40\x37\x48\x8A\x3B\x2D\x86\xD7\x17\x3B\x6B\xEC\x96\x65\xD6\x03\xE3\x76\x4C\xEA\xF5\xAE\xE6\x6D\xBE\xA3\xCC\x08\xC9\x5C\x82\x31\xFB\x5B\x4C\xC1\x12\xBF\x90\x45\x2F\xC2\xA2\x17\xEC\x8A\x1C\x09\xC8\x10\x0E\x05\x05\x3D\x3A\
x96\xF8\x7C\x8B\x19\x98\xB9\xA5\x02\xDF\x09\x03\x38\x54\xE4\xAB\xDE\x1A\xF2\x39\x9A\xEA\xAF\x97\xFF\x95\x18\xBD\xBB\xF3\x21\xE5\x40\x5B\x3B\x4A\xAE\xC9\xF1\x7C\xC8\x66\xCE\x07\xFC\x12\xA5\x49\x01\x76\x53\xAA\x4E\x89\xB3\xC3\xE2\x6F\x47\xC1\xC8\xF9\x40\x1B\x26\xC7\x31\xC8\xF9\x80\xA4\x96\x11\x98\x82\x37\x75\x6F\x6A\xA7\x0B\x8B\x39\x90\xA9\xDD\xBC\xC8\xA6\xF6\xF8\x25\x53\xFB\xAE\xA6\x76\xF3\xE2\x9B\xDA\xA3\x59\x53\xBB\x1B\x9A\xDA\xA3\x7D\x4C\xED\xF9\xBC\xA9\x3D\xDF\xCD\xD4\x9E\x0F\x4C\xED\x66\x5F\x8B\x6A\xBE\xA7\xA9\x5D\xE2\x4A\xD9\xD4\x9E\xEF\x62\x6A\x2F\x5E\xB2\xB5\xFF\x95\xB2\xB5\x9B\x45\xB2\xD7\x7C\xED\xB6\xF6\x7F\x47\x33\xFE\xD9\x42\x6C\xED\xE1\xAE\xD1\xDE\xB2\xB1\x3D\x08\x74\xB0\x2C\xB5\x51\x82\x26\x1B\xA1\xD6\x28\x05\x36\xAE\x52\xFE\xD1\x50\x72\xE3\x97\xA4\xEE\xA0\xE4\xCE\x45\x20\x89\xE4\xCE\x50\x72\xE7\x22\xB9\x53\x01\xF4\x80\x5C\x24\x37\x4A\x85\x89\x06\x53\x4B\x3E\x90\xB0\xF4\xC6\x44\x8F\x39\x39\x61\x8B\x12\x25\xD6\x52\x30\xD0\x3E\xFE\x53\x57\xDA\x60\xFB\x9F\xF1\x9F\xCA\x2F\x0B\xED\xFE\xF6\x6B\xB6\xFB\x67\xFB\xD8\xFD\xE3\x45\x76\xFF\x6C\xB4\x1F\x0E\x62\xF7\x8F\x6F\xC5\xEE\x1F\xDF\xBA\xDD\x3F\xDB\xCF\xEE\x1F\xEF\x61\xF7\xCF\xF7\xB3\xFB\xC7\x24\xD1\xE3\x19\x89\xBE\x34\x27\xD1\x63\x4A\x61\x26\xF4\x05\x4E\x09\x61\xAD\x38\x19\x89\x2F\xE2\x86\x79\x01\xE6\x3A\x83\x4D\xBA\xAF\x59\x9F\xE2\x2F\x3A\x8A\xE6\x81\xA2\x29\xEB\xC5\x0B\x04\x58\xC6\x14\x8D\x76\x33\xEB\x47\x6C\xBE\xEF\x2C\x49\x6E\x60\x49\x1A\x0E\x6C\xD6\x92\x94\x76\xC4\x88\x06\x70\xD7\xB8\x52\x49\x67\x49\x9A\x75\x21\x9B\xDE\x92\x94\x88\x59\x7F\x2F\xEB\x41\x42\x60\x6A\x43\xC5\x58\x2C\x07\x9D\x79\x3F\x61\x53\x88\x19\x99\x42\xD2\xDE\x14\xA2\x46\x8A\xB1\x9A\x31\x85\x0C\x4F\x31\x9C\x72\x32\x63\xDE\x4F\xF6\x31\xEF\x9B\xDE\x84\xB4\xAF\xCD\x28\x58\xC2\xA4\x9E\xC6\x8C\xF9\x6B\x68\xFC\x32\xA3\x1B\x9C\xD9\xD3\xEA\x45\x80\x17\xBB\xDB\x8C\x5C\xA7\x5D\x2B\xD6\xAC\x93\xA1\x66\xCD\x9D\xA6\x7B\x75\x2A\x0A\xF5\x6C\xA7\x76\x64\x33\xFA\x22\x2D\xFC\xC7\x3B\x39\xBE\xA7\x09\x7F\x6C\x47\xC2\xEB\xE7\xD8\x6D\x0A\xEE\x6C\xC0\xF1\x64\x25\
xDC\x74\xCA\x2E\x6D\x98\x45\x4A\x78\x2A\x4A\xB8\x03\x13\x44\xB9\x28\xE1\x94\x28\x31\x23\xCA\x53\x30\x23\x51\x8E\xBC\xCA\xD4\x31\x9C\x9A\x33\xA3\x78\xCC\x0B\xEA\x81\xAB\x60\x46\x50\xDB\x3D\x1C\xB4\xE6\x6B\x12\xD4\x99\x88\xA3\x17\x57\x50\xDB\x5D\x05\xB5\x0D\x82\xDA\xEE\x27\xA8\xED\x8C\xA0\xB6\x2F\xAA\xA0\xEE\xFD\x03\xE5\x88\xD0\x76\x4E\x50\xDB\x5B\x11\xD4\x86\x04\xF5\x42\x1F\x81\xD9\x5D\x31\x9D\xF5\xD9\x2E\x74\xD8\xDA\x05\x0E\x5B\xBB\xD0\x61\xBB\xD8\x47\x80\x2B\x9D\xCC\xFB\x08\xC8\x61\x9B\x2D\x72\xD8\x26\xBC\x04\xD9\x6E\x3E\x82\x8C\x06\x60\x76\xF1\x11\x2C\x76\xD8\xE6\xC1\x61\x4B\xDE\x15\x22\x61\xD2\x4B\xF6\xAC\x93\xEC\xB3\x0E\x5B\x37\xF6\x11\x64\xF3\x0E\xDB\x85\xDA\x68\x16\xD2\xE1\xC5\x47\xE0\x82\x8F\x20\x63\x21\x99\xCE\xF8\x08\x4C\x2F\x26\x13\xD6\x46\xDD\xC8\x47\x90\xCC\x48\xCB\xB1\xEF\x91\xAB\xAC\x64\xBD\x8F\x20\xDB\xC7\x47\xE0\x3A\x37\xE9\x01\x9D\x02\x43\x77\x69\x32\x72\x97\xDA\xDE\x5D\xAA\x46\xB2\x56\xCD\xB8\x4B\x6F\x49\xC0\xDB\xBD\x05\xBC\xED\x05\xFC\x2E\x9D\xEE\x22\xE0\xC5\x29\xF0\x1E\xC6\x44\x1A\x07\x33\x34\xF6\x7C\x2F\xCD\x1B\x07\xB6\xDB\x32\x4D\xF4\x5A\x61\xA6\x13\x66\x85\xC7\x3C\x10\xED\xFA\x98\x3A\xCC\x72\xD0\xE2\x2D\x37\x04\x3E\x18\x9F\x9F\x25\xAC\xDA\x99\xC4\x0F\x8D\x5F\xA9\xB3\xF4\xAF\xA4\x7D\x64\xAF\x9D\x68\x2A\xFD\x0D\x11\xBE\xE2\xBA\xD7\xCD\xE2\xBC\x11\x7C\x5D\x0D\x5E\xA7\x54\xD4\x1D\xE4\x02\x57\x9B\xD2\x32\xB3\xC6\x94\x8B\xE7\x75\x6D\xBD\xA1\xAB\xA1\x8F\xD8\xCD\x25\x99\x51\x38\x53\xC2\x9C\xBE\x48\xBB\xFB\xC3\xAA\x6D\x5C\xB7\xC1\xD7\x4D\xD9\xCC\x66\xD2\x81\x60\x93\x2A\x9F\xFA\x12\xA2\x35\xCE\x59\xEC\x32\xA1\xF8\x2D\xCA\x5A\xFC\x58\x90\x0C\xA3\x9D\x0B\xD2\xBC\x3E\x48\xF3\x38\xB4\x51\x0F\x0B\xDA\xA7\xFC\x25\x27\x15\xE7\x68\x6D\x57\x70\xBA\x72\xA4\xAE\x9B\x55\x5A\x15\x4F\xCB\x8A\xED\x31\x96\x36\x63\x56\xDC\xBC\x69\xF9\x0A\x6A\x59\x46\x2C\x33\xCA\x40\x90\x17\xC5\xBF\x1B\x64\x7C\x1C\xDF\x6C\xB2\x0E\x6A\x3C\x97\x8A\x0E\x4C\xC4\x63\x8A\x7C\x8E\x71\xC8\xAD\xA2\x07\x62\x2E\x6A\x97\xE1\x3F\x77\x42\x06\xCA\x1F\x6D\x4F\x98\x75\xAA\xDE\x60\x2B\xFC\x9E\x6E\xBA\xF8\
xF3\x51\xC8\x70\x23\xDC\x0E\x19\x68\xA9\x4B\x9D\x49\x0D\x68\xB2\xE9\xDE\xA3\x2A\xC8\xC0\xDD\xA3\x0E\x41\x06\xD1\x3D\xEA\x30\x64\x90\xF9\x75\x46\x31\xC1\xA3\xA7\xFA\x5D\xA4\xC5\x70\x08\xAC\x26\xAC\x9B\x15\x2A\x87\x4F\xD5\x25\x12\x66\x73\x39\x10\x48\xFF\x8D\x1E\x5F\x63\x90\x23\x36\x4B\x40\x76\x4C\x1D\xF2\xFA\x12\xFE\x8B\x67\xE3\x45\xFC\xE3\xF0\x45\x62\x2F\x7B\xA9\xD1\xFE\x0B\x9F\xDE\xD9\x21\xB8\x36\xAF\xDF\xD8\x52\xE2\xF7\x73\xE1\x9B\xE5\x37\x53\x4A\x4B\x8D\x4C\x90\x1D\x53\x47\x47\x04\x01\x72\xE7\x03\x81\x98\xD4\x79\x01\x99\x3F\x1E\xD2\x6A\xF2\xE2\x67\x8C\xD6\xDB\x73\x78\xEE\xFD\xC6\xEA\x32\xF4\x0C\x08\x1A\x97\x5A\x37\xCF\x1B\xBF\xCD\xF0\x15\x0C\x38\x7B\x94\xFE\x22\x60\x17\xFA\x6B\x95\x41\x86\xFD\xF3\xA6\x65\x04\xF4\xEF\xC3\x46\xAB\xEF\xBD\x2A\x40\xCB\x5F\x52\x6D\x4D\x50\xC0\x2F\xA8\xD1\xF7\x2C\x6A\x54\x8F\x1B\xFD\xF2\x01\x1B\xFD\x72\x68\xF4\xFB\xC7\x8D\xFE\x3E\x36\x2A\x89\x75\x6A\x88\x99\xAE\x8A\xFF\x4E\xEB\x6D\xDE\x2C\x4E\xC0\x9B\x6B\x1C\x44\x80\x9E\x23\x40\x75\x75\x87\x7D\xE4\xB4\xD9\xAE\xFE\x0D\x4B\x44\x42\x86\x7B\x7D\x71\x62\xDF\x37\x2D\xBD\x39\xF3\xDA\x86\x31\xDB\xFA\xCA\xF8\xC5\x46\x1D\x53\xA6\xD6\xFD\xDB\x86\x90\xDB\xF1\xED\xC6\x82\x3D\x6D\xB6\x41\xAF\xF5\xED\x98\x13\xE6\x42\xB1\xAA\x7A\x0C\xB1\x8F\xE2\x2F\x82\x24\x56\xFC\x47\x6B\x35\xE1\xF9\xF4\x69\x56\xA9\x4C\x57\xF6\x53\x4A\x1B\xA3\xC9\x4E\xDA\x94\x0A\x85\x53\x7D\xBD\x94\xF6\x0D\x7D\x09\xD9\xF5\x80\xFE\xA4\x8E\x29\xDD\x14\xF8\x8F\x6A\x4A\x2A\xA5\xD8\x4C\xD6\x44\x5F\xA5\x90\x00\x52\x8F\xF2\x0D\x3E\xF3\x4B\x28\x60\xFC\x73\x4E\x25\xC7\xAF\xB7\x03\x35\x6A\x83\xD3\xCC\x26\xA4\x67\x58\xAF\xCF\xE2\x0D\xEA\x2C\x2A\xA8\x5A\xD0\x1A\xCA\x61\xA0\x8E\xBF\x82\x9B\x4F\xAB\x82\xCB\x55\xA3\x66\x56\xE7\x14\x6B\x73\xA6\x36\xDE\x52\xC4\xA3\xD7\x9C\xDF\x6F\xA4\xAE\xB4\xF3\x1A\xF2\xB5\xDA\x41\xEE\xF5\xE3\x35\xE5\x0F\x4C\x28\xAB\xAD\xFA\xD2\x55\x2E\xD4\x41\x45\x33\x4A\x64\xB2\x63\xCA\x9C\xA1\xE2\x1B\xAE\x85\xA4\xFA\x3D\xB6\x01\xA0\x86\x7E\x4C\x29\x0E\x80\xF8\x0C\x5E\x04\x24\xDD\x4C\x8C\x83\x34\x37\x37\xB0\x0A\x53\x4E\x35\xC9\x86\
x98\x6B\x7C\xA0\x74\x38\xA6\xCC\xDA\x06\x6B\x54\x06\x04\x89\x84\xFA\x93\x2C\xF6\x5A\xF0\xF7\x34\xDE\x28\x08\xBE\x83\x61\x2F\x34\xA8\x16\xE2\x4D\x81\x9E\x8D\x44\xE6\xF0\x6F\xD4\x91\xC2\xDF\xF0\x4A\xD2\x65\xD6\x99\xE2\xFD\x86\xB1\x1B\xF4\x2E\xC9\xF7\x7C\x54\x70\xA1\xB0\x21\x5E\x83\xA0\x26\xE8\x85\xA8\x09\x66\xEC\x0C\x61\xEC\x82\x2E\x25\x58\x00\x1C\x74\x87\xA3\xA0\x3B\x74\xA0\x0E\x47\xC1\x74\xA9\xFA\xA0\xCF\x4E\x42\x76\x2B\xE9\x70\x5F\x99\x43\x45\x08\x83\x64\x02\x07\x1C\x42\x35\xC4\x21\xB4\xE0\xCE\x4D\x94\xFF\xF6\xB9\x91\x06\x60\x85\xC1\x48\x18\xED\x5E\xCE\x42\xFC\xAC\x3B\xBC\x4F\xA9\x72\xF3\x62\x15\x2E\x78\xFE\xEA\x37\x5C\xE1\x82\xE7\x59\x1A\xBC\x3B\xE9\x71\x64\x03\x16\x64\x0F\x1E\xF8\x97\xB1\x70\x41\x3C\x53\xB8\x20\xA6\xDB\x01\xBF\x68\xF1\x45\x1B\x6E\x5B\x11\xD7\xE0\xEB\xB1\x72\x15\x63\xD6\x26\x2D\x61\xAD\x4B\xD4\x34\x37\x21\x88\x56\x71\xD0\x8A\x53\x52\x7F\x62\x02\x9B\x9E\x07\xCD\xB1\xF3\x18\x4E\x62\x5B\x46\x9D\x88\xDE\xDF\x98\x18\xBE\xB8\x64\xA4\x83\xE3\xC7\x4C\x8C\x3B\x4C\x67\x33\x42\xA3\xB2\x04\x98\xD3\x01\x79\x09\x3D\x07\x00\x65\xA9\x18\x37\x04\xBB\x09\x5C\x57\x71\x93\x11\xD5\x76\xAB\xA0\x10\x4B\xF1\x01\xC3\xC5\x07\x62\x42\x92\x0E\xC5\x07\x62\xC6\xE2\xE9\x30\x09\x0F\x54\x5B\x41\xCF\xD4\x54\x88\xA5\xA6\xC2\x0C\xC1\x03\xF3\x0B\xD1\xD3\x21\xD1\x75\x47\x72\x83\x9F\xE6\xC7\x57\x9A\xE2\x1B\x74\x1D\x48\x18\xEB\xE1\x3A\xE8\xAE\x46\x91\xEA\x6E\x53\xC3\x75\x48\xBB\x75\xE8\xA9\x5E\x14\xC5\xE7\xB5\x36\xDB\xA0\xA8\xA4\x8D\xFF\x88\x62\xE0\x3A\x0A\xDB\x63\x31\x75\x5A\x9D\x24\xA5\x46\xFD\x0D\x5D\x32\xE9\xD3\xAE\x58\x81\xE3\xDB\x55\x55\xFD\x3F\xAC\xD1\xFB\x3B\x5B\x51\xD5\xE5\xAB\x60\x0D\x13\x90\xE0\x80\x1A\xEC\xF5\xA5\x19\xBC\x60\x1D\x8A\xEF\x92\xCC\x0A\xC2\x80\x24\xF1\x29\x8E\x21\x3C\xD1\x65\x2B\x04\x1C\x65\x29\xEC\xFD\x08\xA7\x56\x07\x18\xE3\x2F\x29\xA9\x41\x3D\x80\xCC\xFD\x49\xAB\xA3\x0E\x50\x48\xB0\x7C\x6E\x3E\x37\x23\xF6\xCD\xE0\x80\x1A\xC0\xCF\xAA\x05\xF0\xB3\xFF\xE0\x73\x33\x62\xDF\x8E\x92\x89\x59\x23\xF1\xEA\x3E\xBE\x05\x4A\x0B\x28\x90\x1E\xE4\x6B\xE7\xA2\x8A\x2D\x0E\x3A\
x1A\xE8\x21\x0D\xA2\xE1\xB1\x17\x52\xFC\x59\xEE\xC7\x74\x71\x0F\xB8\x80\x94\xBF\x81\xEF\xF1\xE7\x00\xA3\x1D\x62\xDA\xE3\x1E\x7C\x9A\xE0\x72\x15\x57\xB3\xE5\xEA\xE1\x8B\xB7\x59\xC5\xBF\xF4\x30\x42\xA0\x1F\xE0\x3B\x20\xF1\x99\x9C\xD9\xBB\x90\x73\x80\x2D\x44\x14\x51\x43\x62\xF2\xAE\x01\x35\x43\xCC\xA2\x5F\xB2\x1F\xE7\x03\x03\xDF\x7F\x54\xA0\x2A\x83\x69\x8C\x92\xEA\x89\xAB\x40\xF9\x4F\x29\x31\x0B\x67\x97\x1B\x3A\xE2\x05\x60\x41\xF9\x67\x94\x60\x29\xE9\x8D\x49\xF0\xA8\x26\x1C\x44\x42\xC9\xFA\x6C\xE7\x68\x62\x9F\x5C\xF6\x7F\x72\xF3\xE6\x74\x8B\xF7\x87\xBF\x69\xF1\x8A\x44\x61\x8B\x9C\xF7\x10\xBE\x88\xD9\xAF\x1B\x9F\x15\x08\x37\x92\xF8\xA3\xA1\x11\xC4\x5B\x2A\xB0\x0A\x6A\xD3\xEF\x40\x30\x6E\x53\x3C\xBD\x6B\x6B\x87\x17\xA8\x38\x94\x02\x79\x1D\x8D\x9B\xD9\xFC\x61\x0E\x18\x7F\xA0\xD6\x01\x5D\x95\x0C\xC3\x34\x54\xC1\x29\xE2\x0F\x24\x7D\xF7\x1A\x79\x93\x00\xA3\xA2\xB3\x01\xA0\x9F\x41\xD3\x0F\xFD\xA0\xA3\x2C\x04\x71\x82\x07\x44\xD5\x5A\x62\xBA\x80\x0B\x02\x41\x60\x2A\x1A\x3A\x21\x98\x9E\x30\x0F\xD0\x74\x9A\xFE\x79\x73\x3F\xF9\x2D\x76\x0C\xAF\x0D\xF5\x8F\x77\x59\xAD\x0C\x99\x39\x9C\xF0\xEA\xEB\xC8\x76\x04\xD1\xD9\xD9\xE6\x23\x41\x47\x0D\x74\x7B\xB8\x21\x21\xF7\x00\x4D\x08\xA8\x2C\x1B\xC4\xB3\xA5\x4F\x1E\x16\xBE\x3D\xD7\xCC\x01\x20\x9F\x0A\x00\xC8\xA7\x66\x01\x90\xF9\x95\x3B\x9B\x78\x21\x57\xDF\x19\xAA\x11\x75\xF8\x0D\x43\x39\x71\x6F\x2F\x27\xE2\x85\x72\xE2\xEE\x5A\x0F\xE0\x19\x66\x46\xE5\x8F\x53\xBD\x3D\x54\xC3\xE3\x40\xF2\x05\x43\x5B\xDD\x65\x68\xAB\xFD\xD0\x6E\x9F\x1F\xDA\xFA\x7E\x43\xBB\xBD\x1B\xDA\xED\x0B\x86\x76\x74\x9F\xA1\x81\x2A\x7E\xB3\xD4\xE5\xE0\xAA\x97\x74\x88\x1A\x46\x0C\x81\xA7\x1A\xE2\x24\x87\xD7\x3E\x96\xD4\x1F\x51\x2D\x99\x4F\xA1\xA5\x1B\x18\x43\x38\xD5\x78\x4D\x09\xF8\xB3\x6A\xDD\xFC\x2D\x1C\xEC\x57\x87\x58\x90\x29\xD7\xB1\x2C\x89\xA3\xF1\x48\xAD\x27\xFD\xE1\x19\x0B\x28\xDF\x64\xA2\x60\x89\x84\xA5\x39\x45\x21\x2C\x1F\xF8\xEC\xA0\x89\x25\x58\x22\xF0\x67\x6C\xA7\xC2\xC1\x4C\xEA\x18\xA6\x64\x55\x94\x00\x1C\xFF\xCB\xE3\xE7\xD5\xBA\x79\x84\xB5\xF6\xA5\x13\xE6\x11\x7C\
xBE\xE8\x73\xE7\xF0\x44\x69\xAA\x89\xF2\xFF\xFC\x73\x8B\xA0\xC6\x38\x82\x66\xDA\x41\x8D\x4D\xD9\x0F\x40\x00\x71\x3F\xF8\xB9\x19\xA8\xB1\xA5\x22\xF8\x21\x74\x07\x0E\xA7\x47\xE0\x70\x53\x08\x50\x63\x4B\x30\x45\x22\x30\x38\x1C\x9E\x2C\x64\xC9\x2A\x09\x6A\xCC\x40\xE2\xCB\xB6\xFA\x8F\x57\x19\x6F\x0C\x5E\xC4\xE1\x7D\xED\x03\xF4\xFF\x61\xDC\x5E\xFF\x52\x3D\x0D\xED\x87\x97\x50\x0E\x55\xB3\xA1\x13\x9D\x32\xB7\x6E\x1E\xA6\xC3\xAE\x6D\x64\x25\x05\x30\x4D\x59\x13\x3B\x9D\x07\x44\xC9\xD1\xAA\x13\x70\x50\xA8\x2B\x5A\x91\x26\x84\x53\xE9\xA0\x9B\xFC\xBB\x86\x4F\x0B\xFA\x19\x41\xBF\x4D\x19\xDF\x0C\x39\x0F\xBB\x9C\x30\x23\x4F\xE8\x19\x6C\xAC\xC4\x67\x4B\x62\xAA\x7A\x5A\x78\x57\x57\x30\xA5\xA2\x88\x1C\x17\x75\x0B\xAF\xE3\x3C\x50\x71\xF4\x9F\xF8\xEC\x48\x9D\x5C\xC2\xB3\x66\x42\x77\x99\x07\x07\x4D\x19\x9C\x85\x13\x8C\xB4\x40\x9C\x47\x1A\xDE\x73\x52\xBC\xE2\x11\xEC\x47\xB4\xC0\x94\xD6\x99\x1F\x7B\x98\x45\xE8\x74\xB3\x31\x3E\xB9\xC4\x85\x36\xCD\x1A\x7E\xF9\xF7\xE8\x6C\x6A\x1B\x42\xD8\xC9\xAA\xFF\x14\xF4\x86\xD7\xD1\x8F\x2D\xDF\xD1\x1E\xA8\xA7\x05\xB2\xC9\xF8\x10\xEA\xCE\x1A\x01\xBD\xA1\x37\x1A\x9E\x28\x35\xFB\x00\x5F\x5A\xB1\xF5\x5D\xDE\x6C\x64\xB8\xC6\x6F\xF3\xDD\x6D\xEA\xCD\x25\x98\x06\xB7\xD2\x34\xB8\x95\xC8\xF6\x20\xA4\x43\x31\xC0\x17\x80\xE9\x40\x23\xA2\x6F\x27\x8C\x55\x4D\x2D\xD6\x31\x2F\xCE\x92\xE0\xCD\x99\xD2\x71\xAC\xE3\x52\x8F\x16\x06\x16\x8A\xB5\x66\x19\xF9\x82\x20\xC3\x96\x61\x09\xCF\x9E\x43\x0C\x19\x76\x0B\x7C\x35\x04\xFD\x7A\x7E\xF8\xFC\xA1\x57\x6B\xD3\x4C\xF0\xE6\x18\xEF\xF1\x1C\x3D\x56\x4F\xF0\x46\x32\x11\x78\xB0\xCB\x2D\x8D\xF8\x80\x63\x08\xEC\x08\x53\x7F\xF4\x92\x7F\xD5\x1B\x61\xB2\x55\x4F\x88\x17\x9B\x25\xAF\x2F\x7B\xF7\x38\x52\x42\x20\x05\x4D\x4B\x8E\x86\x5B\x67\x56\xC2\xF5\x7B\xFB\x73\x43\x61\x89\x54\x77\x10\x55\x1F\xBA\x86\x87\xC1\xF6\xE3\x5B\x13\x3B\xA6\x3A\xD9\xDD\x4A\x31\xBF\xB9\xBA\x2A\xA0\xC2\x76\x74\x41\xEB\xF9\x20\xA9\x1D\x49\x0F\x3C\xF4\xAC\x19\xC0\x81\xBA\x5D\xD4\x4D\x37\xD0\xDE\x23\x09\x59\x1C\x28\x9C\x31\x07\x38\xC5\x0B\xB5\x77\xB1\x77\x44\xF3\x70\xA0\xEF\
xFF\xDC\x5E\x70\xA0\xFF\x7C\xE6\x57\xE9\x38\xEA\xCE\x5D\xC2\xC3\x12\x64\xE5\x59\x71\x2A\x3D\x4B\x74\x49\xB4\x17\x1C\x68\xDC\x99\xB1\x22\x88\x7B\x81\x2A\x73\x16\x38\x50\x77\x66\xA2\xFD\xC7\x3E\x37\x82\x03\x75\x01\x0E\x74\xE7\x9A\x18\x4F\x0C\xD8\xEA\xCD\xF4\xE1\x20\xA5\xFE\x76\x5C\x28\xF5\xB7\xF3\xC4\x81\x4B\xFD\x7D\xF5\xEA\x0B\x28\xF5\xB7\x03\x73\xA5\xE9\xFE\xE4\xEA\xD7\xA7\xD4\x5F\xDF\xF5\xB0\xD4\xDF\x9F\x5E\x3D\x50\xA9\xBF\x3F\xBB\xFA\x8D\x59\xEA\x6F\x01\x3D\x6F\x5E\x3D\x68\xA9\xBF\xAF\x5E\xDD\xB5\xD4\xDF\xEF\x65\x3A\xDF\xF6\x9C\xDD\xDA\x9D\x3D\x01\x97\x93\x10\xCA\xB9\x8E\x7E\x23\x60\x5A\x84\xC0\x1C\xD2\x87\x82\x49\xBF\x6C\x38\x9E\xE2\x98\x7A\xBD\xB0\xFF\xA3\x64\xC6\x67\xCC\x7B\xE4\x8B\x1C\x04\xE0\x71\xBB\x6D\x0A\x32\x32\xA1\x92\x66\x24\x12\x63\xAC\xA4\xE5\x41\x49\xEB\xCC\x3E\xBD\xAE\x20\x79\x3A\xCD\x64\x5E\x65\x88\x94\xB1\x56\xDB\x02\x32\x82\xAA\x96\xF7\x4A\x54\x07\x47\xE5\x3D\xA6\xA1\xBC\x87\xD1\x5A\x73\x89\xF7\xE9\xC2\x12\x1E\xBA\xC0\xC1\xB1\x05\xF8\x51\x9A\x0C\xB1\xC2\xE0\x5C\xA2\x82\x46\xAF\x0F\x93\xCB\xC2\xE4\x38\x1A\xE8\xBE\x2E\xD0\x64\x97\xE9\x81\x82\xFC\x84\x79\x84\xC3\xD6\x52\x72\x69\xF4\x95\x1A\xCE\x92\x7F\xBD\x0C\x41\xB1\x59\x9F\xB1\xB5\x90\x92\x07\xEC\x8A\xF1\xB3\x73\xC8\x7C\x4A\xA1\xB3\xE3\x3E\xF9\x9E\x28\xE5\x91\x88\xC7\x73\x39\xD7\x9A\x8C\x8A\xA9\xE0\x11\xF1\x13\x03\x81\x4D\x07\x93\x24\xDE\xE6\xB8\x3A\xD8\xB2\xBB\xDC\x04\xD9\xD6\xA8\x40\x6B\x9D\x65\x99\xCA\x68\x44\x0B\x68\xBD\x31\xE1\xD8\x3E\x29\x5C\xFD\xB1\xE1\x29\x54\xC2\x84\x5B\x73\xA4\xCF\x46\xC2\x5F\x7B\xAD\x07\xFE\x1E\xCA\xB6\x32\x83\x92\x4F\x85\x32\x5E\x56\x36\x38\xD2\xB9\x18\xB0\xEF\xEB\x27\xCC\xAB\x35\x2F\x35\x5F\xD2\x73\xCA\x87\x61\x35\x98\x6C\x17\x64\x2B\x2D\x51\x8E\x4E\xA1\xDC\x98\x58\xE0\xD0\xC0\xD7\x22\x87\xA4\x50\xEC\x31\x16\x42\x21\x27\x03\xD2\x01\x06\xCE\x90\xE5\xB3\xAB\xCC\xB1\xC2\xB7\xC2\x52\xA3\x95\xF5\x6F\x6A\x21\x3B\x4B\x38\x68\x7F\x01\x2C\x25\x69\x07\x74\x6A\xA1\x3A\xCD\xB5\x12\x21\xAF\xBE\x7C\x55\xDA\x5C\xD4\xBF\xDE\xBB\xFF\x62\x30\x80\xB4\xE5\xAC\xB1\x52\xE0\x9B\x07\xCC\x2C\
x76\xD2\xB8\xF8\xD1\xC2\x9A\x6D\x7D\x25\x5A\x74\xB7\x95\x74\x86\x1E\x49\x3A\x19\x7A\x34\x7B\x9F\x66\x32\xF2\x69\x36\x26\x40\x0D\xA6\x21\x5A\x25\x3F\x3F\x91\x2F\x42\xB4\x4A\xD1\xB9\x2C\x9B\xF2\x7E\x71\x79\x35\x93\x63\x4A\xE1\xFD\x16\x72\x3E\x08\x96\xD8\x13\x89\xCB\x36\x39\xA6\xE8\x46\x5E\x42\xC1\x59\x4B\x8E\x99\x63\xC2\xCD\xC6\x5C\x86\x64\x49\x9C\x9B\x1C\x53\x7C\x8F\xC2\x7F\x62\xAF\x2F\x35\xE9\x09\x93\x8A\x5A\x69\xEA\x14\x94\xC7\x33\x13\x37\x91\xBD\x88\xCF\xFA\x6D\x8A\xD6\x69\xDC\xA2\xB8\xDD\xF4\x84\x79\x14\x52\xAF\xC0\x85\xA2\x27\x8A\xEB\xE8\xD3\x3F\xAB\xA0\xA0\xA0\x0A\x66\x48\xF6\x8A\x8B\xCF\xBD\x5E\x6E\xFF\x4B\x64\x32\xAE\x73\x32\x3F\x33\x12\x28\x55\x3A\xCE\x6B\xE7\xB7\x49\x8A\x18\x28\x1E\xE2\xCA\xE9\x14\x6D\xA0\x38\xE9\x08\x12\x3A\x7A\xAB\x3F\x26\x0B\x69\x2E\xD1\xFF\xB8\xE8\xDF\x3F\x5C\xF4\xB4\x13\x49\x2B\x75\x48\x85\xB4\xE3\x3B\x64\xDE\x83\x6E\x1B\x94\x29\x4F\xB6\x4B\x4E\xC5\x91\x8E\x19\x9F\x9A\xCB\xC4\xD0\x95\x8B\x36\x19\x85\x44\xE3\x7C\xE8\x82\xE8\x2E\xD7\x39\x23\x56\x43\x71\x3F\x9D\x4A\x2C\x15\xF8\x04\x39\x13\x82\x8F\xD4\x59\xB6\x03\xCB\x5C\x04\xAD\xB5\x4F\xD5\x22\xB3\x6B\x01\x66\xB3\x0E\x31\x8F\x12\x12\x4A\x79\x59\x85\x84\x84\x1A\xCE\xCB\x32\x50\x48\x48\x28\x57\xDC\xE7\xD9\xF5\xB6\xEB\x47\x78\x6F\x92\x64\xA2\xCB\x5F\x3E\xB8\xFC\x15\xB3\x97\xBF\x62\xB3\xC9\xBB\xCB\x5F\x3E\xBA\xFC\xE5\x38\x86\xA5\xF9\xCB\x5F\xCE\x97\xBF\xA2\x80\x02\xF2\xF1\x15\x2E\x5F\x78\xF9\xCB\x87\x97\xBF\x3C\xB4\xBE\xCB\x9B\x8D\x0C\x37\x0F\x97\xBF\x02\x2F\x7F\x45\xE0\xBA\x62\x74\xF9\x0B\x22\xF1\x51\x4A\x37\x0C\x42\x91\xBE\x35\x32\x67\x57\x5B\xBF\x5D\x77\xBE\xD2\x95\x8E\x99\x0A\x86\x97\xB5\x50\x08\x33\x05\xB9\x82\x7C\x32\x7B\xBB\x12\x49\x43\x49\xA9\xE4\x01\x2C\x82\xA4\xB3\x07\x90\x74\x8E\x78\xD3\x74\xDE\x0B\x0E\x04\x67\xAE\x84\x94\xB2\x2A\x51\x7A\xF0\x80\x52\xAF\x78\xEA\x2B\x6B\x34\x30\xAF\x98\xCB\x5F\x21\xC0\x03\x73\x4C\xEE\x88\xC9\xD9\xCF\xD0\x9F\x06\x05\x11\xC8\x09\xAC\xBD\xE8\x65\x29\x79\xA8\x39\x3E\x51\xD4\x31\x36\xE3\xB1\x1C\xA2\x8C\x98\x50\xD1\x89\xA7\xE7\xE6\xA6\xA7\x07\xD3\xD3\x34\
x3D\x83\x72\x94\xE2\x97\x39\xCB\xAD\x93\xA2\xDD\x7E\x10\xF1\xE8\x35\x49\x22\x01\x7E\x16\xDF\xED\xA3\x60\xDA\x46\xF3\x68\xEA\x1C\x22\xFC\x28\x21\x0C\x3A\x54\x9D\xC5\xD3\x61\x93\x4D\x02\x29\x92\x6E\x50\xC7\xA5\xBF\x26\x42\x5A\xFC\x84\xD6\x36\xC8\xE9\x1D\xDD\x09\x6A\x7A\x08\x54\xF5\x47\x24\x2D\x14\x24\xA7\xD5\x49\x9B\x7A\x2E\x70\xC6\xEA\x74\x17\xFF\xD1\xD8\x8B\x48\xD1\x0D\x88\x98\x2D\xE2\xB5\x86\x52\xE2\xD7\x70\xE0\x10\xAF\x41\x0C\x76\xAD\xFA\x0A\xB6\x24\xB8\x59\xDE\x48\x4D\x12\x2E\x4C\x32\x17\x62\xA2\xEB\xB4\x00\x55\xFD\xBF\x1C\x35\x93\xF8\x1D\xDD\x0F\xF8\xA5\x9B\xED\x0B\xBE\xD9\xBE\x65\x78\xB3\xBD\x4A\x1F\x5E\x2A\xD2\xFC\x52\x55\xEC\x97\x08\xFE\x12\xC1\x5F\x2A\x43\xFE\x57\xA6\x0C\xF6\x4B\xF4\xFF\x8B\xA5\xFF\x4F\x8F\xA3\x3A\xE7\x1C\x89\x07\x89\xEA\x9C\x53\x11\x5E\x9C\xA8\x4E\xBB\x38\xAA\xF3\x63\x9F\xFB\xDA\xA3\x3A\x7F\x7B\x6E\xA4\xB0\x5B\x54\xA7\xDC\xE3\xAC\x38\x47\x64\x40\x1C\x36\xF3\xCE\x05\x65\xCC\xF6\x0D\x93\x59\x38\xA8\xD9\x30\x99\xFD\x56\x62\xB7\xF0\x25\xB7\xCB\x4A\x84\x50\xC5\xA1\x9A\xB8\x80\xF0\x91\x28\xAA\x0F\xD1\x81\x11\x8D\x48\x3F\x72\xED\x8E\x5E\x0B\x40\x88\x2E\xF4\x15\xDE\x15\xAE\x76\xA4\x45\xEF\xAF\xFB\xDA\x5D\x8A\xBC\x39\xAE\x51\xB6\x20\x26\x0B\xEC\x78\x24\xD8\xE9\x4B\xAC\xFC\xB5\xB0\xF2\x3F\x7D\x89\x95\x5F\x18\x2B\x97\x1D\x14\x51\x31\x88\x12\x7C\xC1\xCC\xFC\xEB\x99\x75\xDB\xFA\x4A\x11\x6E\xBD\xCF\xAB\xB1\x79\xD2\xB2\xF1\x5D\x31\xAE\xAC\xA4\x47\x90\xC5\x2F\xF3\x78\x8F\xD6\xF7\xAD\x35\xB9\x57\x0F\x4C\xF0\x92\xFC\xB4\x22\x2B\x10\xBB\xCE\xC4\x94\x87\x57\x67\x4A\xD1\xFA\x2E\x09\x2A\x14\x13\x67\xC1\x49\x1B\xC5\x75\xD2\xE2\xC4\xC0\x89\x5F\x42\x71\x9D\xB7\x8A\xE2\xC4\x85\xCE\xCC\x09\x96\xBB\x97\x6C\x8E\x90\x7B\x60\x25\x13\x3B\x58\x44\x2D\x14\x7D\xA6\x46\x0F\xEA\xB7\x41\xE5\xD3\x29\x2E\x8D\x7C\x62\xF4\x0F\xB1\xC8\xEB\xFA\x3C\x0D\xE0\xE4\x4C\xAF\xEA\x09\x94\x7C\xB6\x1A\x54\x2C\x29\x7C\x67\xC5\x9B\x4B\x78\x64\x4E\x21\xF6\xE6\x5C\x5D\xF5\x77\x77\x9E\x79\x7A\xB1\x5E\xC6\xB7\xD8\xC8\x0D\x6C\x26\x1A\xA5\x56\x07\xD0\xC2\xD7\x6C\x05\xB3\x89\x98\x21\x3B\x47\xB7\xCC\xB7\
xA4\x3B\x79\xD9\x7D\x80\x12\x52\x5F\x5D\x86\xE9\x05\xD2\x7F\x69\x00\x90\xFA\x9D\x9B\x7A\xEB\xCC\x45\xA8\x2E\x92\xD5\x55\xC3\xB2\xE4\x8E\xA6\xA0\x2F\x7B\x10\xCC\x0C\x23\xC5\x8B\x48\xD3\xCD\x41\x53\xB0\x2C\x1B\xA9\x25\xCA\x7D\xE9\xFC\x44\x05\x8F\x3A\x64\xDE\xE2\xFF\xEE\x5B\xC3\xD9\xBE\xA9\xAD\x0F\xF5\xB3\x3B\xCC\x8B\x99\x5E\xAC\x57\xB8\xC6\xD3\x72\x20\x54\x07\x36\xF8\x35\x4F\xFA\x78\x7D\x24\xC0\x3D\xF4\x73\xA5\x42\xA3\xF5\x11\x02\x1C\xF2\xC7\x51\x53\x3B\xE2\x5F\xB3\x05\xA9\x7F\x5A\x6D\x35\xB7\x71\x21\x75\x58\xDE\xA3\xE5\xC0\xAE\xE6\x5C\x83\xFC\xB2\x42\x80\x47\x6F\x97\x1C\x62\x0D\xCB\x73\x14\xB4\x42\xC1\xE5\x03\x52\x30\x09\x39\x86\x13\x70\x13\x05\xB7\x4D\x1C\xC1\xED\x99\xDD\xD7\xE8\xF0\x2D\xAE\xD1\x91\x7A\x79\xB4\x4E\x05\xFB\x4F\x52\xA2\x07\x79\x67\x0E\x32\x7D\x72\xEA\xED\x48\x3E\x93\xA1\x30\x3C\xFA\xB2\x5B\xDB\x5B\x1A\x14\x41\x43\x5A\x7E\x9F\x18\xA2\x6B\x87\xB1\xFA\xDE\xD4\x32\x91\x5F\x11\x08\x14\xF6\xE6\x84\xD2\x8A\x47\x86\x3C\x6A\x82\x4A\x4B\x49\x7E\x10\x1D\x25\x11\x95\xAB\x67\xDE\x01\xBB\xC9\xFE\xFA\xB4\xAD\x35\x44\xF8\x4F\x34\xB6\xE0\x3D\xAF\x7A\x8B\xD8\x33\x2F\xD3\xB7\x05\x61\xF6\x8C\x9A\x0B\x4B\x38\xA6\x42\xDD\xD4\xD7\x79\xB5\xC1\x77\x47\x32\x7B\x3A\x2F\xA9\x23\x69\x23\xA2\xFF\x38\x23\xE0\xFA\x2F\x7E\xD5\xDE\x37\x91\xE4\x39\xB7\x3D\x90\x04\x4D\xF4\xDE\x6D\x14\x01\x6D\x13\xBF\xF7\x7F\xA0\x57\x28\x36\x3D\xC4\x76\xB6\xA0\x86\x01\xEA\xA6\x0F\xCE\x36\x7D\x70\x36\xF9\x06\xAB\x3E\xF6\x40\x22\x0F\x1C\xFF\xEC\x6F\xEA\xE0\x14\x41\x21\xE8\xD5\x09\x73\x0E\xF4\x69\x75\x92\x82\x07\xD4\x09\xB3\x0E\xF2\x64\x9D\x81\xF6\xEB\x2D\x31\x00\xC7\xF0\x67\x27\xCC\x9D\xB3\xBF\x56\xC3\x5F\xEF\xE2\x26\xAA\xD1\x43\xD5\x4C\x13\x2B\xDD\xAF\x8E\x7F\xED\x9B\x70\x5C\x8B\x4F\x0E\xF3\xEF\xF8\xFC\x6C\xF6\x02\x7E\xFB\x8F\x06\xDF\x72\xD5\x5E\x1C\xC6\x00\xB8\x8F\xDD\x23\x24\x28\x38\xF7\x60\xDD\xFC\xAD\x5A\x2A\xB4\x4E\x39\xC2\x1F\x0C\x85\xEF\x4E\xDB\xEA\xAD\xD7\x42\xED\x53\x7C\x6E\x9D\x97\xE9\x8F\x28\x08\x54\xAF\x9B\xBB\x28\xF2\x33\xE7\xC0\x5F\xE4\xE5\x82\xD7\x4F\xB2\xB6\xA2\x0B\x4D\xD9\xD6\x13\x96\xF9\
x2B\xCC\x7B\x13\xBA\x0D\xA1\xA2\x66\x4F\x81\xF1\xC7\x07\x37\x24\xAA\x1B\x4B\x27\xE6\x0A\x17\xFB\x9B\xD0\xCF\x5E\xD5\x4B\x1C\x25\xE9\x20\xE1\x4C\x0F\xA6\xA2\xD7\xF5\xD4\x9E\x82\x04\x28\x26\x74\xDA\xC7\x96\x56\x1C\x5B\xDA\x55\xA4\x0D\x9A\xC9\xC3\x60\x9B\x65\x7A\xEC\x50\xE7\x3C\xA2\x00\x44\x6D\xAD\xB1\x11\x43\x0F\x70\x58\xE1\x57\x3F\x37\x2A\x6B\x6D\x61\x99\x05\xC2\xF2\x79\xAA\xD2\xBD\xEC\xAF\x2C\x58\x01\xBD\x6E\xA0\x71\x9E\x48\x7A\xB4\xB1\x6B\x6C\x66\x1D\x2C\x08\x3D\x51\x93\xA9\xFC\xA8\x28\xA7\x7C\x9D\x0D\x11\x99\x2B\xBD\x99\x95\x9E\xAA\x58\xD7\xE9\x28\x6A\x38\x2C\xDE\x94\x59\xC1\xBC\x31\x80\x53\x31\x04\x56\x60\x58\xA8\x18\xEF\x38\xB6\xBE\x71\x12\x6B\xBF\xCD\xF8\x6D\x02\x4A\x9B\x43\x7C\xAE\x6B\x4A\xC6\x61\xBA\x7A\x8E\x77\x35\x87\x25\x58\xF4\xEE\x86\xD8\x4A\x17\xB3\xD3\xA0\x47\xC8\x09\x71\x37\x1B\x63\x42\x81\xDD\x3B\x05\x1D\x87\xD4\x80\x75\x1C\xFF\xE1\x6E\x52\x14\x31\x54\xCC\x72\x64\x01\x29\x1C\xA2\x3C\x3B\x72\x2F\xDD\xB4\x5B\x2D\xE4\x07\xE4\x4C\xC1\x36\x64\xFE\x13\x66\x3B\x34\x8C\xCE\xFB\xE0\xE7\x87\x7E\x1E\x1C\xAA\xAD\x73\x30\xBE\xBA\x04\xD5\xC5\xC6\xF8\xA7\xD5\x45\x30\xB0\xE4\xF5\xD6\x5A\xBD\x02\x87\x90\x13\x8F\xC0\xA1\x63\xCA\x89\xD3\x65\xD7\xC6\xC4\x26\x76\x84\x62\x0C\xB9\x90\x24\xFD\xE7\xBE\x35\x0A\x17\xEA\x0B\x1B\x1E\x26\xF7\xA8\xA3\x68\x5A\xC8\xC0\x74\xC9\x6E\xB7\x21\xCF\x1F\x53\x0A\x0E\x9F\x9D\xA8\x02\x6E\x23\xA8\x68\x72\x0C\xC3\x61\x39\xC7\x29\xDA\xA7\xB1\xFE\xF8\x96\x3F\xCE\xFE\xC5\x15\x71\x1C\x59\x58\x39\x4B\x09\xAD\xA6\xA4\xC8\xD9\x3D\xC6\x89\x63\x34\x03\x0C\xA4\x43\x7D\x2C\x86\xC2\xDF\x90\x3B\x3E\x87\x33\xF8\x9C\xBA\x7F\xAD\xB1\xFE\x91\x96\x55\xBB\x41\x2B\xBB\x3D\xB6\x07\xAD\x2D\x91\xA6\x71\xFE\xBF\x28\xC0\xFF\x9C\x5F\x6B\xDC\x7D\x94\x60\xFB\x48\x5B\x1F\xA1\xCD\xFF\xA2\x51\xEC\x20\x34\xD3\x42\x33\x66\xFA\xB2\xE8\xC0\x9D\xE0\x08\x4B\x9B\x43\x1C\x54\x3C\x89\x66\xF6\xBB\xFB\x46\xD8\xEF\xF9\xE2\xFD\xEE\xC2\x34\x4A\xD6\x11\x96\x7C\x97\x3F\x5D\x26\x05\x4C\xA0\x62\x80\x81\xA5\x6E\x76\x66\x66\x76\xFA\x1B\x61\x76\x49\x41\x63\x5C\x34\x43\x1D\xA4\xB2\x68\x9E\x74\x44\
x2C\x9F\x97\x4F\xFB\x8F\xDB\xAC\xCD\x1D\x8B\xA3\x71\x9B\x0E\x2A\xD1\x75\x22\x6C\xA5\xAF\x11\xDE\x8F\xDB\xF4\xE3\xB6\x61\xDC\x69\xB7\x2A\xDD\xE9\x7A\x48\x2C\x7A\x09\x47\xC4\x37\x0E\x97\xC0\x61\xE3\x28\xD1\xA6\xB8\x58\x01\xBA\x04\x42\xF4\x82\x51\xDA\x14\xF8\xA6\x6B\xAC\xCF\xEE\x9B\x68\x0F\x60\x51\x3E\x31\x1E\x2A\xFE\x40\x1F\xF8\x24\x22\xD1\x6E\x46\xF2\xFA\x68\xE3\x44\x5E\x43\x43\x55\xEC\xE7\xE5\x35\x3D\xA2\x79\xEA\x74\x31\x5D\xB0\x48\xB6\x9B\x6C\x5F\x25\xBD\x5F\xA4\x78\x9E\x05\x0F\x8D\x6A\x80\x4F\x39\x0E\x18\x0A\x4F\xC1\x30\x78\x48\xD0\x19\xD0\x55\x33\xE6\x1A\xA5\x8F\x40\x8E\x9A\x81\x79\xEF\x93\x27\xED\x23\x60\x86\x69\x83\xAF\x23\x41\x60\x4F\x98\xD7\x85\x6A\xFB\xAE\xB7\x7B\xBE\x8E\x0C\x20\xFE\xD3\x4A\xD4\x21\x11\xFA\x26\x80\x44\x69\x5E\x4B\x9C\x42\xF0\xCB\xFE\xB3\x31\x0D\x3A\xF7\x27\x52\xA1\x67\x6C\xBC\xBC\xFF\xF0\x0C\xB5\x08\x7E\x11\xB5\x8C\xA3\x73\x39\x76\x48\x33\x42\x2C\x51\x27\xCC\xCA\xEC\xAF\xA2\x27\xED\x32\x80\xF5\xC1\x00\xEE\xF6\x72\x8E\x2E\x1C\xC0\x5D\xDD\x00\xEE\x5A\x34\x80\x3B\xC3\x00\xEE\x5C\x3C\x80\x53\xAC\x51\x53\x68\xE8\xDF\xEA\xDF\x0C\x95\x6A\x93\xAE\x29\xFF\x4C\xA7\xC8\x9B\xDE\x74\xFC\x0B\x4E\xC7\xDB\x62\x17\x63\x74\x20\x4E\xC8\x66\x3B\x7E\xB4\x09\x86\x52\xAD\x6B\x47\x56\x6D\x43\x70\xFD\x11\xE8\xCD\x26\x05\xF3\x90\x9C\x32\x06\xD2\xCD\x26\xE1\xC7\x38\x3F\x3B\x69\xE9\x85\xCC\x9E\xE2\xAA\xC2\x02\xD0\x42\x98\xCC\x8C\x11\x24\x4C\x22\x29\x90\x29\xF7\x0E\xAE\x8E\x19\xF1\x95\x00\xCE\xDB\x4D\x8A\x01\x70\x60\x36\x9B\x0C\xA2\x87\x38\x6D\x98\xCE\x8B\x4C\xDA\x24\x91\xE8\xA8\x69\x7C\xE4\xFC\x4C\xB3\x94\x59\x09\xE9\x26\xE8\xAE\xC0\x09\x27\xB1\xD9\xFB\xF9\x16\xD6\x06\xBB\x8D\xBD\x7F\xAD\x26\x24\x1F\x6C\x5F\x83\x6B\xBB\x61\x33\x36\x60\xC3\xC8\xE6\x4C\x22\x31\x4F\x65\x9B\x60\xDA\x26\xF1\xDB\x0F\xD0\x02\x64\x9B\x92\x0D\x9A\xE0\x00\x37\x1B\x47\x86\x1C\xC8\x20\x39\xBF\x46\x11\x32\x49\xA8\xD0\x71\x7E\x8D\xB0\xE7\xC4\x70\x96\x85\x3B\x00\x4E\x3B\xA3\x22\x43\x10\xE1\x74\x4C\x5B\x27\x90\xC9\x8C\xED\x78\xC6\x8C\x39\x48\x60\xEB\x92\x52\xDC\xB8\xCD\xC6\x6C\xB2\xE3\x50\x52\x97\x91\
x74\xE1\xB2\x28\xAE\x19\x5C\x3A\xF9\x39\x41\x1A\xF3\xCF\xD4\x95\xF8\x26\x52\xF6\x46\xC8\xF6\x54\x33\xDB\x93\xC1\xAE\x54\xF5\xEE\xAB\xC4\x3E\x77\x76\xC9\x66\x8C\x52\x51\x7D\xC7\x35\x4E\x38\xD4\xEB\xC6\xC9\x87\x90\x86\x56\xBC\xD9\x70\x2E\xF3\xB6\x57\xD5\x77\xCA\x6D\x03\x34\x9E\x56\x2B\x04\x49\xEB\x57\x58\x9D\xE4\xD0\x2C\xB5\x6E\x56\xAB\x9D\x6F\xEF\x6A\xA0\xAF\x9B\x95\x86\xB0\x84\xB6\x83\x18\x35\xEB\xA6\xC4\xAD\x6E\x40\x57\x5F\xA0\x6B\xF9\xAA\x38\x78\xAC\x67\x7B\x5E\xF5\x59\xA9\x10\x4D\x17\x6E\x12\xB1\xF2\x52\xC3\x19\x6D\x2F\xBF\x5F\x8C\xBB\xD0\x36\xB6\xBA\x79\x8D\x03\x47\xE6\xDA\x33\xD2\x9E\xD7\x17\xBB\x26\x0D\x92\xA7\x2A\xC2\x14\x8E\xDC\xCF\xA1\x3A\xD5\x77\xE2\x90\xEF\x51\x5A\x82\x71\x2A\xF6\xF6\x68\xBA\xC5\x14\x1F\x37\x3A\xDA\xF6\x2A\xA0\x26\x55\x2C\xCD\x1D\xC5\x70\xD0\x5F\xA8\x7C\xC5\xFE\x49\x46\x9A\x62\xE8\x03\xC2\xFF\x1C\x7F\xF5\x6A\x6D\xC0\xBD\x5A\x9B\xC7\x09\x12\x91\x70\xFE\x08\x3A\x88\xAA\x9D\x78\x87\x7C\x12\xF9\x15\xB9\xF5\x45\x48\x23\x0B\x11\xDF\x47\x23\xB0\xD5\xEF\xD1\xBA\x38\xA9\xC5\xED\xE8\x77\x49\xFC\xA4\x66\xD8\xF8\x61\x43\x2C\x90\xF5\x8A\x1C\x5D\x11\x97\x29\x27\xDC\x34\x69\x43\x0C\x18\x31\xEE\x10\x72\x02\x57\xAC\x61\x51\xD8\x13\x1B\xF1\x78\x51\x45\x72\x85\x02\x07\x24\x51\x59\x56\xC8\x70\x0A\xA1\x8A\xE6\x58\x48\x47\x14\x03\xE7\x2D\x31\x3D\x98\xBF\xA1\x0D\x41\xA8\x51\x62\x34\x5E\xAB\xBF\xC0\xA1\x25\xB6\xF8\x3E\xF6\x3E\x7A\xB5\x6E\xBE\xF4\xEE\x37\xEF\xA8\x46\xF3\xC0\xDF\xF3\xC7\x9C\x2A\xAF\x7D\x79\x81\x3F\xB4\xE1\xA2\x08\xF8\xAD\xBE\xDC\x38\x5F\x5E\x68\x22\x4E\x9F\x6C\x12\x50\xF7\x11\x92\xAA\x81\xA8\xA5\xD8\xB5\x07\x27\x74\xDF\x31\x2C\x01\x22\x5F\x5E\x68\x21\x5D\x23\x4D\x52\x43\xB4\x09\xE9\x5A\x43\x79\xE4\x8A\x6E\x45\x64\x5F\xC0\x93\x97\xAE\xA4\x09\xE5\x33\x01\x36\xDA\x43\xF6\x05\x24\x84\x94\x72\xAE\x96\x6C\xA4\x2D\x85\x2A\xA2\x58\x7E\xA2\x5D\x32\xFC\xF1\xFF\x63\xEF\xFD\x83\x2C\x3B\xAE\xF3\xB0\xEE\xBE\x7D\x7F\xBD\x7B\xDF\xCC\x5D\x70\x48\xAF\x35\x5B\x56\xDF\x1B\x24\x9E\xAD\xE2\x16\xF0\x87\x6A\x81\x00\xB4\x8D\x46\x71\x66\x76\x01\x0A\x40\xA5\x58\x15\xA5\xC2\
x54\xF8\x87\xFE\x60\xEE\x80\x36\x67\xB1\x8B\x82\x6D\x18\xB3\x34\x17\x32\x24\x59\x0A\x4D\x51\x91\x7F\x50\x16\x2C\xD1\x06\x8D\x98\x8E\x64\xD1\x2E\x26\xB1\x42\xC8\x86\xCB\x96\xA3\x58\xAA\x0A\x29\xD9\x96\x1D\x4B\xB2\x1D\xD3\x09\x29\x42\x8E\x64\x49\xB4\xC9\x4D\xF5\xF7\x9D\xEE\x7B\xDF\x9B\x37\xCB\xA5\x49\x4B\x2E\x97\x16\x85\xDD\x99\xF7\xFA\xF6\xED\x1F\xA7\x4F\x9F\x3E\x7D\xCE\xF7\x39\x35\xA2\xA3\xE5\x51\x98\x5E\xBA\x6B\x05\xBB\xD8\x6B\x7C\xE5\x4F\xE6\x25\x78\xBB\xEA\xCD\x1F\xF0\x92\x93\x9C\x0A\x01\xCF\xA8\xF9\xB2\xC6\xF5\x40\x8A\xAC\x1C\xE4\x88\xE0\x74\xF7\x1D\x5C\xA1\xC1\x56\x9E\xD6\x4E\xB0\x13\xBE\x7C\x5B\x1D\x8B\xA8\xDB\x69\x19\x74\xFF\x96\x47\x31\x2C\xA7\x1D\xA7\x79\x72\x82\xC6\x59\x44\xA0\x43\x7F\xB3\x0B\x4D\x72\x19\x0C\x0E\x67\xBA\x2F\xDD\xE2\xD1\xCF\xF0\x6D\xE1\xB8\x78\xB8\x0C\x27\x1B\xDE\x2E\xE0\x41\x13\x9E\xDA\x9F\x5A\x01\xD3\x43\x5E\x4F\xC4\xAC\xF0\x01\x94\xC6\x52\x9F\x5A\x96\xB1\x64\x98\x56\x64\x5A\x04\x75\xF5\x12\x5F\xEB\x75\xF3\x61\xAD\xE9\x5A\x52\x7D\x7A\x56\x70\x5C\x5E\xEA\x8E\x3D\x59\x29\x9C\xF1\xAD\xD3\xAC\x49\x9E\x15\xCA\xB5\x1D\xA7\xE7\xC3\x31\xBD\x3E\xE3\x70\xE0\xBD\x19\xBC\x39\x5E\xF9\x5A\xFA\x0B\x9C\x35\xDF\x50\x8D\xC9\x47\x48\x45\x97\xAA\x9B\xCF\x04\x6D\x38\xA5\xF3\xD8\xB4\xFC\x73\xFF\x96\xAB\x72\xC5\xD4\x0E\xCA\xD7\xBC\xFD\x01\x4A\x99\xB1\x09\x1A\x18\x07\xDA\x67\xC2\x9B\x77\x85\x30\x40\x52\xE9\x75\x30\x1C\xA1\x1D\x0E\xA2\x47\x3F\xF7\x7F\x70\x14\xA0\x45\x7C\x81\x84\x72\x5F\x0A\x86\x5A\xEC\x0E\xF6\x5C\xE9\x64\x30\xDD\xF5\xC3\xBA\x20\x96\x41\x17\x14\x6F\x8B\x2C\x97\x19\xCC\x03\xB3\x59\x4D\xD2\xDF\x46\x96\xBA\x0E\x8D\xB7\x70\x28\x42\xE1\x4A\x2A\x90\xD3\xDD\x1F\x7D\x11\xB1\xA1\x1D\x90\xFE\x60\x62\x4E\x5B\x46\x64\xCA\xE7\x83\xA1\x99\x85\xD7\x62\xB8\xAA\x5E\x0B\xE2\x5C\xD0\x21\x50\xB0\x40\x9D\x40\xAE\x90\x75\x26\x68\xDB\xD0\xEF\xA0\x0D\x1E\xD2\xFA\x24\xEA\xE4\xC5\x61\x44\x8C\x8C\x76\x62\xDC\x64\x78\xB1\x4E\x90\x45\x67\xDF\x0B\xBF\x46\xF3\x4D\x7C\x34\xC7\xE7\x82\x6B\x3B\x14\xA0\x7F\xCB\xAE\xEC\x62\x4C\x10\xDC\x92\x87\xBD\x37\x27\x30\x79\x63\x95\x57\xCD\x0F\
x58\x6D\x67\xF0\x75\xF9\x1A\xB2\x5A\x02\x26\xD3\x57\xE8\x14\xF9\x6C\x49\x47\xC2\xBF\x2C\xAF\xA4\x99\x36\x7E\x21\x17\x7B\x71\xA6\x13\x44\x0A\xE7\x80\x38\x11\xA1\x5F\x33\xF9\x30\x94\x8F\xF5\xA7\x28\xED\x65\x9A\xDB\xC2\x2F\xE0\x46\x7E\x26\x18\x27\xBB\x32\xC9\xA5\x2B\x78\x5D\x53\x84\x65\xB3\x13\xB4\x78\x98\xF2\x42\x04\x72\x65\xCA\x29\x9E\x9C\x72\x9B\xA6\x3C\x6A\x77\xC0\x3A\x17\x61\xE6\x10\x86\x41\xF5\x0E\x36\xC7\x34\xE5\x55\x98\xF2\x6A\x65\xCA\xA3\x8D\xCA\x07\x5D\x91\xA6\xBC\x0A\x8D\xAF\x1A\x57\xD1\x07\x58\xCA\x94\x07\x93\xA4\x60\x7B\xED\x43\x0A\x61\xF4\x04\x63\xE2\x60\x9A\x2B\xBC\x71\x0D\x13\x74\xAF\xA0\x8C\xE4\xFE\x82\x84\x3B\x87\xFD\x50\x7E\x0A\xFF\xB4\x2E\xF7\xAF\xDD\xFA\x3D\x97\xCD\x9E\xCB\xFD\x3F\xFB\x45\xE6\x60\x61\xF2\xAB\xD1\x95\xFE\xB3\xA5\xD7\xDD\x87\x6E\x11\xEB\x6F\xE3\xF4\xA0\x64\xF7\x3D\x58\xCD\xD8\xC8\xA6\x5F\x01\x34\xE6\xF2\x09\x7A\xCF\x36\x1F\x59\xE8\x62\x96\xA3\x60\x53\xFE\xBD\xF2\xE7\x79\xE3\x29\xC1\xEF\x34\xBF\x0B\x91\x31\x78\x48\x83\x89\xF9\xAA\xFE\xFD\xC0\xE6\xC6\x39\xAD\x74\x95\x7F\x55\x3F\x7D\xE4\xCF\x3F\x3D\x82\x39\x6B\x7F\xA2\x60\x83\xBE\x99\x3D\x4B\x1F\x78\x15\xB6\x22\xEC\x9C\xE7\xE3\xCD\x6D\xE5\x8A\xA3\xA1\xF0\xE6\x46\xA8\x8A\x11\xE6\xC5\x6E\xD8\x50\x3E\x0D\x8B\xBF\xF2\xCF\x8F\x6B\x00\x12\x2D\xC2\x4E\x90\xF6\x7F\x1E\x80\x52\x72\x5B\xA1\xE9\x86\xC8\x9D\x0A\xCB\xA3\x46\xA7\x04\x96\x1C\xF8\x25\xD8\x5B\x37\xBC\x4B\x89\xF0\x14\xCC\x63\xFE\xCB\xB7\xA4\xF9\x3B\x78\xC3\x48\x9E\x97\xB6\xA7\x13\x1B\x48\xBF\x91\x50\xA5\x74\x05\x46\x24\x8F\x23\x12\xC6\x2B\x8E\x08\x9A\x46\x01\xCC\x19\x61\x96\xFB\x93\x60\xCF\xE6\x33\x64\xAF\xF3\xD8\xD0\x2F\x9B\xF3\x0C\x73\xAE\x9C\x0C\x17\x46\x0E\x75\x97\xC9\xB5\xED\xCA\x54\xF7\x80\xB0\x9E\x22\x98\x54\x60\x2E\x0B\x02\xB3\x67\xAC\x3F\xE1\x16\xA2\xC2\xA9\x3C\x58\xE6\x97\x8D\xC3\xB2\x57\x8F\x71\x3F\x7E\x48\x5D\x00\x8F\x43\x1E\x5E\x25\x68\xC0\xE7\x20\x02\x0A\xB0\xA1\x61\xC5\x3C\xA4\xB6\x43\x2B\x62\x29\xEB\xB7\x23\x5A\x4F\x75\xEE\xDB\x51\x74\x30\xB2\xFA\xB6\x97\x11\xEA\x21\xF5\xD4\xEF\x8C\xB2\x0D\x94\xF7\xE9\x62\xA8\x04\x8A\xD2\
xB8\xDC\x77\x63\x02\x77\x42\x76\x92\x44\xE8\x8B\x5F\x93\xC5\x83\x66\x89\x17\xBD\xB8\xE9\xEA\x35\x93\x93\x0E\x66\x1A\xB8\xA0\xB3\xB7\x7A\x22\x39\x24\xE2\x41\x63\x49\xFD\x1C\xEF\xE1\x09\x85\x86\x33\x0A\x91\xB4\x70\x79\x3F\x79\x1F\x0C\x87\x2B\x0F\x73\xCD\x2F\x05\x96\x28\x62\x5B\x96\xFC\xAE\x88\x90\x6C\x88\x8E\x5A\x49\xA2\xA6\x10\xB6\x59\x13\x4A\xB6\xA0\xA3\x08\xD2\xC2\xE5\x53\x1E\x0D\xF9\x8A\xB4\xE5\xC8\x9D\x38\x2F\xCB\x29\x3F\x43\xE0\xF2\x28\x69\x61\x0E\x56\x97\x5C\x3E\x5B\x72\x43\x92\xAE\x62\x45\xBA\xCC\x69\xE9\x42\xDC\x7F\x9C\xD1\xD0\x50\xF6\xCB\x48\x7F\xDB\x73\xDF\x21\x32\x80\x69\xF0\x3C\x6B\xCB\xB2\x90\x48\xC8\x88\xAA\x33\x2D\xEC\xD9\x22\xC6\x12\x09\x87\xEF\x95\xDE\x96\xBB\x51\x9C\xE3\x6A\x2E\xEE\x62\x35\xEF\x2F\x75\x8A\x59\x9D\x2F\x07\x2A\x9F\x62\x52\x3E\xC5\x34\x12\x65\x50\x3F\xDC\x97\x80\x23\x0E\x84\x9F\x82\x32\xD4\x8D\x41\xE9\x14\x1C\x96\x92\x5F\x6B\x84\xA8\xF2\x80\x30\x43\x25\xFC\xB1\x0C\x06\x9A\xBF\xF5\x4F\x29\x99\x12\x97\x73\x3E\x22\xA5\x44\x85\x02\xC1\xCA\x66\x89\xCB\x3A\x34\x50\xCB\xB4\x5A\x34\x34\x5D\x40\x84\x33\xF2\x5C\x4B\x9A\x19\x3F\x4A\x1A\x4C\x79\x96\x21\x23\x15\x81\x97\xF4\x6C\x80\x0D\x03\x0A\xE7\xA3\x9B\xED\x3A\x1B\xC7\xD5\x6C\x1E\x57\x2E\x54\x8C\x6C\x58\x08\x74\x4A\x20\xDD\x8A\x99\x2E\xA2\x14\x09\xC0\x63\xCF\x02\xEC\xA9\x04\xB0\xA7\x8A\x4D\x89\xE5\x05\xB0\xE7\x33\x8A\xA7\x36\x46\xB5\x44\xC0\x9E\x57\x35\x7B\x11\x0C\x52\xFA\x3A\xCC\x29\xC0\x1E\xB3\x06\xD8\xD3\x72\x1C\xAA\xC1\x4C\x80\x3D\x66\x3D\x81\xB9\x8D\x2E\x84\x75\xC0\x9E\xE6\xE7\xB5\x2E\x4E\x66\x46\xF9\x5B\xE6\x46\x79\x26\xA9\x3D\xF0\xA1\x20\xC5\x2C\x6C\xEA\x3B\x29\x23\x19\x72\xEE\xDB\xE9\x1E\x14\x6F\xF0\xEF\x7B\x92\xE2\x88\x0D\xBE\x01\xA4\xB0\xEF\x46\xFF\xBE\xE3\x18\xC4\xD9\x97\x3C\x23\x5A\x67\x83\xE1\x72\x0D\xC1\xC7\x41\x51\xD0\x02\x09\x66\xC1\xF5\xE4\x2F\x29\xC3\x26\x85\x0C\xC0\xDC\xE9\xD9\xF5\xB9\x16\x33\x01\xB0\x72\x3B\x18\xB6\xB9\xED\x6F\xBB\xEF\xA0\x9B\xE3\xA7\xC2\xB1\x7D\xCD\x73\x11\xCD\x32\x8E\xC6\xCC\x2C\x4B\x67\x18\x33\x1B\x87\x78\x1C\x71\xCA\xE7\xDD\xFF\x70\x2B\x65\xAD\xC7\xE7\x31\
xC3\xFB\x92\xFE\xE4\x7F\x07\x0D\x57\x83\x10\x2B\x9C\x9C\x41\x22\x6B\x12\x02\x76\x38\x25\xB4\xE0\x9E\xF4\xC8\x5A\xCD\xFD\x1F\x19\xB7\xEE\xD1\xD9\xDA\x1F\x9D\x35\x2C\xC1\x74\x0C\x98\xDE\xE2\xDA\xA0\x27\xCB\xE2\x39\xA5\xD7\xFE\xD0\x7B\x91\xD1\x02\x8C\x4F\xE2\x70\xD5\xBA\x6C\xAA\x40\x72\xDF\x57\x0B\xF0\x94\x9E\x06\xE1\x77\x9C\x1E\x04\xAF\x9B\xFF\xD3\x32\x41\xDD\x88\x7B\xC1\xAE\x02\xC8\x74\x94\x8A\xF4\x1D\x9D\x20\x05\x9C\x2C\xB4\xF0\xFA\x32\x03\xA1\x5B\x0B\x2C\xF6\x2A\xFA\x2E\x88\xCA\x15\x94\xA9\xD8\x8F\x8C\x27\xE6\x18\xF6\x25\xC6\x91\xF5\x04\x55\xD2\x44\xD8\x7F\xEA\xDA\xA0\x9F\x81\x29\x6E\x19\x30\x70\x75\x69\xDA\xBC\x89\x4E\xBB\xC7\xF1\x85\xB9\x8E\x63\x46\xBC\xD4\xA7\xB5\x9B\x50\xE2\xEA\xD8\xD4\x8C\xFE\xA0\x9A\x74\x89\x16\x78\x91\xD8\xA8\x8C\x44\x42\xDD\xD6\xC7\x41\x7F\xD1\x61\x53\x25\x3E\x39\x97\x87\xED\xB8\xE4\x4E\xD0\x04\x95\x07\xF2\xC2\x92\xD2\x1B\x44\xB9\x70\x0B\xE6\x39\xF6\x4C\x8E\x33\xD7\xC7\xF0\x92\x30\x1C\x19\xA8\x1A\xBA\x5F\xBC\x45\x2D\x3A\x51\x7D\x3C\x4F\x40\xF7\x13\x58\xD9\x88\xED\x48\x74\xB7\x9C\x4C\x1A\xB0\x4A\x70\x0C\x68\xDB\xE7\x73\xCF\x4D\xDA\xF2\xB2\xC9\xA2\xCF\x82\x45\x9F\xAD\x58\xF4\xC9\xFD\x5C\x47\xC8\xFC\x88\xEE\xD2\x88\x6B\xA4\xCF\x49\x7B\x90\x71\x0E\x98\x47\x0C\xBA\xDD\x9A\x67\x4D\xE5\x75\xF7\xB9\x5B\x3C\x5E\xD3\xEC\x2F\xE9\x6E\x68\x43\xFF\x1E\x52\x9A\xBA\xAD\x73\xB5\xBC\x6B\xF2\x65\xFF\x0D\xA3\xB3\x13\xA7\xBA\xEF\x8D\xA7\x6D\xAE\x2F\x48\x71\x12\x7C\xA7\x7C\x11\x17\x60\x82\xD2\xD8\x27\x16\x64\x3B\x5F\xCC\x07\xE9\x8C\xA5\x7D\x29\xC8\x8D\x17\x8D\x4D\x67\x70\x17\x2B\x56\x4E\xDF\xA7\xCD\x0C\x65\xA1\xA3\x55\xA4\x7D\x13\x74\x8B\xE6\xA9\xCA\xC8\xF0\xD2\x6E\x3A\x75\x68\x16\x3E\x8A\x6A\x64\x58\x6B\x3A\x34\xDF\xD5\x78\xCB\x83\xA1\x1D\x72\x82\xCA\x24\x8A\x34\x8B\xF1\x24\x93\x5F\x92\x30\xB5\x1C\x0B\xD3\xD3\xB7\x32\x2D\xCC\xD7\x35\x00\xAB\xEF\x7E\x0C\xA5\x00\x36\x69\x84\x6A\xCC\x28\x31\xDA\xA5\x04\x70\x08\x34\x65\x97\xBC\xBD\x3A\x79\x7B\x15\xEE\xAA\xD8\x5D\xED\xD4\xD4\xDD\xB0\xE7\xC7\xE8\xCB\xE8\x56\x8E\xDD\xDD\x4E\x33\x53\xAC\xCD\x0C\x36\xDC\xE6\xC7\xB4\xF8\
x56\x24\xD2\xD1\xBF\x30\x6E\x59\x43\xBD\xA6\x92\xA5\x85\x2D\x8F\x81\x8C\x88\xA7\x03\x49\x80\xF4\x50\x47\x24\xB5\x50\x3C\x18\x15\x87\x31\x24\xD2\xF0\x3B\xEB\x54\x1C\x44\x48\x03\x1D\xF2\x61\xA6\x35\x76\x57\x30\x6D\x64\xC7\x73\x38\x80\x6D\xBC\xDC\xF4\x3A\x3E\x3A\xDB\x29\xC4\xF3\x44\x87\xDD\x9F\x9B\xB9\x13\xBA\x04\xD4\xEE\xD3\x7E\x9F\xC5\x7E\x0D\xD6\xE7\xDC\x05\xFE\xC8\xB8\x65\x4C\xC6\x30\x5E\x9F\x3D\x3E\x61\x2A\x3B\x51\x05\x7A\xE4\xA5\x5A\x45\x8D\x88\x51\x80\x33\xD9\x02\xF1\x51\xA8\xA0\x08\x50\xDE\xD2\x60\x31\xC0\xE9\xC4\xC4\xA5\x0C\x8E\x6E\x60\x68\x2F\x0C\x34\xCE\xA0\x5C\x0F\xBC\x34\x03\xFD\xEC\xE4\xE9\x56\x88\xC0\x4F\xC6\x08\x97\xB8\x13\xEF\x5A\xE3\x59\x41\x4F\x17\x0C\x2F\xCD\x30\x84\x3B\xBE\x78\xC7\x29\x78\x80\xE9\x03\xC4\xED\x05\x8D\x88\x28\x3D\x4E\x49\x90\xB4\x9D\x46\x3A\xDD\x4F\xCB\x10\x04\xFD\xA7\xC5\x02\x1A\xE0\x5F\xC2\xAD\x46\x5A\x2C\xEC\x74\x03\x66\xE5\xC4\x67\x30\xE4\x90\x09\x68\xC2\x24\xF5\xD1\x38\x95\xC7\x87\x7C\x2B\x53\xB8\x8A\xB0\x1C\x2C\x38\x3E\x2A\x01\x85\xB0\x38\x12\xAB\xCB\x82\xB7\xF4\xFF\xA1\x8E\x99\xC2\x8D\x8E\xA4\x20\x33\x0A\x87\x67\x97\xAF\xC9\x4C\x50\x09\xA1\xF1\x2E\x93\x90\xD2\x84\xBA\xF4\xFD\x95\x5E\xCE\xF0\x81\x32\xB1\xA5\x33\x58\xAF\x2E\x0B\xB3\x7E\xC1\x65\x5C\x74\x19\x32\x6A\x5D\xE6\xAB\xA0\x3F\x2E\x84\xBF\xCE\x8F\xDD\x47\x42\x3B\x08\x9F\xDB\x0E\x8A\x4C\x56\xCA\x3F\x8D\x63\x90\x7F\x0F\x8E\xDF\xFE\x5B\x47\x5C\xF8\x29\xFF\x3E\xC9\xC2\xAF\x9D\xF2\xD7\xC7\x01\xF9\xD6\xFA\x20\x9D\x03\xE1\x29\x83\x12\x3F\xF2\xF6\xC6\x15\x39\x4D\x7A\xFD\x04\xE2\x56\x90\x5E\x5F\xBB\xC6\xDB\xEB\xE3\x00\x55\xD5\x60\x89\x60\xEE\xA1\xAB\x70\x13\x2D\x28\xE7\xDC\xF5\x28\x47\x15\xE1\x09\x6C\x82\xBB\xF5\x9D\x38\x71\x32\xA1\x79\x68\x5B\xDB\xB0\x67\xDA\x35\xEC\x13\x19\x39\xC2\x72\xD1\x89\xF6\x75\x80\x3C\x5F\x1F\xDA\x71\x58\x92\x14\x77\x2B\x22\x5B\x23\xB2\xAC\x71\xDB\xA3\x5B\x4A\x92\xCD\x36\x03\x91\x75\x58\x33\x7A\x7A\x5C\x8F\x03\x4E\xA3\xFD\xD2\x35\xAE\x0D\x06\x74\x3B\x4A\x5D\x57\xE9\x0A\x89\x0E\x01\x15\xCC\x71\x91\x0C\xB0\x00\x72\x0D\xBA\xAD\xF0\x73\xEB\x4C\xF7\x4B\xB7\x04\
xC9\x28\xF3\xC6\x99\xEE\xF3\x58\xFA\x0D\x99\x96\x74\x02\x31\x88\x2D\xA8\xE1\x35\x10\xC6\x83\x46\x0A\xC4\x5A\xD7\xCB\xF0\x98\x8E\xE8\xA8\xDA\x77\xE3\x50\x23\x39\x9E\x07\xEE\xE5\x13\xB1\xCF\x1C\x0E\x06\xD5\x01\x01\xC5\x35\xA3\x6B\xDD\xF6\x28\xB5\x36\x61\x08\x9A\xB5\x91\xE0\x10\xB8\xE5\x15\xB9\x65\xC5\x0F\x74\xD6\xD5\x0D\x9D\x88\xCA\x2D\x5C\x8D\xED\x3E\xC7\x40\x0C\x72\x5E\x5F\xB9\x00\x51\x5B\x36\x53\xDA\x58\xE9\x70\x01\x07\x80\xAB\xBA\xEF\x97\xD9\x9B\x7D\x6C\x5C\xD5\xFD\xD9\x5B\x82\x30\x55\x50\x32\x28\xB0\x80\x6D\x0E\x47\xA8\x0B\x07\xC4\x76\xF0\xEF\x9B\x52\xE6\x83\x90\x0A\x0C\xEE\x04\x93\xF9\xEE\x70\xDC\x63\xEA\x7A\x74\x22\xF8\x77\x0A\xEE\xB1\xE5\x90\xB7\x90\xAB\x6F\x85\x5C\x65\xEB\xCD\xFB\x01\x5E\x38\x59\xA2\x0D\xD0\x4B\x19\x1A\x60\x6F\x0C\x8D\x6B\xBC\xE2\xCC\x67\x07\xBB\x41\x5A\x0F\x76\xE9\x74\x69\x12\xA2\x3A\xB3\xD9\xB1\x8D\x60\x26\xB3\x47\xB8\xC0\xB6\x67\x8D\x2F\x53\xE3\x4B\x3A\x3E\xA5\xF1\xDB\x7D\xD8\xE4\xB6\x71\x5E\x03\xDC\x35\xA2\xAE\x60\x8E\x47\x46\x07\xDC\x61\x8A\x16\x68\x9A\xF7\x5B\x5D\x9F\x4C\xF9\x78\x59\xBC\x21\xF6\x17\xDE\x3B\xE4\xB8\xAC\xF3\x7F\xF7\xA3\x1F\xFA\x09\x15\xAF\xF0\xE9\x48\x70\x2C\x50\x02\x73\xC4\x55\xAE\xB8\xBA\xEB\x7F\x22\x14\x73\xA5\xFF\xE8\x87\x7E\xE2\xDE\xAB\xBB\x43\x01\x03\x1C\x40\xBD\xA9\x0A\xE3\x0A\x7F\xE1\xE9\x78\x2C\xA3\xF7\xE4\xC2\xD3\xE3\x50\x71\x09\x57\x31\xFD\xC2\x55\x29\x8F\xAD\x4A\x0B\xB9\xC4\xC3\x24\x6B\xBE\x30\x02\x2D\x24\x03\x79\x1B\x52\xFF\xC0\x04\x16\x6C\xDB\xCA\x95\xE1\x84\xFF\xAD\x61\x0E\xB3\xF0\x73\x11\x7E\xE6\xA5\x08\x91\x07\xE0\x1F\x70\x36\xA8\xA7\x9C\x55\xEB\x14\x6D\x6B\x83\x02\x13\x08\xA4\xDC\x15\xFE\xFA\x98\xD0\xB8\x8B\xF0\x95\x49\xD9\xAB\xCF\x8D\xAE\xF0\xCF\x8D\x83\x4E\x9F\x3C\x3F\x46\xF7\x49\x8A\x47\x55\xCE\xF0\x0D\xC1\x00\xFF\xD6\x71\x28\x31\x31\xA1\x89\xF4\x56\x4A\x8C\x8B\x64\xE9\x4A\xA2\xDF\x82\x7A\xAC\x92\x3B\x75\x27\x30\x65\xA1\x4F\x70\x5F\xD9\xB9\x0C\xD8\xD3\x32\x00\xCC\x73\x81\xFD\x5D\x0F\x05\x99\x5F\xC9\x47\x6B\xF8\x27\x72\xDD\x26\x4C\x7F\x5C\xAA\xFA\x19\xD5\x76\x38\x8F\x88\x07\xD3\x3A\x49\xCB\x28\xE3\xFA\
x4C\xF1\x4F\x43\xB5\x66\x3C\x55\xD1\x78\xF2\x90\x7F\xC2\xCD\x83\xE8\xCE\x9B\x83\x5D\xCA\x05\x3C\x68\x42\x64\x81\xC3\x50\xE5\xAF\x8F\xBE\xA6\x57\x79\x11\x6C\x3F\x39\x5A\xE0\x66\xFA\x50\xDC\x4E\x79\x32\x48\xF2\x3D\x53\xF1\xB1\x9D\xC7\x97\xDA\x6B\x57\x5D\xF7\x2F\xBD\xFC\x9A\x3A\xDE\x27\x50\x4B\xCE\xF7\xA6\x22\x6A\x56\x84\x1C\x7C\x4B\x1C\x78\x73\x57\x76\xBF\xC2\x1B\x43\x01\x61\xA0\x54\x15\x5E\x03\x88\x2B\xE7\x49\x09\x87\x36\x22\x34\x00\x6F\x27\x68\xF8\x5C\x20\xFE\xA9\xC4\xBA\x3F\x15\x4F\x54\x92\xD5\x69\x41\x92\x35\x54\x87\x82\xED\x5E\x1C\x81\x0C\x8D\xCF\x62\x72\x10\x3C\x43\x15\x55\x3D\x49\xCC\xB5\xB0\x58\xF3\x20\x05\x95\xCB\xB9\xA9\xE5\x50\x95\x48\x52\xEA\xC6\xC4\xE3\x91\x41\x1F\x1F\x0D\xCB\xA0\x58\xB6\x78\xF1\x1C\x96\xB5\x7E\x72\x19\x13\x54\x2B\xD7\x1E\x0D\x95\xCF\x6E\x0C\xDB\x6E\xDB\xE5\x12\x5E\x12\x86\xD7\xDE\x08\x3F\xFE\xBE\xAB\xBB\xC3\xF6\x84\x7B\xB4\x1D\x9F\x2F\x82\x42\x8F\x67\x55\x9C\x8F\xB7\xB0\x27\xDF\xA9\x75\x15\x9E\x61\x06\x6C\x37\xF6\xB9\x79\xC4\x2D\x85\x28\x0C\x3C\x7C\xD1\x99\x92\x51\xCE\xB3\x70\xDA\x0B\x82\x0A\x04\x3D\xA0\xEC\xB4\x51\x82\xAD\x2B\x82\xB0\x5D\xD9\xED\x0B\x57\x82\x5D\xC0\xE9\x03\xE4\x3E\x21\xA4\xA5\xDA\x0F\xC3\xB9\x1D\x4E\xD7\x6B\xA2\xFC\xA5\x5C\x6F\xCD\x8C\x1D\x9B\xEC\xE1\x7C\x45\xBE\x71\x1D\x45\x20\xF1\xD0\xF5\x18\x11\x1E\xD4\x0E\xE2\x5D\x78\x5C\x57\x2E\x7A\x75\xFB\x3A\x12\x07\x96\x41\x80\xCF\xF4\x84\xD4\xF1\x5B\xAD\xD6\xFE\x68\xC5\x5B\x09\x18\xE1\x95\xCB\x45\xB2\x2C\x80\x11\x49\xCF\xEE\xF2\x5E\xCA\xBC\x23\xC2\x1B\x61\x89\x35\x71\x89\xB5\xA9\x76\xB3\xF6\x47\x99\xC6\x21\x56\x65\xB9\x67\xDA\x70\x4C\x5A\xC2\x3D\x9D\x3D\xE2\x16\xDC\x9A\x4A\xE6\x70\xC4\xEB\xE5\xD2\x57\xE3\x50\xA2\x54\xF8\xAB\x15\xA8\x97\xD6\x34\x8D\x5B\xB0\xA6\xD2\x95\xAE\xBD\x4F\x9B\xEE\x65\x6E\x66\xAD\x98\xB6\x4D\x90\xB3\xC6\x89\x44\x57\x2E\x3F\x02\x08\x55\x4E\x89\x76\x0B\x1E\xCB\x9C\xE9\xFE\x15\x7A\x45\x14\x67\x48\x79\x49\x29\x5F\x3E\x89\x6C\x13\xC8\x51\xE9\xDA\x20\x0A\x25\xE5\xA8\x74\x5B\x94\xF2\x72\x92\x72\x52\x0E\x0E\x8B\xA3\x61\x3B\x48\x79\x17\xDE\x5E\x46\x29\x8D\x41\x54\
x4B\xB7\x08\xAB\x20\xBB\x31\x9C\x73\xE7\x52\x10\x15\xF2\x0A\x92\x94\x9F\x63\x3E\x59\xA8\xF3\x5C\x7C\xBE\x74\xE7\x26\x29\x5F\x22\x34\x16\x52\x7E\xA7\xD6\x2D\xF1\xCC\xD8\xB7\x74\x5C\x97\x40\x5A\x65\x16\x83\x5B\xB8\xED\x99\x94\xB7\x94\xF2\x92\x52\xBE\x84\xC9\x03\xB8\xB5\xA4\xA7\x83\x35\x7C\x9E\x27\x56\x0C\x25\xE4\x6F\xB7\xCF\x5D\x1D\x14\x63\x15\x64\x7E\x49\x96\xBD\x0A\xB4\x40\xF0\x86\x4F\x18\x8E\xA7\xC5\xFF\x87\x0A\xBD\x75\xE2\x67\xFE\xBF\xA8\xCD\xF3\x88\x9B\x0F\xC6\x7A\x85\x78\xBB\xBC\x5F\xB8\xB2\x6F\x66\x9A\x7D\x5D\xA7\x07\x81\x1B\x96\x3E\x3F\x94\x1B\xC7\x36\x9C\x64\x96\x61\xA1\xDE\xA7\x8D\xD7\xC7\x20\x6F\xAC\xBD\xEA\x9B\x60\x03\xB5\xC1\x5E\x6F\xB9\xEA\x83\x6C\x23\x37\xAD\x85\x60\x4E\x55\x1D\xA0\x17\xA7\x9E\x67\xB8\xF6\x14\xF7\xDA\x86\xB1\x05\x2D\x23\x52\xD1\xE4\x2A\x9E\x42\xDE\x03\x47\x30\xA7\xE3\x53\xB2\x97\x84\x82\xA3\x76\x05\xB8\x35\xE0\xBB\xBA\xBA\x8B\xFA\xC1\x65\x8A\xE0\xF1\xD2\xE1\x6E\x20\xD8\xE4\x23\x2C\x5D\x0D\xE3\xBC\x6F\xDD\x22\xB4\x79\x1C\x96\x87\x64\x97\x5F\x36\x18\x9C\x25\x56\x52\x5F\x63\x2B\x01\x83\x55\xD0\xE7\xDC\x1A\x2A\x59\xC0\xA5\x2B\x5C\xDE\xFD\x32\x6F\xC9\x59\x80\xDB\x03\x3D\x6F\xAD\xAB\xFA\x65\xF6\x08\xC6\xC0\x15\xDD\x9F\xC6\x12\x92\x97\xDA\xD9\x4B\xC1\xBF\x72\x04\x08\x51\x3E\xB8\x15\x57\x4D\x3B\x5B\x35\x2D\xE5\xB2\x0D\x9A\x33\xE8\xD4\xAD\x20\x5A\x2D\x77\xFF\x0C\x63\x26\xAB\xA6\xE5\xAA\x29\xE6\xAB\xA6\x5D\xDD\x1B\x5A\xB7\x74\xC5\x6C\xD5\xB4\x77\xBD\x6A\x8A\x33\x56\xCD\x9D\x5A\x17\x57\x4D\xC9\x99\x6D\xE7\xAB\xA6\x58\x59\x35\x25\x57\x4D\x3B\x5B\x35\x73\xDA\x0B\x0C\x94\xEC\x0D\x95\x5B\xF4\xB9\x6B\x24\x0A\xF4\x20\x0C\xA9\x04\x79\xF6\x05\x36\x94\xCD\x7B\xC4\x5F\xD4\x2A\x46\x22\xD1\xF9\xB6\x78\x0C\x14\xC9\xB7\x3A\xA2\x20\x63\xDD\xD4\x8F\xC5\xC8\xB6\x63\x89\x59\xF4\xB5\x57\xDD\x9F\x8B\x47\x72\x7C\x30\x05\x2C\xF9\xE6\x0A\xA2\xE6\x53\x79\x0F\xCF\x1C\xCB\xC3\xCB\xDD\xA4\xF8\xA6\xED\x66\xB5\xE2\xD5\x82\x08\xAD\x91\x7A\x9B\x9F\x33\x30\xCE\x5D\xC6\x36\xCE\xBE\xEC\x11\x83\xF3\x18\x51\x95\xD4\x14\xD1\x96\xCF\x08\x9F\x0B\x97\xCF\x02\xDA\xC2\x6E\xA0\xFB\x4A\x82\
xDD\x32\x97\x07\x2D\x81\x65\x8D\x10\x9E\x67\x87\xDA\xB7\x4F\x0F\x8B\x91\xA0\x97\xDA\x55\x8F\x0D\x2D\x42\x43\x17\x08\x71\xAB\x10\xE2\x96\x85\xDD\x3B\x48\xD3\x02\x21\x6E\xED\x6E\x9F\xC1\x1D\xBB\x38\x72\xED\xEE\x60\x25\xC4\x2D\x73\x65\xB0\xF5\x41\xC7\x93\xF1\xAC\x9E\xF5\x4D\x33\x6D\xB0\x0D\x8F\x1A\xC6\x65\xCE\x5C\xD9\xED\x6D\xF8\xA0\x1A\x32\x82\xCA\x70\x74\xC6\xAD\x4C\x87\x7D\xCD\x9F\x78\xAC\x89\x3F\xE0\xF5\x01\x23\x6B\x4E\x9E\x09\xAA\x23\x3A\xD4\xC3\xE7\x38\x12\xE8\x63\x38\x65\x88\x76\x53\x8D\xA1\x59\xA3\x2B\x38\xB0\x78\x21\x8E\xA6\xB8\x6D\x7E\x07\x08\x73\xBE\xAF\xD6\x8B\xD3\x91\x16\xDE\xC0\x72\xD1\xFE\x66\x25\x59\x9A\x39\x4F\x46\x93\x2B\x50\x0D\xB0\x02\xFE\x11\xCE\xCA\x36\x98\x04\x7A\xA8\xFC\xF9\xEB\xFE\xFC\x8D\xA1\xBE\x6C\x2C\x9E\xA9\xC0\x0F\xFA\xD3\x72\x9E\x2E\xFC\x0B\xB8\x8F\x05\x80\x87\x37\xE3\xB0\x88\x5E\xE0\x45\x30\x15\xC3\xFF\x57\x77\xBB\x8F\xBE\x28\x51\xA5\x00\x4B\xC3\x6D\xC8\x0F\xC2\xB1\xDD\xD0\x73\x62\xB1\x88\x5F\x87\x4E\xFF\xD2\xED\xAC\xAF\x9D\x0D\xFF\x72\xD1\x7B\x8D\x86\x37\x08\xED\x93\xAF\xDB\xF0\xEF\xC1\x52\xAF\x90\x94\x45\x68\x90\x13\x20\x05\xB7\xD4\x6F\xC2\x2D\xD4\xCC\x0A\xB8\xE6\x48\x0A\xB8\x66\x1F\xB7\x20\x2E\x7B\x74\x09\x97\x0A\xF0\xEC\x62\x60\x00\x73\x8E\xFD\xBF\x01\xF7\x29\xF1\x2D\xDD\xE2\xA8\x6F\xC5\x13\xE8\xBF\xA4\x82\x7C\x87\x7E\x04\x15\x41\x4A\xA7\xDC\x95\xBD\x82\x52\xCE\xF7\xD1\xBC\x53\x83\x90\x3B\x0B\xB7\xAE\x8C\x80\xE6\x08\xB4\x48\xD1\x7C\x5D\x6C\x38\xF4\xBD\x6D\x85\x30\x83\x5C\x6B\x88\x39\x27\x16\x73\x64\x7E\xE4\x20\x30\x52\x06\x3E\x2B\x0D\xEF\x07\xE1\x0A\x30\x6E\x6D\xDE\xA4\x9D\xDA\xDB\xCB\xD2\x45\x60\x87\xE2\x6B\x1B\x07\x68\x58\x00\x37\x7D\xC8\xE7\xE3\x94\xCB\x38\xE5\x6E\x11\xCE\x67\xB1\x53\xC5\xE9\x4E\x65\x30\xF8\x5C\x21\x9D\xAA\xD9\xA9\x6A\x68\xD6\x3B\x55\xB5\x92\xC2\x54\xC7\x4E\xD5\xA1\x53\x85\xAB\xE5\xD6\xBC\x61\xA7\x6C\xEC\x54\x81\x05\xDF\x84\xDD\x2F\x75\xCA\x4A\x70\xD7\x32\x2C\x22\xED\x2A\xFF\xD3\xEA\x60\xB7\x9F\x71\xE4\x42\x49\x7A\xE5\x9A\x23\xD7\xB8\x45\x90\xE9\xFF\x4B\x1D\x0C\xF9\x6E\xB0\xE3\xBC\x1E\x83\x6C\xEE\xE2\x9C\xDD\xBA\
x1C\xC7\x88\x20\x59\xAF\xFF\xB5\x0F\xDC\x54\xA1\xAF\xFE\x23\xFF\xF4\xE6\x4D\x75\xB0\xCC\x7D\x16\x54\x31\x7F\xF5\x59\xF7\xDD\x7F\x94\x13\x18\x94\xD2\xCB\x3F\xFA\x81\x9B\x6A\x50\x5E\xBD\x63\x69\xE1\x6D\x5B\xF9\x28\x73\xD5\x92\x80\x01\x2B\x1F\x9B\xC6\xBF\xFC\xDF\xE3\x25\xBC\x0B\x13\x8E\x36\x32\x28\x86\x33\x10\x23\x30\xFC\xC9\x38\x28\x1A\xAD\x53\xF9\xE4\x58\x13\xFF\x03\xD4\x44\xD1\x7D\x08\xB9\xBB\xC8\x6A\x04\x3C\xB4\x72\x85\x03\x0B\x30\x2E\xC9\x62\x93\xB3\x66\xBD\xF2\x66\xA3\x5C\x2C\x5D\x79\x14\x45\x23\x43\xE8\x8C\x6B\x9C\xEE\x7E\xE8\x96\x9C\x35\x90\x8B\xB7\xB6\x54\x50\x78\xC3\x12\x0A\x9F\xAF\x04\x39\xE4\xCD\xFF\x91\x4F\x21\x60\x6E\x16\x02\xC6\x30\x9A\x14\xA9\xD8\xC6\xA8\xEB\xDC\x1B\xA2\x30\x02\x8F\x9C\x8C\x42\x2F\x8C\xF7\x69\xE5\x8A\xF0\x57\x46\x13\x3E\x61\x0E\x41\x7E\x51\xC1\x95\xC8\x40\x86\x3C\x5C\x8D\xAA\xEC\x54\x55\xC7\xAA\x9E\x1F\x93\x73\x6B\xAA\xAA\x5A\xAD\x6A\xE7\x4A\x8C\xD3\x8D\xFA\xFB\x38\xDE\x55\x97\xD1\xB9\x2B\xE1\x4B\xE9\xC0\x93\xAF\x75\xC3\xA4\xE0\xF1\xD8\x8D\x2A\x34\xBF\xF2\x66\xDC\xD0\x0D\x09\x24\x4F\xDD\x90\xC0\xFF\x8D\x95\xA1\x23\x60\x5B\xAA\x62\x2E\xFB\x6A\x47\xA6\xCA\x76\x62\x65\x93\x3D\xE0\x72\xBF\xF0\x8B\xB4\xD7\xE7\xDD\xFB\x5F\x64\xE4\x45\xDE\x7D\x99\x9E\xFA\xA0\xA8\xF2\xF9\xFD\x13\x50\xEE\x11\x4C\x27\xB6\x77\x45\xA0\x4C\x04\x75\x14\x2E\x0F\x1D\x1A\x6A\xA1\x56\xA4\xC5\x1A\x5B\x04\xF6\x94\x52\x9E\x6E\x50\x98\x03\xC0\xCC\x44\xF1\x2B\xD0\xAB\xE0\xCA\x79\xD7\xC3\x48\x5F\x11\x3A\x97\xE9\xC5\xB3\x97\xC2\x2D\xFF\xC4\x19\x2F\xDD\xE1\x4B\xBB\xF4\x52\x66\xEC\xD6\xF1\xB5\xD5\xFA\x6B\x5D\xED\x6F\xDF\xFE\x9B\xEA\x09\x89\x18\xC0\x2F\xB1\xC2\xF9\x39\xA5\x79\x43\x4B\xA0\x2D\x5C\x1D\xD0\x71\x39\x63\x30\xB2\x29\x52\x39\xC6\x78\x64\xD3\xCE\x0A\xA4\x7F\x46\x45\xFF\xF9\x5B\x91\x0F\x33\xDB\xB4\x5D\x12\xF5\x4D\x94\x6A\x2E\xAC\xED\xC0\xF4\xA2\x52\x35\x54\xAA\x72\xC9\x94\x4B\xAC\x1A\xD1\x4C\xA8\xC9\xF1\x4B\xD4\xE4\xB9\x3F\x79\x0C\x14\xEC\x50\xB1\xB1\xA4\x6C\x9E\x2C\xC9\xCD\x93\xA1\x1F\x5C\xD0\x36\x2D\x68\xD5\x7C\xEF\xB6\xAE\x37\xC5\x70\x4E\x6E\xD6\x08\xA6\xA9\x86\x82\x1D\
xC6\x1D\xB2\x79\x54\x3F\x60\x18\x21\x69\x62\x75\xA9\xCD\xB8\x03\x7E\xB4\xDA\x54\xC0\x29\xB9\x20\xA2\x81\x01\x69\x2E\xE9\x3D\x2C\x37\x9B\x17\xED\x8A\x79\x51\xC6\xCD\xB5\x3A\xBD\xB9\x4A\x5C\x71\xC9\x06\x54\x1C\x13\x33\x19\x14\x43\xBD\x62\x52\x04\xC3\x46\xE2\xF0\x85\x34\x72\x5E\xC0\x95\x47\x28\x80\x1E\x94\xE1\x70\x6A\x56\x28\xDB\x37\xFF\xE7\x2A\xFF\xD4\xB8\x75\xD5\x44\x47\xCB\xDD\xFE\xA9\xC3\x5F\x0B\xF9\xA5\x50\x4A\x95\xAA\x52\x8D\xCA\x81\x28\x7D\x5B\x3D\xB6\x6C\x5C\xD5\xFD\xC6\xFB\x09\x26\x1B\xDE\xE1\x17\x57\x97\x8D\x53\x4B\x61\xC1\xA7\xCE\x3E\xD7\x84\x63\xFB\x82\xBE\x9D\x5F\xD7\x48\x20\x5E\xA0\x78\xA2\x59\x0D\x05\x70\x4D\x57\x33\x03\x87\xA6\x20\xD0\xDB\xCA\xF8\x08\x60\x84\x05\xE1\x3C\x4C\x52\x1D\x27\x09\xC0\x43\xAE\x1A\x51\x61\x30\xE0\x26\x07\x14\x6C\x4B\x95\x6C\xCB\x86\x51\x17\x15\xAB\x3C\x98\x3B\xAA\x16\xFE\x85\x31\xD6\x6B\x64\xC8\x51\x6F\xC3\x7A\xE1\x2E\x7C\x6C\x59\x92\x91\x82\x1D\x5B\xA2\x63\x85\xAB\xDD\xC2\xDB\xF0\xFF\x24\x22\x8B\xAF\x4A\x44\x9A\x4D\x22\xD2\x7E\xDD\x44\x64\x49\x86\x84\x7F\x03\xA5\x70\x57\x8D\x5D\xC4\xC6\x96\x67\x36\x76\xC1\xC6\x96\x5F\xB1\xB1\xD5\x6A\x63\x17\xAB\x8D\x5D\x1C\xA1\x00\x85\x20\x34\x76\xAB\x11\x97\x43\x23\x87\xAB\xD2\x3F\x35\x0E\xB9\x5F\x5C\x25\x82\xBC\xFF\x6F\x8F\x91\x4E\x96\x3F\x1E\x8E\x53\x8D\xB7\xD7\x89\x68\x2E\xB9\x59\xF2\x55\x28\x78\x95\xF2\xE5\x4A\xFF\x17\x19\xAB\xE9\x7F\xE0\x64\xF4\xC5\xD5\x5D\xCE\x66\x33\x0E\x4D\x98\xD2\xDA\x55\xFB\xCB\x1A\x61\x34\x36\xFC\x3F\x8D\x4B\xF3\x55\x8D\x4B\x9B\xC6\xE5\x20\x46\x28\xCF\x7A\xCB\xBE\x0A\x38\xFE\x59\x63\xC6\x11\x03\x4C\xB2\x03\xA9\x68\x1B\x7E\x6D\xE3\xE0\x3C\xB1\x54\xED\x76\xD3\x04\xA3\xB3\x0C\x95\xA8\x43\xC8\x63\x73\x17\xF2\x58\xD2\x6E\x9E\x75\x25\x8B\xA6\xF3\xA9\xAE\x54\x6D\xB5\x02\xBF\x2E\xCD\xAD\xCF\x9C\xE2\xD5\x09\xCE\x56\x27\x38\x3B\x8A\xD3\x9B\x25\x59\xB4\xBC\x07\x97\x79\xC5\x69\x7F\x9A\x55\x44\x9F\x63\xD2\x33\x7E\x59\xAE\x7F\xC9\x87\x70\x34\x4A\xD3\xAD\xD2\x74\x2B\x99\x6E\xB5\x32\xDD\x41\x4E\xE8\x8F\x90\x67\xD7\x45\x25\xDB\x2C\x2A\x19\x22\xEF\x9C\x1A\xE3\x20\xB7\xE1\xDC\xB3\x88\
xBF\x81\xEE\x63\x19\x7F\x03\x9E\x40\x1D\x7F\x03\x9A\x48\x19\x7F\x43\x56\x7B\x1B\x7F\x2B\xC3\x6F\x69\xE2\x8A\x60\x02\x0B\x7F\x99\x89\xF0\xDE\xAE\x06\xE5\x38\xED\xE1\x3C\xEC\x6D\xCB\xD3\x5B\x97\x3D\xE3\xF3\xEC\x8C\xCF\xCD\x19\x9F\xEB\xCD\x9F\xAF\xD9\xD5\x7F\xB3\x01\x93\x7D\x77\x7A\x6B\x76\x96\xC0\x67\xB9\x10\x3E\x88\xB5\x82\x78\xB6\x57\x82\xDE\xFF\x39\x6E\xD0\x59\xDC\xA0\x8B\xC9\x80\x9F\x27\x46\x95\x7E\x87\x42\x36\xCB\x02\xAB\xA6\xD4\x1A\x31\x7C\x6B\x57\x86\xE1\xDA\x99\xF0\xDA\x83\xF5\x39\x19\x17\xC4\xA7\x6F\x91\x99\x9C\x0E\x69\xB8\xA7\x2F\x78\x46\xA3\x75\xD4\xF8\x17\x58\xCA\x69\xB7\xF0\x66\x94\x28\xA4\xF0\xFA\x64\xBA\xD6\x33\xD3\x95\x41\x49\x6D\x03\x98\xAC\x96\x3B\x96\xB7\xD7\xFA\xAD\x70\xFC\x7A\xE3\x9F\x85\x96\x6D\x87\x1F\x5F\xFB\x7C\xF8\xB1\xF3\x1A\x06\x34\x5A\xD8\x80\xB6\x61\x6D\x57\x2E\x68\xE4\x2F\x90\x09\xFA\x8F\x55\xCA\x21\x2B\xF8\xD1\xB0\xF0\x97\x0E\xAF\x4D\xD9\x88\xB8\xF9\x3B\x86\x97\xAC\xA5\x92\x69\x2E\x29\x8D\x07\x67\xA8\x8B\xB8\x5E\x58\x90\x19\xB2\xF0\xD6\x15\x6B\x96\x8B\x5B\x88\x02\x28\x62\x30\x49\xBB\xAE\x00\x14\xAF\x9C\x35\xFD\xBB\x5F\xBA\x9D\x1D\xC2\x73\x45\x39\x57\xBD\x06\xF0\xBA\x69\xC8\xA5\xD1\x36\xF1\x64\xC7\x13\x74\x4A\x61\xC8\x18\xB3\x8F\xBB\x73\x0B\x38\x15\x34\x3C\xBB\x3A\xD4\xE8\xF4\xCF\x61\x16\xD0\x85\x87\x0E\xE1\x9B\x05\x92\x57\xEB\x2D\x6E\x90\x82\x41\x7D\x8E\xC7\xEF\x7B\x5C\x1B\xB6\xE1\x17\xC6\xFE\x4D\xAE\xF1\x66\x0C\xC3\x1A\xE7\x12\x8F\x16\x32\x3A\x61\x28\xB2\x26\x1E\x46\xDE\xF4\xCD\x84\x19\x5B\x86\xD9\xFD\x47\xF0\x56\xE3\x6D\xEE\x9A\x7F\x03\xBF\x09\x3F\x74\x89\x4C\xDB\xEE\x07\xC3\x1C\x6F\x99\x72\x81\xCE\x9B\x71\xD8\x71\xCD\x51\xFF\x66\x87\x10\x43\xEB\xDE\xBC\xBF\xCC\xFC\xCB\x9F\xC5\xC8\xB9\x2E\x41\xC5\x21\x51\x63\x01\xF9\x7A\x13\x78\x29\xE4\xA4\xFC\xA6\xA0\xA8\xDE\xE4\xDE\x7C\x08\x7A\x98\xE6\x60\x59\x86\x7F\xDC\x9B\xE3\x49\xB9\x0C\x2F\x41\xE6\x4D\xA4\xB2\x6E\xF3\x26\xF1\x0B\xD7\xFE\xA6\xA4\x8D\x06\x69\x7E\x13\x1D\x67\x49\x90\xC4\xBC\xA7\xFF\xD7\xFF\xFC\x3F\x65\x9B\xB6\xFB\x45\x96\x5A\x24\xB7\x81\x68\x0B\x23\xAB\xDA\xD0\x08\xEB\xDA\xFB\x94\
x72\xCD\x7D\x4A\x1D\x2C\x6D\x6A\x02\xCC\xAA\x05\xBD\xE8\x8F\xAA\x07\x32\x09\x10\x5A\xC0\x5F\x6A\xB9\x75\x64\xDD\x47\x99\xB2\x5D\xA5\xD5\x1D\x3D\x7A\x3A\xDD\xAD\x5D\xC2\x9B\xA2\x54\x86\xA7\xDB\x64\xF5\x17\xE2\x2F\x0B\x3F\x6F\xAD\xD5\xB9\xA0\x73\xAE\x10\x81\x7C\x02\x28\xC2\x56\xD0\x8A\x98\x54\xBE\xA0\x3F\x5B\xBB\xC5\x51\xD8\xA2\x16\xC1\xB8\x63\xD6\x60\xF8\xA2\xE8\xDB\x86\x13\xD9\x32\xC0\x42\x79\x7B\x8D\x47\xA9\xA0\x3B\x35\xD6\x03\x0E\x1C\x41\x42\xB1\x48\xB8\xDA\x49\x6C\xB4\x67\x76\x78\x28\x0D\x8A\x38\xA8\x6C\xA8\x6B\xF7\x26\x31\x0A\x35\xC4\xEE\x72\x94\x95\x22\x2C\x2A\x9C\x82\x6B\xBA\x2C\xDA\xAC\x99\x89\xC6\x22\x93\x49\xA8\xF7\x97\x06\x16\x2D\x58\xD5\xD7\x86\xDA\xAB\xFE\xCD\xC0\xF3\xE1\x22\x69\x26\x17\x49\x50\xEA\x1A\xF0\x3A\xA1\xDD\xEE\xCD\x38\x52\x9D\x63\x4E\x8E\x75\xF7\xF0\x95\xE0\x04\xE2\x2C\xB4\xE2\x65\xC9\x1F\x40\x15\x0C\x01\xAE\x24\xEC\x91\x69\xE7\x93\x12\x2F\x9A\xEF\xA2\x12\xAF\x67\x0E\x12\x13\xD3\x43\xA8\xC4\xB3\x07\xB2\x6E\x1E\x22\xAE\x06\xEB\x6F\x5A\xA2\xE9\x3D\xEA\x5E\x7A\x65\xC8\x39\xEF\xD9\x2B\x3C\x7C\xED\x1D\x32\x5B\x52\xFB\xDF\x97\x22\x79\x41\x5E\xD2\x4A\x5A\x14\x5D\xB7\x3B\x61\x93\x0B\x0B\xAB\x44\xBE\x7A\x7A\x43\x8E\xAC\x0C\x5E\x0C\xD3\xDD\x5B\x5C\x52\x99\x7F\x9B\x04\xE6\xFB\x6E\xF4\xBF\xD7\x1B\x71\x80\xE6\x74\x80\x6A\xC6\xFC\xE9\x19\x06\x69\x35\x39\x0E\xED\xE9\x33\x5B\xC9\x3D\xD5\xCE\x94\x9F\x59\xB5\x7E\xCC\x9A\xF5\x53\x44\xC7\x61\x71\x84\x38\x84\x62\x64\xBA\xA7\x98\x43\x55\x74\x1C\xF2\xCA\x24\xAC\xA5\x93\xC7\x96\x41\x70\x52\x3A\xF1\x96\x90\x83\xF0\x90\x8E\x5D\x1E\x70\x29\x71\x4C\x6A\x24\xF5\x12\x79\xAF\x64\x7E\x53\x86\x18\xF6\xA0\xEA\x58\x4B\xDF\xBA\x1A\xBE\x5E\x7B\xC7\xDE\x85\x2D\xFB\xC2\xA9\xDE\x5D\x98\x9D\x35\xA4\x77\x17\xA2\xAF\xF7\xAC\xDE\x55\x6B\xBE\xDE\xB5\xDE\x95\x0D\x70\x21\x25\xDA\x8F\xC9\x11\x4F\x11\x2E\x61\xC1\x8C\xAA\x60\x38\x85\x5F\xBF\x81\xBF\x86\x1F\xFF\x89\xA2\x13\xE8\x53\xF2\xD5\xEF\x7C\x3C\x68\xE9\x92\xD9\x22\x89\xC9\xDF\xD1\xEC\xE9\x3E\x9C\xF6\x5A\x85\x50\xAC\x26\x18\xD1\xB9\xC4\x1E\xBB\xFC\x08\x73\x2F\x6B\x39\x27\xC0\xA8\x64\x30\
x54\xE1\xE4\xC0\x2C\xC3\xB6\xAF\x31\xD8\x9B\x3C\xC8\x10\x84\xF0\xE5\x5D\xC8\x82\x4D\xA3\x35\xB3\x84\x73\x67\xF0\x3C\x94\x48\xC7\x5F\x76\xA0\x5F\x76\x28\xF1\x78\x73\x31\xFB\x12\x96\xDC\x0E\x6C\x2B\x6C\x9A\xD5\x99\x87\x76\x57\xFA\xFF\xF1\x64\xDC\xFA\x46\xBB\x3C\xFD\x47\x2F\x97\x4B\x83\x9F\x32\x15\xC6\xEF\xBF\x1C\xB7\xEE\x29\xDB\xB6\xAD\xDA\xD9\x9F\xA2\x6D\x70\x97\x43\x5F\x79\x30\xA3\x7A\xDB\x92\xFC\x46\x6E\xCC\xAC\xEF\xAE\xE1\x33\x67\xFD\x17\xF0\xBD\xD7\x7D\x4B\xCE\xD3\xA9\xCC\xAF\xF1\xC9\x12\x1E\xC7\x6B\x52\xC6\xAC\x96\xF9\x14\xCB\x40\xC3\xBC\xA6\x62\x21\xDC\xA4\xE0\x2A\xF1\x5A\x6F\x81\x97\xD9\xDA\xC9\xC2\xCB\x57\x73\x96\x0A\xC6\xEB\x74\x4E\xB9\x62\xC2\x11\xE8\x73\xA0\xA7\x9E\x3C\x73\x0D\xEF\xB2\xCC\x64\x82\x47\xEB\x62\xD6\x3D\x60\x54\xD0\xFF\xA1\x1D\x70\xD1\xB5\x80\xE1\x0C\x6B\x23\x28\x62\xDD\x57\xE1\xFD\x58\x6C\x3B\x92\x3B\x2E\x0B\xB2\x95\x05\x89\x98\x63\xEC\x45\x51\x14\xE0\xB8\x52\xE2\xF0\x6F\x87\x18\x0A\x2B\x4B\x38\x94\x5A\x88\x59\x5D\x4F\x66\x72\x3D\x37\xB7\x5B\x7A\xB1\xDA\xB9\x49\x5D\xF1\xB3\x2A\x99\xD3\x1E\xA4\x0C\x66\x0A\x1F\xCC\x9B\x1F\xF9\x5D\x59\x76\xA2\x5F\x58\xCC\xB1\x3E\x53\x88\xB1\x7F\x70\xF4\xD5\xC8\x2D\x19\xA1\xC5\x0F\xA6\xB0\x63\x7F\x5B\xD3\x63\xA8\xBD\x3B\x1E\xEC\xEE\x65\x73\x3F\x8B\xEC\x39\xDC\x0B\xFA\x07\x47\x67\x76\x43\x37\xEF\x0D\xE7\x97\xA0\xD7\x8B\x07\xCC\x85\x60\x96\x3D\x90\x75\xAE\x08\x8A\xBC\x57\xAE\x78\x85\x9E\x89\x59\x66\xD8\x60\x19\x71\x97\x1C\x72\xCE\x7A\xF5\xB0\x86\x6D\x4C\x44\xD5\xB0\xF0\x2F\x9B\x77\x00\xD0\x0A\x81\xCE\xF8\xEB\x27\xD5\xD8\xFD\x50\x58\x51\xDD\x9F\x20\x90\x55\x06\x84\xD1\xAB\x00\x4A\x40\xF8\x33\xE2\x8F\x1D\xE0\xE8\xEE\xE2\x71\x89\x81\x0D\xB5\x5C\x95\x98\x6C\x64\x65\x3C\x38\x86\x7F\xCD\x2E\x01\x92\x30\x5D\x0C\x2D\x52\x24\x2E\x16\xCF\x60\x95\xC2\x51\xE3\x0F\x82\x73\x99\xC1\xD8\x20\x00\x84\xF2\xFA\x59\x20\x7E\x0C\xE5\xEE\x80\x20\x78\x7C\xCB\x40\x29\x57\xEE\x62\xAF\x8A\xB7\x63\xE6\x58\xDE\x13\x83\x4C\x4B\xA7\x7C\x90\x92\xB9\x85\x1D\xFA\x89\x0E\x4D\x0A\x46\x31\x14\x61\xAE\x60\xB2\xA8\x60\x22\x5F\xD9\x8A\x82\xC9\x64\xA8\x44\
xC1\x64\x1C\xC9\xA4\x60\xD2\x38\x4E\x5F\x3A\x75\xC4\x08\x7E\x75\x25\xE8\xD6\xCC\x9F\x1F\x5D\xD9\x7D\xE7\xAD\x34\x0F\x03\x54\x1F\x9F\x5B\x6D\xB3\x0C\xAD\x95\xA1\xB5\xD7\x9C\xD9\x8D\x09\xCF\xCB\x94\xF7\x5C\x35\x31\xBA\x1C\x11\xED\x7F\x92\xF8\x1C\x89\x60\xD4\x29\xB9\x44\xE7\x31\xA8\x92\xB0\x94\x45\xF6\x88\x2F\xE7\x19\x67\x77\xF4\x52\x96\x6F\x55\x6A\xD0\xFE\x5D\xE3\xD6\x7F\x95\x2B\xA5\x74\xB6\xA8\xE8\x76\x3C\xED\x94\x3C\xE3\x4F\xD9\x2A\x7B\x27\x87\x66\x63\x20\x06\xA7\x66\x6D\xE7\xAB\x9A\xB3\x37\x6F\x9A\xB3\xB7\x7C\x4D\x73\xF6\x96\xB5\x39\x6B\xB7\x11\x7D\x7C\x9E\x09\x24\x34\xB7\x92\x11\x56\xC2\xD4\x22\x82\x5C\x38\x8C\xD5\x7C\x36\xF3\xDD\xD8\xBD\x82\x28\xCC\xAE\x7D\x8B\x84\x25\xF9\x9B\x6E\x0E\xF6\x32\x28\xFF\x16\x67\x13\x28\x85\xA2\xEE\x95\x57\xBF\x94\x10\x16\x52\x1A\xC3\x79\x2E\x4F\xD9\x94\x9B\xB5\x3E\xB2\x87\xBD\x6A\x5B\x11\x8B\xEF\x8D\x98\x35\x1B\x4B\xC9\xB5\xD0\xF9\xB1\xFB\x3E\xC2\x3D\x40\xA4\x76\xCE\xAC\x75\x21\x63\xA0\xF0\x50\xCA\xE0\xB9\xC7\x6F\x39\xE5\xBB\xE3\x5D\xB7\x9A\xEB\x78\x56\x35\xC2\xAF\x1A\xAA\x1A\xC2\x73\xD4\x61\xCA\xDF\x7C\x4D\x5D\x0B\xEB\xFB\xFC\x6A\xF5\x6F\x72\xE5\x7A\xB5\xEA\xB2\x39\x8F\x61\x99\x97\xDB\x76\xEB\xF9\xA6\x67\xBC\xBF\x92\xC1\xF9\xE3\x77\x6C\x65\x19\x07\x07\xF3\x48\x79\xCC\xBB\xBF\x70\x4B\xCE\xED\xFC\x26\x68\x35\x99\xA5\x9A\x0B\x72\xBB\xF1\x15\x2C\x37\x5F\x07\x33\x2D\x73\xD9\xC5\xAC\x1B\x0A\xCC\x1E\x4C\xAC\xA1\xA1\xC4\xE1\x42\x56\xD3\x0A\x41\xEA\xE1\x25\xF5\x3B\xA9\xD1\xE4\x03\x44\xF3\xE3\x78\x19\x44\x0B\x4F\xBC\x30\x52\x23\x22\xE0\xA2\x21\x38\x70\x0B\x03\x50\xAD\x6B\x0C\x62\xCB\xE6\xF0\x69\xE1\x36\x17\xC7\x34\x34\x71\x09\x5F\xC0\x2B\xCC\xAF\xCE\x5C\xE3\xF4\x11\x4D\x5F\xF4\x49\x36\x81\xB6\xFB\x2E\xC8\x44\x81\x47\xDA\x95\x9E\x40\xF9\xD2\x7B\x8D\x5E\xD8\xB8\x08\x06\xED\x9F\x1A\x87\xC6\xD7\x8F\x47\x06\x3A\x66\x2F\xE3\xB6\xA0\xBC\xA4\x8C\x5C\x49\x84\x13\xFD\x2B\xBD\xF6\x99\x53\x47\xFD\x12\x1D\xEB\xB7\x24\x44\x3B\x18\x8C\xCB\x7E\xDB\xB5\xFE\xF6\x8F\xFF\xEC\x7D\x8F\x2D\x71\x38\x3D\x58\xEA\xB0\x5B\x8C\x7D\xE7\x5A\xBF\x78\xDA\x35\x63\xDF\x12\x66\
x72\x19\x43\xA4\x3A\x6F\xC2\x3E\x36\x74\x6C\x42\xB0\x7E\x1B\x3E\xC3\x50\xEF\xCE\xBF\x95\xF1\xA9\x27\xCF\x38\x15\x1E\x80\xBD\xAB\x5C\xCB\xC8\x6D\x79\x89\x72\xF6\x28\xB4\xC6\x62\x8C\x7B\x3A\x26\x59\xF1\x12\x0E\x5E\xE5\x4F\xFA\x32\xD8\x25\xCB\xD4\x55\x3D\x75\x75\x29\x5D\xF5\x19\xFC\x1F\xEC\xAD\x89\x81\xFC\xA5\x74\xC9\xB8\x6D\x7F\x72\xB0\x34\xAE\x94\xBE\x94\x02\x99\xC9\x48\x0E\xCB\x9D\x76\xD9\x0B\x28\xD2\xBC\x47\x5E\xB9\xED\xA3\xDE\xBA\x65\xAF\x1A\xD7\xF9\x2F\xC1\xD7\xC4\x75\xF0\x6F\x54\xDA\x5F\xCE\x54\x7C\x76\xC5\xCE\xE5\x12\xB4\xFE\xA6\x91\x10\x2F\x73\x55\x22\xD2\x65\x32\xFF\x06\x9D\x06\xFE\x17\xD5\xE1\xD2\xA4\x85\xD6\x25\x85\x35\x77\x15\xFA\x73\xD3\xC7\xA2\x3C\xD7\x15\x81\x99\x96\xD8\x0B\x71\x89\x51\x27\xDA\xA9\x01\x66\xDA\x7E\xD8\x88\x4F\x9D\x8C\x5B\xE7\x54\x6E\xF2\xF9\x9F\x6C\x7D\xF5\xDF\x13\xDF\xFE\x15\xDE\x09\x07\xF1\x17\xD5\x61\xD8\x7C\x57\xB5\xCC\xDD\x3D\xBF\x58\x7B\xEF\xF2\x2E\x9F\xAB\x1B\x59\xC1\x58\xF3\xB2\x86\x2F\x9B\x2E\x85\x45\xEB\x2D\x53\x6A\xE6\xB8\x74\x53\x38\x96\xE6\xE6\xF2\xB7\xF0\x63\xE6\x74\xA8\x40\xAD\x54\x80\xB5\x19\x8E\xB1\xC8\x3E\xD1\xDE\x86\xFF\xA7\x9D\xB4\xF8\xAA\x76\xD2\x72\xD3\x4E\x5A\x7D\x4D\x3B\x69\xB5\xC9\xFA\xE9\x75\xCC\xB3\xBC\x2A\x5D\x44\x20\x9B\x77\xD7\x70\x20\x7D\x03\x0B\x6D\xDA\x22\x77\xE4\xE4\x74\x0A\x86\xAC\x4C\x30\x64\x00\x6E\xDC\x49\xD1\x1A\x1D\xBA\x7C\x1E\xD9\x87\x74\x24\x66\x5A\x99\xAC\x49\xE9\xA7\x41\xAB\x9D\x93\xA6\xE9\x49\x87\xC7\x34\xDF\x26\xAE\x8E\xE4\xF9\x83\xD1\xB6\x10\x47\x0B\xF2\x7E\x27\xCF\x5F\x35\xF7\xFC\x29\x57\xF5\x36\x4B\xAE\x1A\xE4\xB4\x94\x73\xCF\x5F\xE9\xF4\xC1\xD2\xBA\xF2\x3E\xA5\x9C\x8E\x9E\x3F\xF0\x0C\x43\x3D\x8D\x83\x4D\x11\x1B\x69\xE3\x09\xAF\xBC\x9F\x51\x03\xAB\xBB\x8F\x98\x27\x62\x0E\x6E\x35\x4E\xF7\x75\x38\x24\x66\x72\x83\xBA\x03\xA9\x81\xA4\xAC\x38\xF5\xEB\xCD\x30\x77\x7A\xE6\xD1\x87\xA0\xD6\x92\x9F\x89\x9B\xA4\x3D\x73\x9E\xDE\x2E\xD9\x2A\xE2\xB8\xE5\x77\x6F\xB0\x86\xF5\xC3\x5E\x6E\xB3\x71\xD4\x75\xC8\x9E\x66\xFE\xC8\x7C\x1F\x0C\x1F\x6C\x87\x0E\x2F\x67\xFB\xA0\x4D\x1D\x53\x61\x1F\x5C\x72\x1F\x6C\xEE\
x6A\x1F\x5C\xCE\xF6\xC1\x85\x24\x92\x05\xF9\x5E\xAE\x6E\x82\x2D\x08\xD2\xAA\xD1\x35\xDD\x9F\xC0\x36\x98\xE1\x09\x2E\xE2\xCB\x66\xA7\x89\x96\x5E\x6B\xD2\x8F\x84\xFD\x43\x16\xF3\xFE\x52\xCB\x9A\x74\x5A\x88\x98\x57\xAC\x74\x73\xD7\xE3\x25\x72\xB8\x07\x31\x0A\x5B\xC8\xA0\x18\x4E\xA3\x22\x64\x01\x0E\x59\x6F\x59\xC9\x30\x16\xFE\xD7\xE9\xF4\x65\x67\xA7\x2F\x3B\x1D\xC3\x98\xFF\x50\x46\x69\xDB\x33\x6F\xBF\xDB\x93\x5B\x36\x43\xD4\xD5\xCD\x9B\x94\x53\x82\xFF\x95\x8A\xD8\xF0\x45\xB0\xFD\xC3\x4F\x26\xFC\x95\x35\x7F\xDD\xEA\x7C\xE6\xE5\xD4\xEB\x51\x24\xDD\x60\xFC\xAB\xFA\xAA\x40\x12\xFE\xCC\x0B\xD0\x28\x24\xD5\x93\xC4\x32\x0C\x1B\x3D\xC0\x19\x53\xCB\x74\xB0\x86\xFF\x27\x09\x62\x36\x33\x7C\x9C\xF0\x1C\x41\x63\xE8\x42\x20\x70\xED\xD1\x60\xBC\xB9\x41\x06\x3E\xC4\x91\x6A\xFF\xC6\x37\x52\xF6\x67\xF5\xFD\x70\x0A\x79\xC2\x36\xE6\xDB\x31\xE5\xBF\xED\x38\x43\x8E\x3D\xB3\x3B\x91\x6B\x18\x41\x78\x1A\xB2\x08\x04\x13\x2A\xD3\x11\x4D\x9B\x79\xDB\x80\x4D\x1D\x79\x4B\x66\x83\xA4\xCD\x5F\x6D\x99\xB1\xA7\xBD\x1D\xBB\x1F\x99\xD0\x05\x84\x41\xD3\x0E\x92\x31\xC7\xB4\x3E\xC0\xEC\x74\x7F\x85\xF1\x5E\xF2\x7D\xC4\xB3\xA9\x48\xB4\xA7\x2F\x9A\xF6\x01\x72\x50\x53\x3B\xEA\x3D\x53\xC9\x9D\x76\xCB\x60\xD2\x3D\xB3\x83\x56\x84\xC6\x54\x0C\xA3\xD8\x21\xD2\x60\x27\xE0\x26\x88\x4E\x32\x07\x12\xB9\x64\x5C\xB6\x8E\x8B\xD3\x35\xA0\xC0\x8D\x32\x05\x78\x85\x4D\xE3\xF9\xA3\xB7\x22\x61\xD5\x5D\xCD\x24\x76\x82\x94\x14\xF2\xDB\xD4\xA2\xBF\xCD\xE5\xFA\xDB\x03\xFE\xDB\x03\xFE\xDB\xE4\xB9\xFF\xD1\x91\xB7\xFE\xF6\xF8\xFF\xD6\x8E\xBF\x47\x1C\xB3\x9C\x54\x12\xB1\xA8\xFF\x47\x9F\x5F\xE5\xCF\xF4\xD5\x31\xD3\x90\x8C\x60\xE3\x03\x35\xE2\xDA\x65\xA3\x9A\xFF\xFA\x54\x0D\xF9\x59\x35\x60\xE8\xF5\x43\xCA\x3A\xE5\x8B\x11\x11\xF5\x0F\xEB\x60\xB5\xE6\xA3\x23\xDC\x4E\x50\x02\xD7\x5D\x7E\xCD\x1B\xD4\xFD\xF6\xAF\xB2\x75\x9A\xAD\x33\xA1\x92\xEC\x9A\xCF\xBE\x1E\x95\x58\x54\xF2\x7B\xD7\x2A\xC9\x49\x1D\x74\xAA\x92\xE7\x8E\x7D\x7E\x4D\x0C\x30\xA9\x27\x3C\x7E\xBF\x4A\x8F\x86\xBE\x5F\x36\x6A\x7B\xED\xE1\xF4\x78\x81\xF7\x5D\x5A\x79\xA0\x3C\xF3\x81\x12\
xA5\xFF\x60\x84\xE7\x01\x70\x4C\x56\x11\x3F\xE6\x0D\xC3\x4F\xEF\x75\xEA\xD1\x93\x07\xCC\x05\x7E\x7A\x05\x87\x39\x22\xFF\x09\xA2\x4C\xA6\x70\xF8\x94\x1A\x82\x61\xCF\xEB\x26\xE5\xFF\xFE\xF4\xE3\xCF\x4F\x3F\x7E\x36\xFD\xD8\xFC\x21\x00\x66\x85\xB6\x3C\xDD\x7D\x6E\x42\x6F\x7C\x4F\xFC\x65\xCF\x7C\x0B\xB0\x53\x14\xB9\x4E\xD6\xC0\x13\xF7\xCC\x53\xF1\xDB\xA7\x36\x7D\x7B\x25\x7E\x7B\xE5\xF4\xB7\xCD\xAF\x57\xBA\x3E\x7D\x96\xA0\x8D\x5E\xAD\x1E\x29\x64\x2D\xAB\x15\x13\x5D\x68\xE7\x66\x9C\x73\x6D\x82\xB6\xCB\x68\xF7\xCB\x57\x80\x37\x8F\xF5\xCA\xCE\xCC\x8C\x0C\xF9\xB4\xCD\x08\x5F\x36\xE7\x8C\x09\xE6\xB6\x30\xD1\xEC\x20\xEB\x27\xF4\x76\xC8\x04\x27\xE5\x7A\x84\xDD\xEF\x4D\x04\xE5\x1F\x2C\x08\x6F\xE0\x75\xD9\x3E\x66\x6A\x49\x06\x42\xBF\x31\xB1\xB8\x15\xE0\x83\x0A\xDF\x3B\xEB\xDF\x77\x0C\x49\x23\xC8\x28\xE2\x32\xEC\xB3\xC2\x87\xC2\xB6\x24\xB2\x1E\xA2\xDB\x36\x42\xC5\x23\x41\x1C\x05\x4E\xD8\x9F\xFD\xC2\x8C\xCB\xAA\xF0\xC9\x59\x5A\x40\x19\x0B\x25\x8E\x96\xB9\x22\x19\x47\x6A\x52\x86\x26\x1D\x32\x34\x08\x4D\x17\x3C\xBB\x32\x36\x09\xC1\x28\xA7\x5E\x84\xFC\x7D\xEC\xCE\xB3\xC6\x9A\xC4\x92\x96\x83\x00\x91\x9C\x3D\x5A\x28\xFC\x24\x93\x3F\xA9\xAD\x69\xEC\xDD\x44\xAC\xB8\xDE\x6D\x4E\x81\xA4\x82\x14\xD1\x21\xF3\x14\x43\x81\x4A\xD2\x16\x71\x68\xCF\x1A\x38\x7B\x97\x03\x87\x58\xA9\x8D\xBD\x89\x21\x7D\x7C\x1E\x73\xC4\xEC\xCB\xA7\x86\x4A\xE8\x4A\x4E\x8F\x0F\x06\x3B\x63\xB5\xC8\xA8\xAB\x48\xE5\x3B\xE4\x49\x12\xCC\xA1\xD4\xE6\xCA\x43\xF2\x64\x58\xD2\x5F\x8C\x97\x94\xF6\xB7\xE1\xBD\xAB\xFB\x3C\xA5\xC3\x46\x1A\xE1\x21\x17\x32\x26\xAB\x32\xA3\x90\x1D\x16\x5A\x44\x58\x5A\x34\x35\x28\x21\x20\x8A\x62\xEF\xA5\x6C\xAE\x3D\x03\x9A\xB2\x67\xFD\xAF\xDF\xBE\x7D\xBB\x3C\x16\x6C\x5A\x46\x3C\xD5\x68\x66\x76\x56\x33\x33\x36\x33\x5B\x69\xE6\xDA\x6B\x29\x86\x92\xBA\x84\x39\x24\xAC\x2D\xCE\xB7\x86\x7C\x43\x3F\xF1\x85\x15\x16\xA1\x4A\xE6\x28\xFC\x7C\x9E\x7C\x43\x3B\xC2\x37\xF4\xE9\xD5\x92\x3B\x89\x6F\xE8\x14\xA5\x11\xD3\x94\x44\xAC\xDA\x04\xF4\x3F\x51\x08\xA5\xEC\x9F\x2F\x98\x44\x29\x31\x0F\xC9\x22\xB5\x43\x42\x1D\x12\x62\
xA2\xB1\x0F\xBB\x05\xC8\xB2\xB7\xFB\x0C\xA1\x6F\xFE\x64\x24\x7C\xDD\xE2\x20\x22\xC4\xDA\x71\x10\x7C\xA4\xCC\x2F\xAE\x01\xD6\xD3\x88\xED\x43\x8A\xE8\xCC\x9B\x6B\x61\xDB\x05\xB2\xFF\x01\xA2\xA0\x0D\x28\xA1\x63\x75\xB8\x3A\x31\x8D\xD7\xC4\x77\x43\xAA\xEE\xA7\xE6\x91\x3F\x36\x46\xFE\x38\x3B\x05\x08\x5D\xEB\x23\x38\x65\xE6\xBB\xF0\x02\xB5\x8B\xB6\x46\xA0\x55\x7D\xBA\x1E\x2D\xD1\x43\x3A\xD6\x52\x3F\xCE\x1B\x88\xEE\x1A\x77\x56\x4D\x7A\x89\x3B\x3D\x3E\x3D\x1C\x9A\x20\x4F\x3B\x38\x05\x9E\x41\xDF\x32\xAF\xAF\xFB\x93\x67\xBC\x3B\x76\xD9\xB5\xE6\xD5\x8C\x3C\x4D\xC2\xC6\xFD\xCB\x9F\x5F\x25\x04\xE5\xA7\x5F\xFA\xFC\x1A\x73\xF7\x4D\x73\x65\xA9\xFC\xB7\xFF\xD2\x5A\x61\x7E\xFC\x7D\xAB\x1F\x33\x83\xDA\xFF\xF9\x5F\x5A\x25\x3E\xA4\x5F\x5E\x68\x0F\x0F\x22\xD7\xC8\x27\xAA\x48\x90\xEE\x3F\xC9\x1F\x85\xAB\xDB\x48\x44\x24\xED\xAD\xCC\x57\xE3\x65\xF3\x89\x6A\x9B\x8E\xFF\x1F\xAE\x64\xFD\x16\x47\x41\xF9\xDC\x18\x90\x79\x96\x25\xEE\x56\x23\x79\xEB\xAE\x38\x1A\xAC\x37\x37\x86\xDC\xE5\x2E\x13\x08\x03\xED\xAC\xCF\x6E\x10\xCD\xE0\xEA\xEE\x60\x27\x0E\x78\x1B\x2B\xC8\x42\x91\x88\x61\x80\x45\x5A\x61\x31\x56\x6C\x56\xC5\x66\xB1\xD4\xD8\x6B\xC4\xC0\x60\xA6\xCA\x48\x98\x54\xB8\x32\xA1\x16\xC0\x7E\xF9\x64\x45\x03\xE6\x13\x15\x71\x47\x7F\xB8\x5A\x05\x2E\xD8\x6E\x5C\xBE\x0E\x45\xF0\xA1\xCA\x54\x27\xE6\x05\x2E\x83\x1F\xAE\x06\x9D\x06\x0C\x49\x18\xDE\x8D\xC8\xC6\x24\xC1\xB6\x49\xB8\x84\x9A\xD1\xCE\x27\x4C\x18\xB2\xFE\xFD\xE6\x0A\xD2\x5D\x7F\x6A\x36\xAB\xC8\x09\x7B\x4D\xBD\x57\x74\x77\x75\x31\x53\x43\xFD\xA8\x06\x48\x7D\x71\xFD\xE8\xE3\xDF\x36\x2C\x3E\xF0\xA8\xFA\x2F\x12\x30\xDF\xDA\xF3\xAE\x72\xE1\x11\xB7\xB8\xF5\x40\xC6\x60\x20\x57\xF3\x97\x30\x88\x60\x20\x5F\x79\x9D\x89\xD0\x21\xC5\xB3\x03\x86\xF2\x62\xA6\x1E\x3D\x71\xD6\xFF\xBE\xE3\x8F\x7F\xDB\x07\x86\xC5\x53\x68\xF1\x23\xC7\x6E\xF1\x87\x5E\xB9\x86\x3B\x44\x7F\x92\x60\xA4\xAA\xAD\x4C\x69\x5C\x37\x5C\xCC\x2A\x96\x5D\xFC\xA1\x57\x10\x93\x55\x22\x39\xFD\x62\xD6\x4D\x1F\xDF\xD4\xAC\x20\x7C\x7C\x7E\x56\x3A\x7C\xCC\x80\x4D\x73\x7D\x74\xC6\x45\x5C\xFE\x29\x4F\x1D\
x39\x1C\x20\x41\xF0\xAF\x23\xC8\x1D\xD6\xF7\x4F\xF1\x47\x97\x41\xDA\x75\x98\xA6\x24\xEA\x41\x30\xC3\x20\x72\x47\x29\xE3\x20\x86\x2D\x62\xC3\x20\x0A\x3F\x99\x54\xB4\x32\x9C\xA5\x2B\xD3\x70\x12\x9D\x21\x0F\x5A\xEE\xF5\x3B\x17\x71\x19\x6E\x3E\xE7\xC5\x5A\x98\x0C\x32\x17\xFE\xA6\x41\xA1\xDB\x88\x86\x94\x6A\x37\xCC\xCC\xA3\x27\x2E\x4F\xE2\x50\x5C\x1F\xAA\xA3\x8F\x7F\x1B\x90\x08\x38\x51\x32\x3F\x15\x27\x27\xE7\xE4\x44\xC4\xA4\xF2\xEC\xC9\xC9\x37\x4F\x4E\xBE\x79\x72\xF2\x26\x21\x8D\xCA\x24\xA5\xC9\x01\xBA\xD9\x6D\xFD\x98\x5C\x0E\x43\x45\xBD\xBE\x49\xBA\x92\x40\x8B\x78\x41\x98\x87\xFC\x28\xC9\x58\xFE\xEF\xBD\x0F\x82\x75\x14\xD6\xA6\x75\x08\xED\xDB\x33\xAA\xFB\xF1\x09\xF8\x0B\x77\xD2\x39\x08\x21\x1B\x9A\xBA\x04\x4D\xFF\x44\x15\xBD\xCE\x3F\x5C\x09\x9A\x2E\xB5\x85\xFF\x18\x73\x9B\x3E\x5A\x85\xD1\xF8\xF6\x4C\x97\x27\xFC\x75\x30\xC2\xB6\x65\x58\x08\xAB\x9F\x87\xFD\x8C\x78\x9C\x8A\xDB\x5A\x26\x57\x4E\xF2\x4B\x35\x0E\x99\xB3\x11\xD0\x49\x40\xF2\xB2\xBD\x70\x96\xE5\x91\xE3\xE5\x2A\x1C\xDF\xC7\xCB\xE6\x65\xD2\xE6\x29\xFF\x97\x58\x7B\x2E\x44\x8F\x84\x22\x15\xDD\x6C\xA1\x9B\x3F\x56\xB9\x6C\x3B\xA2\x05\x99\xA3\xA1\x08\x6A\x19\x21\x1A\x49\xAB\x2A\xF9\x96\x67\xFB\xA0\x96\x2B\x57\x39\x9B\xD4\x72\x3E\x57\xCB\xF9\xC4\x6B\x97\xC7\x0A\x0C\x4C\xAF\x39\xCB\x1D\xB5\x57\xC9\x16\x95\x6C\x91\xA5\x81\xD6\x93\x69\x1D\x3A\x50\x60\x80\xAC\x33\xAE\x58\x55\xCB\x7F\xA9\x22\x9D\xC9\xC7\xC4\x0B\xF0\xD1\xEA\x94\xB5\x92\x01\xA0\xE5\xA3\xD5\xB8\xAE\x9E\xBF\xDF\x98\xEC\x44\xBF\x00\x86\x4D\x1C\xAD\x5E\x66\x3A\x75\x76\xA6\x84\x52\x5B\xD8\x49\x42\xC3\x16\x92\x24\xB4\xA0\x84\x16\x1B\x24\xD4\xAE\x48\x28\x0A\x16\xA7\x24\x34\x7D\xBC\x2A\xA1\x53\xE9\x28\xA1\x93\x8C\x26\xD9\x24\xBA\xB9\xCB\x1F\x13\x4B\x0B\xD3\x94\x21\x2D\x74\x97\x69\xFC\xD9\xD1\x90\x91\xE4\xFC\x1B\x10\x05\x78\x22\x2E\x95\x9C\x36\xC6\x6D\xFD\x0E\xD2\x97\x41\x7E\x82\xA4\x7E\x72\x31\x01\xA8\xBC\x56\xCD\x4F\x95\xF1\xA4\xAC\x53\xF8\xA7\x0E\x12\x16\xC1\x16\x3F\x31\xFD\xF8\x91\x6A\x7C\xF4\xE6\x1F\xBD\x79\xF3\x83\x0E\xBF\x4B\xE1\xF0\xED\x47\x2B\xA7\x1F\xBD\x19\
xFF\x9C\x3C\x90\x09\xD5\x42\x37\x4F\xF1\xCF\xC8\xC3\x38\x59\x96\x4F\x61\xCB\x8E\x4C\x6D\xC1\xAA\x7E\x61\xDC\xCA\x94\xD1\x40\x0E\x2F\x2E\x29\x3B\x94\xAE\xF0\x39\x8C\xCA\xAA\xFB\x94\x5C\x1E\x15\x73\xCA\xE2\x57\xBF\xB0\x62\xEE\x54\xFE\x17\x14\x13\xF9\xBE\xA8\x1E\x47\xBA\x8E\x1D\x89\x5F\x85\xBB\x6A\xFF\x0B\xFC\xEB\xF1\xDD\xA1\x74\x95\xFF\xA2\xC2\x5F\x57\x77\x87\x8A\x43\x5D\xFA\x77\x8F\xAE\xF2\xEF\x1E\xF9\xB2\x26\xE6\xB8\x14\xE0\xAF\x25\x7F\xC6\x53\xA3\x10\x3D\xE4\xE9\x1C\x90\xBB\xE2\x19\xDF\x5D\x0D\xD6\xA2\xEE\x5E\x8B\xB0\x43\x38\x74\xE6\xCC\xE8\xBF\xA4\xAC\xCF\x8E\xF7\x25\xC5\x4A\x23\xD3\xEB\x53\x4C\xE7\xE7\x03\xC1\xB8\x4C\x3D\x0B\x3A\xC0\xFF\xE9\x37\x56\xC8\x98\x01\x5C\x7C\x7F\xAC\x42\xF8\xD7\xB1\x8B\x94\xFE\x36\xB0\xF2\xD3\xBE\xF9\xE5\x30\xFB\xAE\xEC\x8B\xF0\x45\x1E\x2F\x0F\xB3\x98\xDB\x48\x8B\x15\xA9\x51\x59\x32\x9D\xB3\xB9\xDD\xBA\x70\x59\x3C\x61\x31\x9F\x7D\x1C\x8A\x15\x83\x17\x04\x8B\x8B\xA5\x76\x79\x5F\x85\x17\x05\xEB\xBF\x48\x75\x15\xB3\xBA\xDC\x02\x19\x5D\xE5\xD4\x50\x41\xA2\xE0\x19\xA9\x09\xBB\x62\x41\xD0\x2E\xB8\x2B\xD3\x8B\x37\xBD\xF6\x1B\xC8\x7A\x32\x05\xFA\x17\x2B\xA6\xFA\x99\xD5\x6B\x54\x8F\xCA\x5D\x0D\xAB\x61\x25\x59\x60\xAA\x83\x04\x09\x4E\x64\xAD\x71\x05\x58\x10\x61\x58\xA7\xA9\x15\xB0\x8A\x62\x38\x35\xF8\x9B\x86\x9E\x1D\xFA\x77\x18\xFE\x6C\xCA\x59\xF9\x0A\xE3\x70\x6A\xBC\x71\x0B\x3F\x9F\x1D\xCB\xE1\xCD\xBE\xE2\x3C\xA4\x83\xF1\x6F\xFA\xE0\x23\xA5\x63\x75\xEC\xD7\xC7\x1D\x14\x28\x64\x6D\x85\x6E\x11\x44\x14\xA7\xFC\x1B\x6A\x44\x88\x40\xF7\x37\x44\x45\x68\x58\xE8\x05\xFD\x16\x9F\x10\x74\xA0\x64\xDD\xB3\xC0\x47\x53\x81\x8F\xAD\x17\x70\xDA\xBF\x56\xC5\x43\xF1\xCF\xBF\x69\x82\xA9\xFC\xA0\x5E\xB9\xDB\x77\xEA\x92\xCA\x22\x17\x94\x10\xFA\x3E\xB7\xE6\x9B\xD3\xA7\x7D\x73\xBF\xAE\xC6\x84\x61\x0B\xFF\xDC\xF3\x6E\xFE\xF5\x4B\x7A\xFE\xDD\x2D\xC1\xD1\x7F\x1F\x15\xEE\x7B\xE2\x8E\xA8\xE3\x33\x83\xBD\x6C\x9E\x66\xD9\xF7\x20\x87\x2E\x1B\x2F\x9B\xF7\x89\x33\xE0\xA9\x95\x9A\x7F\x7A\xFD\xC5\xEF\x14\x96\x4A\xC3\x76\x66\xD7\x07\x2B\xC5\x73\x29\x3E\xF9\x11\xF3\xCB\xE6\x5D\xAC\
xF4\xC1\x15\x77\xE2\x6B\x6A\xEE\x4F\x7C\x9B\xB3\xB3\xEF\x1E\x9C\x57\x00\x2A\x63\x54\x70\x3F\xFB\xB2\x87\xAB\xF1\x9F\x56\x23\x4A\x3A\xED\xF7\x46\x8E\x8E\xD3\xFE\xB3\x6A\xEC\xFE\x56\xC4\x3B\x79\xAA\x2F\x8C\x64\x02\xBE\x4B\x7C\x77\xA5\x57\x11\xF1\x14\x77\xF3\x7B\xE6\xDD\x82\x35\x89\xA0\x85\x72\x62\xEF\xA8\x5C\x99\x4C\x96\x06\x36\xCA\x8C\xD6\xC3\x88\x1B\xED\xDD\x74\x6B\x00\xCA\xF8\x9D\x30\x7C\xE3\xDD\x4A\x0D\x43\x38\x9B\x63\xB7\xFF\xE8\x4D\xD8\x8F\xE7\x5D\xED\x6A\x57\xC0\xE8\x31\xF0\xAB\x76\x7F\x5B\xE4\xCF\xF8\xE7\x46\x57\x73\x91\x64\xD7\xFD\x73\xA3\x00\xCA\xD4\xCE\xC4\x13\xF6\x9E\x01\xD3\x0B\x7E\xAA\x98\xC6\x9F\xDD\x71\x34\xFE\x0E\x97\x45\xA8\x1A\x1E\xC9\x99\xAF\xD0\x71\x3C\xCF\xCF\x1C\x37\x61\xD4\xA2\xD9\xE6\x15\xC0\x15\xB3\xC9\x7C\x83\x5C\x6A\x18\x1E\xC3\x62\x12\xA3\xF3\x90\x9F\xC5\x38\x64\xA1\x4E\xEB\x95\x5B\xD0\xC8\xE8\x17\x9C\xBF\x0B\x6E\xD1\x2F\x44\xB4\x3A\xBE\xB4\x8A\xDE\xE8\x06\x41\xBD\x71\xA8\x73\x64\x32\x66\xD7\x87\x66\x8A\x1B\x69\x19\x83\xC1\xF0\x91\x66\x14\x9E\xDD\x1C\x36\x32\xC0\x99\xC2\xFC\xF6\x59\xF8\x8D\xE4\xC8\xDE\x12\x94\x9E\x7C\x23\xCE\x1C\xF9\xEC\x86\xB8\x20\x08\xF0\xCD\xD1\xAA\x56\x07\xEA\x27\x12\x36\xCF\x80\xC1\x12\x37\x5C\x75\x4D\xB2\xB6\x51\x7E\xCF\x9C\x77\xB1\x6A\x42\x92\xEB\x3D\x53\x85\x17\x74\x7F\x57\xC2\xC0\x60\x99\x87\x26\x61\x30\xC3\x34\x1F\x2C\xE9\x89\x23\xA1\x51\xD5\x1B\xBA\xD6\xE8\x64\x84\x00\x88\x51\xBA\xFA\xB5\x10\x92\xE3\xD1\xC1\x86\x01\x54\x7E\x2F\x5D\x3A\xDC\x9F\x7E\x7A\x30\xFD\xF4\x5A\xBA\x88\x70\x99\x5B\x1C\x79\x73\xE3\xB2\xB9\x37\x32\x45\x44\x93\x1D\x34\x74\xC1\x4C\x07\x6C\x83\x99\xE1\x40\x87\xD9\x0E\xE2\xB1\xF4\xAA\xDF\x92\xE5\xD1\x72\xF8\x08\xF1\x90\x3D\x12\xB4\x69\x04\xDF\x59\xF8\xF2\x78\x68\x98\xA5\x67\xDC\x02\x4E\x78\xB7\xF0\xE6\x59\xFF\x6F\x6F\xDF\xBE\x9D\xC1\x11\x2A\xB7\x62\xAE\x11\xD4\x3E\x98\xAC\x7B\x61\x84\xB2\x09\x6F\x67\x15\x6A\x0A\x58\x4F\x4E\xF9\xD7\xD5\x28\x91\x31\xA1\x5F\x04\xF4\x56\xFE\x6D\xA3\xC4\xEF\xF8\x07\xF1\x53\xEE\x94\xFF\xA6\x51\x72\x81\xFD\xFD\x82\x21\xAA\xFC\x5B\xF1\xD3\x12\x43\x16\x7E\xDA\x22\xB2\x90\
x81\x4D\xBC\x70\x8D\x5B\xBA\xAD\x91\x7F\x5C\x58\x51\xFE\xAF\xFC\xD2\xDC\xB1\x82\x79\x88\x06\x67\x8B\x1B\x7C\xB3\xBF\xD4\xFE\x1F\x4F\xA5\x04\x22\x12\x93\xB3\x67\x6C\x8A\x0E\x22\x0E\x0C\x3F\xAA\x84\xAA\x02\xA1\x46\x55\xE4\x89\xA1\x13\xD7\x1E\x0D\x26\x9C\xAF\x32\xE6\x37\x4C\xFA\x34\x72\xD2\xD2\xED\x9D\xD3\xE5\xF4\x14\xA7\x10\xCC\x45\x30\x10\x66\x37\xFB\x9D\x78\xD0\x29\x39\x15\x22\x1A\xFC\xA7\xD5\x08\xCA\x6E\x76\x79\xD2\x44\xB3\x5B\x20\x4B\xAA\xCC\x93\x77\x2C\x8D\x33\x5E\x4F\x5A\x3C\xE3\x65\x53\x4E\x2F\xD7\x95\xC4\x89\x34\x7B\xA5\xD7\xD7\x99\xD6\x7A\x85\x4C\x22\x3F\x99\xE8\xF9\xFD\x6D\xED\xAC\x1C\x32\x42\x81\x7B\x07\x72\x47\xB3\xB0\xA5\xFA\x41\xD2\x04\x56\x2B\x01\x3B\xAE\x8F\xCE\xF0\xEE\xD3\x8E\xBD\xA2\x43\x7D\x04\x35\x60\xC4\xBE\xAA\xE4\x3E\x81\xEC\x3F\x66\x65\x4F\x3E\x3F\xFB\xEE\xC2\xFC\xBB\xE2\x94\x7B\x1C\x9F\xBA\x72\xCD\x15\xDE\x44\x02\xA1\x48\x43\x9A\xF8\xFC\x63\x22\x95\xDE\x33\x6F\x1D\x14\x4E\xDC\xCE\xCE\xA8\xE7\xFC\x75\x02\xC6\x73\x23\x50\x41\x81\x63\xD4\x93\x17\xDD\xF4\x38\xB3\x92\x30\x7E\xCF\xEC\x91\xDF\xC8\x5E\x36\x6F\x5D\xEB\xC4\x83\x83\xDA\xE8\xCC\x7F\xB0\x17\x37\x95\xDE\x33\x57\xE8\xCC\x7F\xFB\x60\x36\x39\xF3\xDF\x8E\xBA\x83\x92\x7D\xFB\x69\x77\xFD\x9E\x79\x5B\xD4\xFB\x6F\x5B\xFF\x36\x2A\xFE\xCD\x0D\x78\x6A\xD6\x80\x77\xB3\x01\xEF\xDA\xDC\x80\x77\xA5\x06\xBC\x6B\x53\x03\xDE\x19\x1B\xF0\xCE\xCD\x0D\x78\xCF\x6C\x1A\x9F\x5E\xFB\xEE\xB9\x33\x1A\xF7\xDC\xAC\x71\x2F\x69\xB6\xEE\x96\xDE\xDC\xBC\x5B\x3A\xB5\xEF\x96\xDE\xD4\xC0\xE7\x63\x03\x9F\x3F\x75\xD9\xF1\x41\x7D\x2A\xFC\xEE\xB3\x3B\xBA\x9D\xE1\x8C\x14\x31\x7F\x5D\x42\x0C\x82\xC6\x2C\x43\x75\x3E\x6C\xE7\xED\xD8\x7D\xF1\x03\xD3\xFA\x6B\x87\x8A\xDD\xF9\xDB\x6F\xAC\xFA\xDD\xC3\x57\xF5\xBE\x30\xDC\x81\x58\x14\x60\x33\x2E\x9C\x34\xFD\xDF\x7B\x63\xA5\x3F\x6D\x2F\x89\x43\xF3\x07\x5C\x25\x3C\xC9\x36\x41\x9B\x0F\x80\x7D\x53\x4F\x02\x5B\xA9\x94\x8B\x2F\x57\xA5\x4C\xF8\xBE\x9A\x1A\x1D\x1A\xEC\xAA\x79\x83\x53\xC0\xC9\xC6\x16\xA7\x36\x4F\x54\x10\xBF\x79\x2D\xAF\xBE\x42\xCB\x49\xC1\xA3\x22\xFD\x08\xA3\xA3\x24\x13\x3F\x99\x3C\
x39\x22\xFF\x5D\xFD\xCD\x78\x49\xC5\xF2\x75\xB0\x21\xAA\x04\x50\x5E\x1F\x0D\x25\x63\x61\x4A\x3A\xBA\x90\x6C\x40\x47\x97\xBE\xBA\x3B\x99\x90\x74\xBE\xCF\xCC\x23\xD2\x36\xC4\x6A\x4B\xB7\x18\x89\xAA\x5F\xC6\x17\x70\x08\xA2\xFA\xC9\x85\x09\xC1\x0E\xD5\xC1\x32\x6B\x80\x6E\x58\xF9\xE7\xA5\x28\xA9\x90\x33\xE2\xA5\x6D\x9E\x0C\xBB\x36\x19\x96\x89\x10\x9B\x67\xC2\x4E\x33\xB1\x96\xE8\x58\x4B\x90\x74\xC7\x49\xB1\x6B\x93\x62\x67\x93\x62\xE6\x93\x12\x8C\xBF\x4A\xEE\x7A\x27\x7F\x0D\x2F\x5E\x4A\xE2\x86\x94\xC7\x5B\xA5\x52\x5A\x6B\x53\xE3\x5E\xAD\xDE\x74\x81\x5C\x7B\xFB\x6C\x5F\x83\x27\x74\x02\xA5\x21\x2B\x5B\x23\xFC\x6C\x9D\xAB\x1F\x5B\xEA\x66\xA5\x43\xB1\x80\x6E\xA4\xC4\x37\xB3\xAC\x75\x75\x02\x3C\x87\xF1\x12\x1A\xDF\xB8\x36\x4A\x54\x8D\xC6\x17\x70\xA0\xC2\x32\x39\xF2\xED\x7B\xB1\xCF\x78\x20\x8E\xAF\x2C\x5F\x60\x76\x93\x7A\x05\xD5\xF0\xDA\xBD\x89\x54\x44\x0D\x82\xA3\x19\x3E\xD8\xFD\xEF\xB7\x56\xAE\xCE\xFE\xFD\x76\xC0\xFF\x2F\xAB\x35\x4D\x0F\xF5\x78\xA8\xED\x9B\x86\x26\x70\x7A\xD2\xFA\xF6\xE9\x71\x08\xD2\xDE\x2F\x39\xF1\xEC\x5C\x1D\x16\xC5\x16\x5F\x52\x11\xDD\xBB\x41\xAC\xA0\xD0\x0E\xD5\xC2\x4B\xB1\xED\x8D\x60\x8F\x37\x71\x09\xD0\x6F\xD1\xB8\x2D\xD7\x1E\x0D\x5B\x5E\x13\x7B\xBC\xB9\x1A\x17\xCE\xD6\x7C\xE1\x6C\x31\xA9\x01\xF9\x66\xB1\x82\x65\x28\x12\x4F\x5B\x4D\x83\x24\xF6\xFE\x9C\x6B\x5C\x87\xD0\xF1\x8E\x0D\x6A\x50\x6A\xEC\xB7\x5C\x17\x5A\xDF\x4D\x70\xE3\x8D\x6B\x67\x70\xE3\x4B\xD7\x85\xE2\x35\x49\x94\xCE\x11\xE1\x7F\x85\x8A\x02\x90\x68\x27\x8F\x0D\xCD\xDA\x7A\xE2\x64\xD4\xCB\x60\x22\x83\x01\x08\x42\x39\x03\x8F\x8A\x9A\x3A\x98\x53\x8B\xB0\xBA\xD6\x26\x33\x16\xD1\x44\xDD\x5C\x34\x6E\xB1\x49\xC7\x2D\x66\x3A\x6E\x31\xD3\x71\x25\x39\x87\x4E\xEB\x38\x6F\x9F\x9D\x8B\x64\x5B\x34\x73\xBD\x71\xB8\xAC\xDA\x46\x80\x3C\x40\xD3\xDA\xBA\xC5\xFA\xC2\x37\xEB\x2A\x18\xC2\xB6\x48\xC2\xB6\x38\x2D\x6C\xF5\x24\x6C\x8B\x3B\xAD\x96\xC5\xA6\xD5\x52\x63\xB5\x20\x51\x88\x4B\x02\xB1\xB0\x5C\x38\x75\x5C\x38\x35\x17\x4E\x43\x80\xC4\xD3\xAA\xCD\x4C\xAA\x0D\x02\x83\x1E\xC2\x3C\x6C\xBE\x86\x1E\x36\xB3\x1E\x36\x2B\
x3D\x3C\xBD\x9C\xE2\x43\x7D\x13\xEB\xE7\x72\x4A\x4F\xCE\x96\x53\x3B\x2D\xA7\x05\x97\xD3\xF2\x2A\x90\x96\xB0\x74\x9A\xB0\x34\xCC\x8D\x61\x9B\x40\xEB\xC2\x29\xCA\x13\xC7\x22\x2C\xB6\xA3\x61\x19\x96\x4E\xE7\x3A\xB7\x48\x4B\x67\x39\x5F\x3A\x4B\x8C\xC4\x22\x8C\xC4\x32\x56\xB0\x08\x45\xE2\xD2\x59\xC8\xD2\xE9\xDC\xC2\x6D\x63\xE9\x6C\x73\xE9\xB0\xD4\xD8\x57\xA4\xF4\x5A\x9A\x47\xDC\x16\x97\x4E\x38\x18\x6D\xA5\xA5\xD3\x12\x9B\xBF\x66\xC6\x50\x5C\x77\x41\x9E\x9B\x29\xBD\x61\x21\xE4\x2E\x6E\x41\xFD\xD0\x60\x6B\xEB\x2B\xA2\xE5\x14\xC7\x74\x2B\xD6\xF1\x88\xBB\xAA\xE6\xB1\xA1\x2D\x84\x38\x7D\xA8\xB8\x17\x37\x4F\xF2\xA2\xF2\xB2\x50\x6D\xDB\xA8\x4E\x2B\xD9\xC6\x8F\x86\x36\x8C\x1D\xA8\x15\xAA\x55\xB5\x53\x05\xDD\x74\x14\x4E\xA5\x37\x86\x2D\xB7\xE5\xAA\x34\x76\xCD\x7C\xEC\x9A\x89\x0F\x67\xD2\x5B\x60\x61\x99\xF3\xE1\x60\x94\x86\x25\x5B\x52\x91\xA4\xA5\x0F\x83\x67\x49\xEC\xD3\x46\xEA\x9B\xDA\xB5\x69\xD0\xD6\x79\x73\x30\x3A\xF3\x3D\x5E\x56\x75\x1D\xF5\x49\xAB\x9A\x29\x90\x6B\x8A\x1F\x70\xC5\xC4\xEF\x27\x40\xB2\xD0\x02\x57\x9A\x1F\xDC\xCA\xED\x89\x7E\xC1\x9C\xE8\x17\xF4\x09\x52\x60\xDE\x07\xBE\x4F\xFF\xDC\x18\xF1\xFE\xD7\x76\x73\xED\x54\x38\xF9\x67\x37\x86\x62\xAB\xA8\x6B\xA5\xAD\x41\xD8\xCF\x73\xE1\xBC\x0D\xD9\x56\x42\xB6\xBE\xBF\xAC\x80\xC4\xD1\x97\x4E\xB9\x5C\x08\xB5\x72\x57\x3E\x60\x48\x71\x50\x9D\x7A\xA2\x2F\x58\x51\xE8\xF2\xFC\xE3\x9A\x01\x07\xA1\xEB\xA8\xB2\x42\x95\x15\xAA\xAC\x5C\x85\x2A\xDD\xE6\xB7\xB3\xA8\x92\xA2\x2A\xBD\xFD\x22\x79\xF6\x39\x9A\xB1\x2E\x25\x75\xB1\x40\x05\x48\xB0\x92\xCF\x2D\xF8\x6D\xBE\xFA\x8A\x12\xFD\xEA\xCB\x30\x37\xA7\x6A\x60\x17\x6B\x44\x82\x0E\x15\xC2\x40\x87\xDC\x99\xEE\xEF\x09\xE4\x4F\xE8\x69\xB1\x5E\x63\x31\xD5\x58\x48\x8D\x45\xEC\x5F\xB5\x5E\xB4\x62\xD1\x6A\x6D\x74\x21\x45\x6B\x45\xD5\x45\x24\xCC\x61\x28\x2A\xF0\x68\xB2\x67\xF5\xFA\xA8\x15\x67\x8F\x1A\x9C\xF2\xBF\xA6\x27\x6A\x89\x9F\x4A\x2C\x14\x85\x37\xEF\xC5\x25\x6B\x25\x17\x06\xFF\xF3\xEF\xBD\x42\x5C\x50\x57\x78\xFB\xDE\x78\xC7\xE6\xC2\x6C\xE5\xB3\x61\xE8\x1B\xDE\xFF\xCE\x5B\xA0\xA7\x21\xB0\xD2\x2F\
x8B\x16\x08\x45\xD5\xAC\xF0\x92\x04\x43\x7D\x13\xF4\xC2\x19\xA3\x81\x95\x5C\x9C\x7A\xCA\xC8\x53\x05\x9F\x2A\xD6\x26\x90\xD4\x75\xA7\x9E\xCA\xE4\xA9\x24\xD7\x6B\x93\x44\x7A\x43\x2A\x98\x55\xD1\x06\xB9\xD7\xAA\xB0\x47\x37\x4C\xD8\x84\x5C\xB1\xBF\x34\x53\x17\x56\x3B\xEE\x75\x7F\xAA\xC2\x50\x3C\xB5\x62\xB5\xED\xBC\xE9\x15\x01\x2F\x28\xE0\xA7\x2B\x24\x8D\xC3\xEA\x93\x78\x91\x5D\x17\x34\x7D\x56\xBB\x1A\x6F\xFA\x26\xDE\x65\xAF\x3D\xA4\x9C\xE5\xCD\x8B\x5C\x24\x05\x1D\xC0\x3C\xCA\x0A\x14\xA0\xCE\xAE\x0C\x6E\xEE\x0A\x67\x0F\xC2\x9A\x05\xBF\xBF\x3C\x51\x50\xAD\xAC\x55\x7C\x86\x78\x00\x66\xD4\x15\x61\xD1\x15\x87\x4B\xD5\xD6\x4D\xB3\x2E\x2F\x48\x01\x78\x6E\x1C\xEA\x3E\xC7\xC0\xCC\x26\xB2\xC6\x73\xEB\xB3\x19\x64\xAB\x89\x0B\xB9\x78\x72\xA9\xB3\x70\xC8\x78\x6E\x24\xD7\x00\x55\xD8\x9A\x44\x36\x44\x3F\xAF\xB3\x47\xD8\xFC\xE2\xD4\xC8\x60\x21\xB9\x22\xD8\x0F\x79\x5F\x07\x6B\xF0\xAC\x35\x2F\x0B\xE6\x60\xB7\xAF\xA2\xA0\x93\x21\x13\x20\x4D\x61\xA8\xA0\x46\x92\x52\x01\x04\x41\x38\x79\x1E\x2C\x37\x68\x01\xE5\x6A\xBE\xA7\x5E\x9B\xF6\xD8\x51\x81\x93\x0E\xDB\xB4\xE9\x7E\xFA\x56\x84\x0B\xAB\xD9\x4F\x1D\x3F\x44\x4D\xBD\x0E\x23\x58\xB4\x79\x10\x82\xCA\x15\x4B\x1E\xC0\x9C\x3A\x02\x5B\xEB\x13\xA9\x1E\x5C\x48\xD0\x7A\x20\xA4\x14\x39\x55\xD3\x17\xBD\x6E\x6D\xC3\xD9\x37\x42\x68\x5E\xC4\x11\x56\x71\x8C\xD7\x06\x90\x43\x5C\xA0\x44\xC5\x1D\x63\x75\x0E\xA8\x2A\xAE\x2E\x95\xB7\x3D\x68\x73\xCF\xD0\xD3\x18\xCE\x1C\x78\xDE\xC2\xE5\x53\x6D\x65\x0A\x04\x69\x39\xE9\x8A\x9B\xE6\xF5\xAD\x15\x78\xF6\x32\x82\x59\xD1\x1F\x50\xF0\x7A\xBE\x12\xF8\xD5\x18\xD7\x15\x3E\x95\x59\xD0\x2C\x16\x8C\x0F\xB8\x0D\xE4\x62\x5E\x0E\xF5\x35\xCD\x22\x29\x54\x4B\xA1\xA0\xB1\xE0\x62\x40\x90\xB5\x38\x12\x49\xAA\x80\xE8\x71\x9C\xD1\x90\xE5\x52\x87\x13\x3D\x8A\xD6\x30\x05\x78\xC9\x13\xCE\x20\x8B\x4D\x86\x2F\x10\xA4\xAA\xC9\xF0\xCD\x67\x86\x6F\xBD\x6E\xF8\x82\x58\x5E\x00\x02\x2B\x61\xB2\x97\x73\x64\xBE\x72\x8E\xE4\xA1\x81\xB6\x56\xE4\x9F\x93\x83\x70\x1E\x4E\xBD\xE9\x14\x1F\x7A\xCA\x71\xDE\xD8\x3A\xB3\xD6\x3A\x73\xA7\xD6\x19\xB6\x8E\x2D\x3B\
xEB\x84\x3B\x99\xE4\xD5\x64\x92\xD7\x09\x7C\x16\x2E\x05\x06\x56\x57\x73\x97\x82\x31\xC6\xE8\xB3\x5D\x0A\x55\xF2\x25\xC8\x74\x60\xBA\x87\x56\x0E\x5E\xD3\x5C\xCA\x74\xD7\x14\x12\x58\xE2\x46\xC8\x17\xEB\x68\x2C\x72\x1F\xA8\x5D\x1B\x2C\xF5\x36\x58\x9B\xDB\x6E\xDB\xD5\xC9\xDA\x6C\xE7\xD6\x66\x3B\xD1\x94\xB4\xB1\x82\x2A\x14\x49\x57\x8A\xE1\x6D\x38\xCE\x0E\x5B\x6C\x49\x8D\xAF\x83\xB5\xB9\x25\x87\x89\x48\xB4\x58\xBB\x66\x46\xB4\x58\xD0\xDA\x2C\x48\xA6\x55\x90\xA5\x91\x57\x6A\xD1\xDA\xAC\xE6\xD6\x66\x68\x44\x33\x97\xC9\xE8\x61\x0F\xB2\x20\xD2\xD2\x46\x17\x13\x07\xF7\x2C\xA9\x04\xF3\x6E\x31\xCD\x7B\x76\xA7\x79\x0F\x85\x7B\x20\x21\x16\x02\x1D\x21\x33\x9F\xC5\x99\x1F\x94\xCC\x7D\x26\x73\x5F\xCC\xE6\xFE\x50\xBC\xF0\x6B\x0D\x99\xB7\xA1\xF8\xEA\xDA\x71\x46\x4B\xEE\xD4\x88\x53\x67\xC2\xE9\x09\x21\x22\x06\x92\x29\xC6\x76\xF5\x40\x08\xBA\xAD\x82\x94\x58\x71\x29\xED\x0B\xE1\x1E\x85\xEC\x0C\x6F\x1E\x56\x3E\xB1\x7B\xDB\xB4\xF2\xE5\x50\xDF\x6C\x5C\xF9\x82\x62\xBC\x48\x2B\xBF\x49\x2B\x7F\x22\x99\x4C\x2B\x7F\x41\x31\x83\x4B\x28\xAD\x2F\xFA\xA0\xA6\xD3\xA5\xA1\xF3\x02\x27\xD3\xCD\x0B\x9A\xA3\xBD\x61\x49\x2B\x71\x70\x4C\x1E\xAB\xC5\xAA\xCB\xAD\x94\x8C\xBF\xE8\x69\x2B\xE7\xDE\x5B\x2D\x99\x0F\x12\xE5\xF8\x1F\xDE\x70\xCD\x5B\xF0\x1F\xEC\xD0\x35\x0D\xB7\x31\x9C\x9E\xD9\x97\x98\x3E\xD2\x80\xE0\x32\x71\x4D\x16\xEB\xC1\x91\x7F\xF9\x1E\x5D\x9F\xAC\x39\xD1\x8B\x95\x4D\x13\x26\x77\x2E\xDB\xA6\x90\x7E\xE7\x7C\x77\x19\x49\x6C\xD3\xB6\x29\xBE\x70\x29\x54\x4A\x21\x78\xC3\xA1\xBD\x66\xA1\xF4\x33\x87\x56\x29\xDB\x66\x78\xE2\x79\x56\xCB\x13\x34\x42\xCB\x85\x80\xE1\xE4\x31\x6C\x41\x9B\xDD\xE1\x33\x9A\x28\x4B\x72\x49\xE8\xEE\x72\xA3\x3B\x7C\x52\x52\x70\x7F\x97\xC9\xFD\x1D\x3D\xDD\x43\x9D\xDC\xDF\x45\xDA\xB3\x5C\x09\x5C\x6F\xEB\x95\x2B\xA7\x8D\xB3\x8C\x1B\xE7\xC6\xB6\x99\xB5\xB6\x99\x3B\xB5\xCD\x88\x7A\x29\x92\xA8\x94\x49\x54\x78\xE1\x20\x6D\x33\xB3\xB6\x89\xBC\x94\x51\x7D\x66\x84\xB0\x06\xD6\x11\xA8\x89\x8A\xE8\x7B\xA3\x05\x1E\x0D\x1D\x52\x4B\x62\x67\xCB\xC4\xCF\x52\xC4\x0B\x6A\x49\x87\x0B\xFB\xD0\xD1\x50\x63\
x67\x74\x5B\x20\x07\x67\x00\x70\x3D\x0F\x00\x9E\x13\x74\xC5\x0A\x18\xFB\x32\xDF\xF9\x96\xD8\xCB\x97\x6C\x8B\x84\xC6\xF4\x5B\x6E\x09\x7B\x71\xF2\xB3\x84\x9D\xAF\x9D\xD1\x48\x6E\x11\xA8\xA6\x60\x92\xC9\xA9\x9D\x0F\xBD\xC5\x90\xE1\xF4\x00\x53\x2B\x25\x94\xDE\xC5\x9C\x54\x69\x4E\x88\x4D\x7B\xE6\x9C\x4C\x9B\xC9\xDA\xBC\x70\x4A\xAA\xD9\x94\x54\x69\x4A\xCE\x70\x30\x8E\x74\xF8\xC6\xD9\x66\xAB\x87\xD2\x57\xC7\xD3\xD3\xD6\xDB\xEB\xD7\x5C\xE9\xCB\xE3\x6B\x44\x76\x35\x2B\xC1\xB1\xE5\x74\xD9\x52\x04\xCB\xA8\xD2\x2A\xCB\xB2\xDC\xE4\xBA\xF9\x77\x10\x82\xEA\x68\x58\x04\x21\x68\x56\x84\xA0\x4E\x42\x50\x89\x10\xB4\xAE\xFD\x6A\x84\xA0\x39\x25\x04\xD8\x30\x87\x66\x55\x08\x5A\xD7\x44\x21\x58\x44\x21\xA8\xDC\xE2\x34\x03\xEF\x5C\x08\x56\xCC\xEF\x35\x21\x60\xAF\x18\xD9\x8E\xE8\x13\x2B\x63\x8C\x11\xF6\xFA\xFA\x35\x26\xDD\x51\xC3\x50\x17\x6C\x32\x26\x71\x31\x55\xC5\x4D\x27\x8C\x27\x5C\x97\x0B\x99\xE6\x49\x0D\xA6\xD1\x8C\xAE\xCB\x06\xA3\x35\x8D\xA6\x7D\x72\x29\x46\x69\xD8\x23\xC2\x68\xEB\x1B\xC3\xD2\x2D\xD3\x68\x66\x6E\x31\x37\x26\xB9\xBD\xE1\x94\xBE\x48\x15\xC0\x33\x2D\xA3\x09\x72\x2F\xEC\x53\x43\xCB\x96\x14\x74\x5C\xF7\x15\x69\x69\x17\xE4\x60\xB8\x42\x25\xE1\x9A\x99\xEB\xB2\xA2\x3A\x5E\xD0\x75\x59\x9C\xE1\xBA\xC4\x70\xDA\x67\x71\xC1\xF8\xF5\x93\xA9\xFC\x6B\x95\xA9\xF2\xB7\x42\xA6\x5C\x29\x67\xD3\x12\xD1\x4E\xCF\xF6\x25\xBF\xAE\x56\xEF\xFC\x92\x7A\x18\xA0\xDB\x37\xDD\xFA\x15\xFB\x4B\x1D\xF7\x80\xB0\x03\xAC\xA8\xF7\x3B\x28\xF7\xCD\x9A\xA4\x9C\x68\xCD\xA3\x32\xC1\xA1\xBB\x44\x43\xA3\x46\xA9\x37\x6A\x94\xAF\xA3\xA2\xB0\x5F\xEB\xA4\x16\xBF\x95\x93\x3A\xBF\x57\x2B\x0F\xCF\x22\x1B\x9E\x7C\xF6\x9F\xB6\x59\x09\xAF\xFC\x2C\x63\x22\xD1\xCF\xF8\x4F\x56\xE3\x90\x33\xFB\xC1\x22\xFB\xB1\x88\xBF\x7C\xAC\x1A\x87\x52\x7E\x09\xFF\xBC\x5C\x31\x41\xE7\x02\x23\xCA\x98\x0B\x51\xB9\x2C\x72\x7A\xC0\x44\x20\x6F\x1D\xEC\x41\x8C\xC8\x62\xCA\x6F\x90\x09\xAC\x98\x36\x60\x2F\x66\x9F\xAC\x70\x36\x7A\x34\x1C\x54\xA4\x09\xF2\xF2\xD5\xD7\x82\xE0\xEC\xB2\xF9\x48\x78\x7F\x33\xCB\xCC\xB8\xF5\x40\xF6\xC9\x0A\x6D\xA8\x69\xD8\xA2\
x79\x0C\x85\xEC\x6B\x10\x70\x5C\x00\x90\x57\xF8\xE9\x3C\x50\xCA\x2C\xF8\x01\x83\x79\x1A\x7E\x7A\xB9\x1A\x05\x85\xC0\xD5\x29\xFF\x61\xC1\x8B\xA2\xC5\x25\x45\x80\x90\x79\x56\xC6\xEA\x97\x6E\x71\x56\xF2\x06\x69\x3A\x6A\x57\x5D\x52\x7A\x68\xBB\x9F\x94\x48\xDA\x33\xCB\xC3\x9C\x60\xB2\x47\x2D\xC9\x1E\xD5\x94\xE9\x51\xFB\x5F\xE0\x5F\x8F\xEF\x0E\xE1\x08\xFE\x45\x85\xBF\x82\xDA\x7D\x4C\xA8\xFC\x5D\xED\xDF\x3D\xBA\x85\x7F\xF7\xC8\x97\x49\x46\xFB\xF9\x41\x66\xA4\x72\xB0\x0F\x2F\x04\x71\xA7\x84\xDF\xBE\xAD\xAF\xEC\x7A\x7D\x1D\x2F\xBA\xF6\xB0\x46\x52\x72\x85\x42\x3D\x48\x42\xC0\xB8\x51\x23\x4C\x16\x7C\xCF\xE0\x80\x9F\x22\xC9\xBF\x2F\xD3\xE5\xC9\x7C\x8F\xB7\x4C\x36\x53\x83\x39\xF2\xED\x7B\x91\x50\x30\x58\xFF\x33\xAF\x7E\xE6\x55\xFD\x24\x11\x47\x8C\x44\xD6\x55\x8E\x45\xC2\xCE\x06\xBA\x7F\x7B\x75\xD7\x7F\x26\x14\x74\x85\x7F\xF5\x33\xAF\x7E\x06\xCB\x0E\x76\x77\x7C\xDE\xB8\xC2\xB7\x4F\xC7\x70\x6E\x90\xCA\xE1\x0C\x6B\x79\x5B\x2B\xC8\xEB\x39\x4A\x8D\x20\x6B\x6E\xC7\xBE\x74\xDA\x99\x83\xA5\x41\x92\xDF\x75\x4A\x36\x20\x8C\x9E\x97\x07\x71\x38\x08\xDF\xE4\xC9\xFD\xAB\xC3\x97\x11\x8D\x45\xCB\x19\x83\xCF\x15\xBD\x75\x79\xAF\x9D\x81\xFB\xB9\x61\x18\x62\x25\x38\x2E\x84\x78\x11\x20\x37\xE3\x14\xB7\x29\x1B\xDE\x9F\xE1\x34\x80\x16\x60\xBB\xD5\x24\x01\x78\x6E\xE4\x99\x23\x81\x65\xE4\x64\xB8\x08\xE7\x4A\xDB\x34\x4E\x9D\x5A\xD1\xF1\x85\xF2\x26\xA0\xB2\x37\xEB\x11\x63\xCD\xAF\x94\x00\xC6\x27\xF3\xA5\x10\x73\x82\xA0\x5A\x40\xE6\xF0\xF1\xFB\xB1\x7C\x57\xD2\x76\xED\x3C\xE3\xF4\x62\xA6\x56\xB2\x4D\x0B\x66\x9B\xA6\x2A\x78\x81\x77\xBA\x9A\x98\x22\x37\xE4\x2E\xBF\x18\xD6\x33\x72\x4A\xF9\x94\x80\xED\x9C\xFE\x5A\x9F\x99\xD7\x1B\xE6\xA3\x78\x76\x9E\x78\xA9\x25\xAF\x17\xA9\x67\xDA\x3F\x72\xBC\x31\xA9\x2D\xA5\x5D\xDA\xCD\x49\x6D\x76\x73\x52\x9B\xDD\x98\xD4\x46\x40\x2F\xE6\x8F\xDA\x95\xFC\x51\x41\xBC\x8A\x5B\x2A\x98\xE6\x24\xC1\x37\x9B\x12\x7C\x33\x49\x67\x47\xE8\x4D\x5A\xEF\xB9\x33\x18\xEE\x6C\x1A\x6E\xE3\xB2\x4D\xC3\xBD\xB1\x7E\x3B\xD5\x6F\xA7\xFA\x6D\x9C\xCC\x1C\x61\xFE\x88\x68\x9E\xC6\xD9\x78\x9D\xC4\x20\x8F\
xF5\x48\xD5\x27\x69\x22\x51\xDF\x6D\xBD\x52\x9F\x44\x6A\x9F\x95\x64\x58\x4E\x49\x86\x25\xE7\x23\xE3\x7C\x64\x66\x2D\xC9\x30\x3F\x35\x1F\x19\x7D\xEA\xEB\xF3\xC1\xF8\xFE\x53\xF3\x91\xC9\x7C\xE4\x48\x44\x01\xE0\xEA\x7A\x3E\xAF\x66\x3E\x6F\x1A\xBB\x88\x47\x70\x2A\x69\x32\x47\xBE\xC7\x4A\x7F\x34\xFB\x93\x4D\xFD\xC9\x7E\x33\xFA\x53\x51\x75\x8A\x90\x51\x4B\x58\xE6\xDC\xEB\xB0\x06\xAE\xEC\xD2\x74\x57\x7B\xA6\x15\xD5\x84\x7C\x2D\x2D\x91\xDD\x4D\xD3\xFC\x39\x0D\xB4\x8B\x50\xE4\x7C\xD0\xBD\x8F\x2F\x95\xFF\xA5\x5F\x5A\x01\x2B\x39\xDF\x67\x8D\x8F\xE9\x98\xE6\x71\xC4\xC7\x67\x47\xA4\xD9\xD5\x7D\x26\x7C\xBD\x99\xD3\x47\xC8\xC6\xF5\xB7\xBE\xB0\x92\xB3\xA7\xF6\xCC\x85\xA0\x9F\x8E\xBA\xEF\x0A\xFA\x68\xC8\xF6\x91\x34\xAD\xF6\x8C\x43\xED\x2E\x1B\xFD\xC9\xF8\x56\x05\xA4\xDD\xAC\xB7\xE4\x3C\x08\x12\x47\xD8\x92\xE6\x83\x99\x31\x27\xFA\x05\xD0\xD1\xD8\x95\xFB\x4C\x13\x06\x7E\xFD\x3E\x33\xDD\x68\xE6\xBC\xD1\xD4\xBC\x77\xD1\xCE\x88\xB2\x36\xF1\xA2\x31\x5F\xAF\x72\xE5\x46\x53\x4B\x71\x7D\xE6\x8D\xE6\xE9\x0A\xDB\x08\xCE\x39\x7B\x12\x2F\x5A\x6B\x29\xA2\xA8\x36\xB7\xAB\xF1\xA6\xCF\x53\x78\xFF\xEA\xE5\x99\xE1\x23\x46\x78\x88\x41\x0D\xC2\x9B\xE2\xB5\xFA\x97\x4C\xEB\xDE\x6E\xCE\x7A\x0B\xC3\x28\x4E\x3D\x62\xC6\xED\xE6\xAC\x11\x20\x5B\x52\xD8\x43\x9B\xFF\x35\x33\xF9\x89\x79\x61\xBA\x91\x8A\x04\x2A\xB8\xA6\xB4\xA7\xEE\x9D\x11\x7F\x50\xAC\xDE\xB7\x0A\x12\x51\x1E\xD6\x3E\x18\xF5\x2F\x9A\xAE\xAF\x56\xC2\x03\xAA\x07\xC2\x16\xBD\x7E\xE9\xAC\x19\x1E\x50\xAD\x84\x07\x54\xF3\x29\xAA\x82\x26\xC7\x14\x85\x3A\x6B\x7E\x1D\x2A\xAC\xA7\x52\x55\xFC\xBC\x92\xCF\xDD\x5A\xFB\xB0\xD9\xA2\x36\x67\x67\x97\xA2\xE1\x45\xC4\x09\x39\x1F\x7A\xAA\x0F\x18\x9C\xCA\xAD\x7A\xC8\x57\xAF\xF9\x09\xD1\x08\x23\x21\x43\x12\x22\x14\x8D\x72\x45\xB0\x23\xCE\x8F\xE0\x09\x63\xCD\x38\x99\xBF\x0F\xB7\xD3\x91\xEB\xEF\xB9\x51\x48\x1B\x69\xA8\xAD\xDD\xF0\x86\x4A\xC0\x70\x86\x23\xCB\xC5\x0C\xED\x62\x7A\x79\x29\x40\x73\x98\x0A\x4D\xCF\x68\x36\x85\xDF\x7C\x77\x95\x67\x29\xE0\xE6\xD4\x14\x7A\xBD\x4A\x1F\x82\x68\x1B\x9F\xDD\xD8\x2A\xF2\x3C\xA8\x2E\x7B\
x56\xA4\x8D\xE5\xD8\x17\x2B\x91\x36\xC5\x03\x42\xF2\xB3\x29\xD4\xA6\xDC\x1C\x6A\x13\x94\x43\x19\x36\x69\xA9\xB3\x42\x9D\x53\xA8\x4D\xB1\x31\xD4\x66\x56\x74\x92\x8A\xF4\x7A\xB4\x2C\xA2\x8C\xCD\x43\x0E\x66\x25\x28\x11\xC5\x4C\x22\x4E\x05\xDB\x70\x2E\xC3\xE0\xAE\x57\x91\xDD\xE9\x4A\xDC\xA4\x50\x9C\xF2\x8E\xA1\x38\xFA\x8C\x38\x94\xF0\xBE\x69\x2D\x16\x1B\x43\x71\x24\x42\xA2\x58\x09\x3E\x29\xE4\x9A\x79\xC3\x02\xEA\xE2\x50\xC9\xD2\x40\xBF\xCB\x33\xD6\xDA\x86\x51\xD5\x32\xA9\xEB\x41\x26\xD2\xC3\xF5\xA8\x07\x3A\x85\xC2\xB4\x2E\xEE\xD4\x80\x53\x11\x26\x77\x6E\x40\x9C\xD6\xC5\xCA\x62\x2F\xCE\x58\xEC\xF3\xA9\x3D\x35\x7E\xEA\xAC\xF1\x23\x74\xFB\x50\xA5\xA5\xEE\xD1\x1B\xDE\x29\x0E\xA7\x42\x87\x80\xC1\x93\x16\x7C\x2E\x0B\x3E\x1F\x87\x2A\x2C\xF8\x6A\xBE\xE0\xF3\xB8\xE0\xB1\x6D\xE4\x88\x38\xE0\x82\xAF\x36\x2E\xF8\x1C\x0B\x9E\x87\x18\x2E\x78\xB7\x60\xC0\x65\x75\xC0\xAC\x4E\x2C\x7E\x0B\x16\x39\x4C\x0D\x13\x90\xB5\x98\xD0\x88\x7A\x38\x00\x6B\xC9\xC4\x4F\x68\x9B\x8F\xD5\xDA\x46\x3D\x70\x7F\xD2\x03\xE6\x11\xAF\xF6\xCC\x1F\xFF\xE4\x07\x6E\x2A\x7F\xC2\x1C\xCD\xCC\xEF\x8D\x60\x6F\x01\xD8\xF9\x65\x53\x79\x7C\x4D\x62\xD9\x4F\x84\x87\xBA\x8F\x08\x7C\x1F\x4F\xF2\x7A\xCF\xD8\xEE\x7B\x3F\x20\xE4\x52\x6E\x04\xB7\xE0\xC5\x2C\x41\x12\x22\x55\x2E\xFE\xEE\x0C\x7E\xEA\x88\x5B\xB1\xC7\x7F\xEE\x77\x20\x39\xFA\x60\x78\xCF\x65\xF3\x56\x64\xC7\xC5\x4C\xB6\x4B\xEA\x9B\x22\x84\x39\xF3\xE8\x22\xEA\xE1\x83\x33\x8E\x5D\x10\x69\x35\x4C\xBA\x13\x9B\x55\x5D\x52\x8F\xA4\x07\xFF\x8E\x9A\x3F\xF9\xBA\x9A\x1E\xB5\xE9\x51\x75\xD9\xBC\x87\xD0\x8B\xEF\x76\x28\xFA\x2E\xF2\xF8\x7C\x0B\x3F\x7C\x8A\x1F\xBE\x83\xBC\x11\x2B\xEC\x61\xE7\x25\xB3\x15\x20\xF8\x00\x5D\x1F\xB4\xD7\xC7\x43\xBE\x4B\xF7\x0D\x6C\x37\xED\xF5\xB3\x2E\x7C\xD2\x0A\xAA\x94\xBF\xD7\x33\xB7\xDB\x9B\x83\xDD\x41\x3B\xED\x3F\xBC\x7D\x0D\xA9\xDD\x97\x94\xDA\x1D\xB4\x37\xD7\x90\xF0\xED\xE2\xEF\x37\x3B\x7E\xD0\xC7\x0F\x2A\xFE\x3E\xA4\x02\x8E\x1F\xFC\x27\xF8\xC0\x29\x7F\x2F\xE8\x4B\xF4\x33\xD7\xC2\x97\x9A\x5F\xFE\xA7\xA9\xB4\xD4\xFF\x9F\xC5\x0F\xA4\xFA\xDF\xCD\xA7\x81\
x74\xFF\xB1\x5B\x60\x13\xB8\x6C\xF6\x4C\x82\xD1\x4A\x7C\x66\x9A\x65\x7E\x8D\xA8\xE0\xE1\xB4\xFA\x56\x66\xAB\x9E\x0F\xDD\x77\xDA\x6F\x5F\xDD\xBD\x6C\xEE\x8F\x6E\xB9\x0B\x40\x55\x0D\x32\xC9\x63\x33\xC9\x90\xC8\xFF\x03\x3C\x90\x41\xFB\xF7\x3D\xB9\xCC\xD2\xE9\xB1\xB9\x2A\x69\x36\xFE\x7D\xC7\x11\xBB\xB5\x17\x68\xAE\x16\x0E\x3D\x7D\x8D\x03\x6C\x2F\x9B\x8E\xC8\xC7\xDA\xEB\xEB\x0F\xA9\x8A\x03\xAC\xAF\xF5\x36\xEC\x05\x3C\x58\x3A\x9D\x1C\x6B\xD6\x69\x82\x67\xE2\x78\x3C\x03\xC9\xA7\x84\xA5\x2C\xB8\xB4\x00\x4D\x24\x54\x23\x1C\xC6\xCC\xDF\xA8\x81\x52\xF3\x93\x4A\x08\x9D\xBE\x98\xA8\x1D\x90\x8C\x29\xA0\x8B\x7B\xDD\x2F\x22\xC6\xC9\x08\xFA\xEA\x53\x0E\x88\xEF\xFE\x3C\x4F\x76\xDF\xF6\x68\x76\xD2\xFD\x58\xCC\x1C\x46\x9E\xD9\x34\xCC\xBD\xF6\xF7\xA7\xE4\x4A\xFD\xE8\x8F\xDC\xBC\x79\xF3\x65\xCB\x2D\x02\xF9\xC5\x26\x74\x64\x70\xCA\x6F\x63\x1D\x71\x1A\x8C\x57\x17\xD5\x3F\x7E\xE3\xE6\x4D\xF5\x80\x52\xC2\x3B\x7D\x51\xFD\xF3\xE9\x83\x0E\x1F\x7C\x7E\xFA\xE0\x3C\x3E\xF8\xD5\xE9\x03\x37\x7A\x75\x9F\xFA\x52\xF8\xE0\x61\xA5\x12\x11\xF9\x53\xDD\xAF\x13\x51\xE8\x5B\xF8\xFB\x3B\xBB\x17\xC3\xEF\x0F\xA9\x77\x09\xF9\xC0\xBD\x77\x4F\x3E\x70\xFF\x74\x8B\x8B\xC1\xFB\x0E\x3A\x34\xDE\xA7\xF5\xC9\x24\x64\x70\xAE\x78\xF4\x0B\xFA\x81\xE0\x8C\x61\x0A\x56\xCB\x58\x96\xF9\x30\xCB\x00\x82\xFF\xAC\x32\x7F\x72\xB5\x4C\xF3\x7E\x4D\xCC\x55\x23\x08\xAC\xD7\xF9\xDB\xD3\x61\x3D\x83\xBB\x20\xFC\xF6\x4D\xD8\x08\x23\xBC\xAA\x1B\xE1\x68\xEC\xA2\x8E\x0B\xBF\x59\xFE\xF2\x96\xD1\x6B\xEA\x99\x9D\x11\xCA\x05\x62\x15\xAB\x7F\x7D\xCA\x76\x06\xC0\x24\x17\x4F\x73\x49\xA0\x60\x01\x97\x44\x8A\x4E\x0E\x77\x92\x92\xEC\xA4\xFB\xDF\x88\xC7\xFF\xEE\xE6\xD3\x06\x48\xB6\x44\xA0\x14\x48\xD5\xC4\x77\x12\xE4\x6D\xD3\x87\xEF\x8C\x1F\xFE\x75\xF0\xDC\xA4\x34\x12\xB5\x67\xDE\x7D\xF6\x57\x6F\x95\xAF\x92\x7E\xE6\xDD\xB0\x9E\x26\x57\xCF\x26\x57\xAF\x3C\xFB\xAD\xB3\x67\x3F\x2C\xCF\x0A\x95\x78\xF7\x33\x02\x08\x33\x2F\xFF\x9E\x59\xF9\x3F\xB9\x56\xFE\x67\x4F\x95\x3F\xAD\xDF\x57\xBE\xFC\xA6\x8D\x6B\xFA\x0E\x72\xD9\x5C\x51\x93\xE9\x18\x36\x36\x69\xA3\x9A\x35\
x98\x3D\xEF\x62\xCF\xFD\xF9\xB9\xBA\x88\x8D\x6B\x1E\x5F\xAF\xE8\x67\xE7\x15\xFD\x6C\xAC\x88\x9A\x3E\xD5\x75\x61\x63\x5D\xFF\xD0\x4C\xDB\xF6\x07\x75\x4A\xD8\xF4\x3C\xA1\xAA\x3D\x73\x3F\xFB\xF1\x2D\x2E\xC7\xDD\x98\x57\xFE\x25\x2D\x40\x06\xA0\x0B\x14\x80\x13\x8F\x23\xCA\xB5\xDD\xA1\xF4\xF7\x5C\x59\x2A\x7F\x82\x2B\xD7\xA1\x76\x25\x2F\x35\x8E\x5D\x79\xB8\xDB\xFD\x31\xAC\x95\x0A\x91\x74\xAC\xA3\x8E\x51\xB7\x92\xA6\x6A\x9D\x09\xA6\x66\xF7\x0F\x44\xD1\x25\x00\x4E\x44\x5E\x21\x5E\x8D\x70\xD5\x47\xE2\x7D\x24\x59\x3D\x2C\x2F\x44\x66\x22\x4E\x2F\xF7\x0A\xE8\x4E\x12\xDD\x7A\x32\x0E\xA0\xE0\x45\x22\xB5\xB8\x66\x32\xE2\xB6\x96\x11\x52\x5E\xF1\xE9\x8C\x36\x47\x29\x7F\xD9\x73\x37\xE3\x91\xBE\x55\x4D\x30\x82\x4A\x57\x87\x5F\x34\xD8\xBA\x40\xAA\xDB\xB8\xBA\x7B\xE9\x03\x82\xB3\x92\x52\x62\x5D\xD9\xFC\xDC\x02\xAE\xD0\x2A\xDD\x7C\xA8\x14\x74\xE9\x79\xBC\x34\x2E\x7B\x9C\xE3\x7B\x7F\x64\x0D\xAF\x1D\x69\x95\xC9\xE9\xBC\x78\xC5\x91\xC3\xED\xBC\x2B\xDD\xE2\x51\xF7\xD2\x2B\xCE\x1C\xB9\xCC\xD5\x47\xA0\xC0\xBF\xC0\xAB\x8A\x8E\x18\xCA\x4B\x2E\xC3\xAD\x4B\x4A\xA5\x8C\x2C\xC3\x91\xB9\xAD\x8F\xE3\x05\xC8\xD6\x25\xA5\x07\xB0\xE5\xD4\x2E\x3B\x5C\x1A\x6F\xFA\x2E\x95\xF0\x61\xBF\xC1\x2F\xBB\xB8\xA2\x30\x87\xBB\xBD\x10\x69\x86\x05\x31\xD8\x60\x77\x9C\xA3\xDC\x47\x2C\x02\x9A\x39\xFA\xD9\x3E\x93\x2B\x32\x18\xB8\xE7\x38\x44\x95\x33\x71\xCB\x35\x8F\xB8\x73\x60\x34\x0F\x85\x87\x73\x57\x97\x25\x0D\x2A\x71\x0B\x5E\x52\xEF\x60\x5A\x4C\xAC\x79\x38\xE7\xDA\xC7\x97\x45\x5A\x7E\x52\x08\x23\x7E\xCE\xEB\x2B\xA0\xBA\x3A\x37\xF6\xF7\xB8\x06\xA0\xC3\xD9\x25\x25\x54\x68\x06\x56\xA2\x33\x33\xBC\x9D\xDD\xF0\xEF\xE1\xB2\x88\x3C\x70\xB8\x1F\xC8\xDC\x3D\x57\x13\x58\x5C\xE3\xCE\x91\xE5\x7D\x69\x1B\x57\xBA\xD6\x9D\x3B\xC2\x00\x87\x2F\x82\xC1\xEA\x8D\xD7\x0E\xC6\x54\xDF\x35\x5E\x39\x75\xD9\x7C\x04\x8A\x5E\xF9\x37\x13\x5B\xD5\x6D\xED\x99\x2B\x7D\x1B\xFE\xB9\xB7\x37\xA1\xB1\xEF\x02\x62\x8A\x7A\x48\x6D\xBB\x65\xB0\xDD\x0A\xB7\x74\xC5\x95\xDD\x21\xF3\xC5\x15\x57\x79\x75\x78\xDC\x2F\xFD\xCD\x9B\x9D\x33\xEF\x05\xC8\x44\xB5\x96\xA3\xD2\
x01\xC2\xAB\x66\x48\x92\x57\xAE\xD8\xC5\x82\xC3\xEC\x95\xA1\x05\xF7\x72\x91\x3E\x4D\xE3\xBA\x74\xA5\xBF\x77\xBC\x6C\xDE\x86\x9F\x82\x99\xFD\x20\x7E\x7A\x70\xA4\x0D\xFC\x39\x35\xBA\xD2\xDF\x3F\xFA\xBF\x3A\x33\xB3\xA3\x8A\xDE\x12\x1C\x67\x70\xFF\x1A\xDC\xC6\x6C\xB9\x12\xC4\x57\x3E\xD8\x5B\x61\x29\x6F\x8F\x5E\x75\x9F\xFF\x00\x06\xC9\x95\x97\xD4\xF6\x32\x6B\x4B\x69\xF8\x54\x5A\x39\xBB\x5E\x1A\xBC\x7E\xDB\xFB\xCB\x1A\xAF\x00\x67\x71\x01\x82\xD9\xAA\xAD\x9B\xAF\x7B\x57\x22\x24\x75\x26\x12\x88\xE1\x93\x6E\xE0\xC7\x0D\x3D\xD9\x5F\x56\x78\xC6\x34\xDE\xF4\x36\x02\xD4\xD6\xFE\xA6\x7B\x3C\xCC\xDD\x33\xD7\x70\x13\x56\x5F\x05\xD0\xA0\x7E\x8C\x30\xAF\x2D\x59\xB6\x70\x0D\xA1\x7B\xE3\x6A\xD7\x7C\xF3\x92\x72\x8F\xDF\xA7\x01\x04\xA4\xDF\xFA\x9B\x8D\xBC\xB9\x08\x13\x0C\x30\xA0\x50\x9C\x71\xDE\xC5\xFE\x32\xB5\x02\x2F\x84\xD3\xF5\x70\x99\xB9\xDC\x95\xC2\xA6\x9C\xC5\xE7\x42\x93\x51\x7F\x5F\x85\xD1\xAC\x70\x06\x7A\x90\xF1\xB1\xCF\x38\xB3\x7F\x8D\x4A\xE6\x9D\x18\x40\x32\x0E\x85\xE3\x5F\x77\x0B\xAC\x55\x02\x22\x8C\xCE\x94\x7B\xE6\x6D\xAE\x79\xFC\x8C\xF2\x2F\x7F\x20\x96\xDF\xF8\xF5\x6F\xE0\x6B\xF6\x9F\x20\xBF\xD5\x84\x51\x08\xAD\x91\x33\x8C\xF0\xDC\xC8\x60\x0A\xDC\x24\x9F\x83\x2A\x0F\x06\xF7\x13\x24\x5C\xF2\xD9\x75\x08\xBB\xCB\x8E\x7C\x76\x5D\x50\x33\x42\x57\x5F\x4B\x6C\x52\x55\xB3\x03\x62\x71\x00\x09\x18\x97\x75\x7F\x9F\x96\x88\x6A\x1A\x1C\xF5\x3E\xFB\xD7\x3E\x30\xFF\xF5\x73\xF8\xF5\x61\x9A\x61\x3A\xA8\x14\xFD\xB0\x86\x4B\xF0\x61\x6D\x78\xF0\xD2\xA4\x1D\x08\xCB\x79\x2B\x98\x50\x5A\x6C\x2D\x5A\x58\xCD\x27\xB4\x80\xFB\x47\x5D\xA8\xFC\xF3\xE3\xD6\x39\x9D\xE1\x8F\xCA\xE4\x0F\x90\x0C\x85\x60\x4B\xEF\x2F\xCD\x57\x34\x65\x1C\x53\xC8\xD2\x13\xDA\x69\x6E\x26\x8E\x79\x4A\x80\x25\x11\xEB\x49\x36\x99\x59\xE9\xB0\x6D\xB6\x63\xC4\x0A\xE8\xFE\xD8\x8A\x11\xE3\x54\xF3\x23\x56\x67\x27\xC1\xC4\x71\xEA\x60\x30\x0F\xA9\x96\xB6\x7F\x26\xE0\x21\xD9\x25\xD5\x46\x4C\x61\x80\x89\xD0\xF3\x2F\x54\x32\x06\x37\x4F\x70\x60\x86\x72\x8A\xB7\x9A\x36\x72\x1F\x13\x36\x28\x7A\xDB\x23\xC3\x9F\x25\x5A\xBA\x46\x40\xA4\xC0\xF1\xF3\
xDE\x30\x13\xB6\x15\xCB\x73\x05\x5F\x08\xF7\x84\xCB\x0E\x79\xBB\x25\xD8\xC3\xB8\x52\x25\x62\x88\x8D\x0F\x05\x9B\xB7\xE5\x61\xA4\x25\x3B\xBB\x1A\xF8\x3A\x06\xAE\xB1\x9D\xA8\x1A\xD0\xE3\x04\x97\xC1\xBF\x02\x5A\xE4\x0C\xDF\x22\xF8\x2F\x6C\x13\x60\x8F\xB7\x93\x1D\xB1\xDA\x51\x3D\xEF\x28\x9C\x17\xA0\x4E\xA6\x8D\xB0\xFE\xE6\x95\x3E\xEA\x33\xFB\x28\xF8\xCA\x9A\xD0\x4D\x40\xF7\xD4\xF1\xA1\xD3\x7D\xB4\x44\x49\xB7\x1C\xCD\xAF\xA1\x8F\x4A\xAE\x43\xA4\x66\xA7\x0E\xC2\x4F\x96\x27\x09\x83\x5B\x65\xC5\x9B\x9C\x5B\xF9\x59\xEC\x6B\x4E\xDD\xA7\xED\x60\xFC\x97\x6F\x07\x0B\x85\x9E\x9D\x8F\x4D\x9E\x1D\xA2\x41\x11\xF8\x4A\x33\xDE\xC1\x7F\x4C\x4E\x54\xDD\xE8\x3F\x39\xE9\x69\x54\xD4\x9B\x26\x54\xF0\xD1\x50\x62\xC8\x84\xD6\x67\xCF\x58\xA2\x3C\xFA\xDB\xB7\xB3\xE3\xF0\x57\x8C\xD2\x24\x4F\x11\x8A\x65\x09\xA2\xA2\x1B\x8C\x53\x12\x9B\xAC\x06\x03\x94\x3E\xDB\x5A\x80\x75\x3E\x19\x34\x25\xAB\xCD\x05\x78\x3F\x73\x16\x7E\xAF\xDE\x02\x24\x0B\xD7\x42\xBC\x24\x21\xFA\xFE\x8C\x4C\xD2\xB8\x1C\x43\xFB\xA8\x5C\x76\x28\x46\x6F\x18\x22\x33\x29\xE1\x90\x84\x68\x00\xBE\x52\x70\xB7\xC8\x8E\x24\x82\x8B\x48\x01\x84\xC3\xE7\xDD\x2F\x10\x07\x0B\x12\x81\x10\xBB\xAA\x71\xC6\xEF\xE0\x26\x3A\x61\x3E\xC5\x95\xF5\x1F\x61\x5F\x55\x98\xCB\xA0\x72\x43\xA7\xC3\xCF\xE2\x1E\xE3\xAD\xF0\xC3\xDA\xCE\xB1\xF3\x9D\x6A\xFE\xF3\x70\x46\x57\x5E\x47\x8E\xB8\x97\x21\x67\x9B\x0E\xDB\x1F\x5D\x3B\x6C\x7F\x4F\x01\x18\xF8\x28\xAE\x7A\x26\x44\x14\x59\x4A\x1C\x16\x17\xD4\x13\x58\x75\xB2\x2C\x01\x18\x74\x83\xE5\x40\x13\x74\x7C\x89\xAC\x2E\x1B\x06\x19\x9C\xA9\x63\x1F\x97\xFF\xFE\xD2\x06\x8D\xD0\x38\x1D\x0E\x13\xC2\x55\x8A\xDA\xF8\x99\x38\x2C\x14\x80\xD8\xE3\x60\x5B\x2E\xFD\xC8\xD5\xCF\xC1\xCE\xA8\xFA\xEC\x34\xD8\x46\x06\x5B\x23\xFD\x10\x5A\xC7\x88\xEE\x08\x83\xCD\x20\x5C\xEB\xB4\x0C\x36\x94\x45\x1A\x6C\x1B\x37\x08\x7F\x42\xF0\x76\x0C\x02\xB3\x5D\xCD\xC3\xDA\xAE\x8D\x42\xBE\x0F\xA8\x2B\x3B\x0E\x45\x9F\xB9\xBC\xD7\xE0\xFC\x27\x3E\x83\x53\x57\x07\xBB\x1B\xBA\x4C\x8E\x42\x69\x13\x76\xEB\x83\xA5\x11\x4E\xD7\x98\xA4\x0F\x85\xAC\xC5\x3B\x35\x20\x3B\x3B\
x8B\xC8\xCC\xE4\x05\x61\xB3\xC2\x6F\x7C\x01\x4B\xA5\x5C\x06\xA7\x87\xD8\x6D\xE4\x00\x88\xE6\x5C\x8A\x58\xC2\x67\x86\x9E\x9E\x08\x62\x4C\xD6\xFD\x73\x4A\x1B\xF7\xBC\xED\x66\x7D\x9E\xF3\x34\xCF\xF9\xD7\x75\x9E\xF3\xAF\xCF\x3C\xE7\xA7\xE7\x39\x5F\x9D\xE7\x7C\x3E\xCF\xD0\x16\x2B\xF3\x8C\x65\x8A\xCC\x65\x0D\xA6\xF0\xB0\xC6\x30\xDD\xC7\x61\xBA\xBB\xFF\x1B\x66\x51\xF3\x9D\x0B\x5D\x9C\x38\xDD\x4F\xAC\xC9\x3A\xC1\x73\xC5\x4E\x86\x69\xB7\x62\x02\xE0\xB2\x8F\xFB\xCF\x52\x03\x6E\x58\x6E\xA5\x01\x16\x95\xB6\x1E\xAF\xA0\x52\x38\x51\x15\xC9\x6A\x08\xEB\x15\x8C\x37\x21\x9C\xC1\xC6\xE2\xE1\x59\xED\xB1\x0D\x15\x88\x58\x92\x28\xC9\xCB\x26\xA8\xB0\x4B\xAA\xC5\x0D\x8C\x71\x02\x9C\x07\xD1\x40\x1E\x04\xC0\x8E\x2A\xDA\x1A\xC4\x50\xBF\x2C\x49\xFF\x43\x29\x1E\x4A\x61\xB7\x62\x4F\x4A\xEE\x95\xE4\xD3\x95\xBD\xD2\x04\x23\xAC\xE5\xDE\x07\x3F\xC9\xC1\x6E\x3F\x03\xA0\x01\x10\x2B\xCE\xB4\x74\x38\xE4\x24\x51\x0D\xC6\x40\xAF\x63\xC6\x04\xAD\x23\x27\xC9\x95\x1A\xE7\x65\xB5\xC9\x2E\xB1\x03\xC7\x96\xED\x10\xE7\x05\xDB\xA1\x57\xF7\x6C\xE2\xFD\xC5\x3D\x9B\xD1\x58\xA2\x76\x45\x7A\x9C\xA0\xE6\xA9\x21\x1C\x3B\x19\xC5\xDA\xDB\x94\xC5\xA1\x88\x0D\x16\x86\x6F\x9F\x07\x12\x9B\xC4\x5D\xE3\xC3\x12\x37\x10\x30\xFC\x62\x37\xAC\x23\x25\xBB\x5C\xE7\x04\x8B\x27\x8B\x06\xD6\xB6\xB0\xD4\xE8\x65\xDE\x4A\x0C\xBF\x8E\x83\x23\xE2\xB9\xDE\x63\x96\xE7\x5C\xEB\x84\x54\x86\xA0\x49\xE9\xEC\x24\x20\xD2\xDD\xA5\x8A\x2B\x01\x34\x9F\x62\x53\x59\x8A\x42\x34\xFF\x0C\xA1\xD8\xA1\xB3\xD2\x74\x71\xDE\x53\xA7\xC3\xDB\x26\x3B\x4D\xAF\xD8\xAB\x70\x13\x68\xE9\xBB\x80\x0B\x3A\x78\xFC\xC2\xB7\x98\xBE\x59\xB7\xC5\x53\x42\x77\x7D\xAC\xD7\x34\x64\xA0\xB0\xA9\xD7\xAC\x4F\x82\x24\x06\xC3\x29\x66\x73\x30\x63\x70\xFD\xCB\x14\xEB\x99\xF1\xA7\x67\x06\xAE\x8E\x3D\xE4\x84\xD3\x24\xC6\xCC\x28\x11\x52\xC5\x97\xD1\x65\x3B\xA4\x01\x0E\x5B\xA5\xF4\x86\xE3\xAC\xE4\x8D\x4A\xDE\xA8\x66\x6F\x54\xB3\x37\xAA\xF8\x46\x05\x63\x57\xA4\xD9\x89\x3C\xC7\x98\x68\x2B\x04\xC4\xE1\x1D\xCD\x76\x03\x01\x4A\xB8\x7E\xF2\x6E\x11\x26\xCE\xAD\x1D\x14\xA7\x5D\x49\x66\xBE\xAC\x35\x01\
x76\x94\x3E\x11\xA4\xC3\xCE\xD6\xAE\x8D\xCB\x16\xDC\x74\x16\x9B\x99\x18\xB1\x32\x00\x32\xDA\x56\x28\x93\xC2\x28\x08\xF9\xAD\x0C\xC0\xD7\xF0\x46\xD5\xFC\xA8\x99\x59\x08\x05\x54\xAF\xEA\x7E\x7E\xF2\x4C\xF3\x52\x5A\xC0\xEA\x4C\xDC\x30\xA0\x53\x51\xD8\xD0\xE5\x05\x87\x21\xC3\x19\xA1\xB4\x60\x05\xC1\x79\x02\xE6\x3E\x72\x15\xC4\xE4\x3C\x9D\x20\x13\x69\x43\xD3\x28\x89\x1E\x74\x7C\xCA\x71\xD6\xCE\x04\x3D\x6E\x60\x12\x19\x51\xE4\x7C\xAE\xC0\x03\x6B\x0D\x25\x05\x2F\xB6\x21\x82\xA4\x17\x11\x32\x94\x9D\xBB\xBA\x14\xA2\x1B\x30\x92\xDB\xE9\x0C\xA9\x1E\xD6\x45\xB3\xDA\xF1\xA0\xAC\xB4\xCB\xEE\xD3\x45\xAF\x99\xBA\x69\xA0\xB3\xC3\xB1\xE0\x8B\x31\xC0\x2B\x9C\x66\x81\xA9\x7E\xF3\x97\xD7\x59\x00\x60\xA4\x0A\xC3\xB4\x77\x74\xF6\x5E\x36\x89\x1D\xCC\xF8\xEA\x4A\x18\xBE\xFF\x0E\xE1\x61\x28\x05\x8D\x00\x6F\xDC\x09\x20\x93\x89\xB8\x19\x8E\x18\xA1\xE3\xCE\x24\x97\xB1\x68\x45\xA1\x01\x8D\x80\xF8\xF0\xBB\x58\x38\xAB\x32\x92\xA7\xED\x38\x33\x86\xE9\x1E\x57\x29\x13\xC5\xDB\x37\x21\x5D\xDB\x78\xC8\xDE\x33\x9D\x5C\x9B\x09\x6F\xBD\x9C\xAE\x25\xC0\x2F\xAC\xBB\xD0\x77\x86\xB3\x5E\x52\x80\x1B\xC6\xCC\xDD\xA7\x0D\x55\x99\xDE\x9E\x58\x56\x4A\xD2\x91\x7C\x59\x1F\xFB\xEA\x70\x69\xA4\xD4\x01\xFD\x9A\xF1\x11\x1D\x1E\x49\x86\x42\xF6\xA8\x7A\x40\xE3\x94\xF6\xB0\x0E\x0D\x2D\x1F\xD6\xA1\x8A\xF7\xDF\xC4\xEA\xC8\x48\x29\xA0\x79\x2D\x45\x63\xEE\x3E\x5D\x78\x2D\x89\x10\x2A\xED\xC7\x8A\x77\x80\xD0\x3C\x22\x55\xD9\xFA\x6B\xDA\xE9\x35\xF0\x78\x64\x0E\x37\xD6\x91\xDE\x7C\x7A\xE9\xCE\xE8\xD5\xE5\xD0\x8E\x66\x4B\x79\xB0\x2E\x77\x9F\x85\x25\xB1\xA5\x7C\xB9\xFA\x6B\x3D\xFB\xF5\x47\x8A\x89\xA8\xD3\x6C\x19\xAD\xA8\x41\xD5\xD4\x0E\x2C\x18\xE5\x0B\x20\x86\xD2\x17\x63\xE9\x8B\xB1\x2E\x0B\x2D\xC3\x6F\x61\x5F\x8D\x4E\x18\x9B\x9C\x30\x50\x5D\x9B\x9F\x31\x77\x78\xC6\xA7\x10\x56\x4A\xA9\xBC\x9C\xA6\xFF\x2A\xB0\x78\xCC\xC9\xCC\x83\x92\xE6\x6D\xAE\x90\x83\xFE\x19\x5C\x54\x9B\xB0\xD2\x0D\x54\x96\xC4\xAD\xDE\xBC\x69\xAF\xCC\xAC\xBE\x82\xCD\x2B\xD8\xBC\x82\xCD\x2B\x38\xD8\x05\x07\xBB\x48\xAD\x2C\x62\x2B\x51\xD3\x97\x6F\x67\x38\x18\xBD\x17\
xB4\x22\xF6\xEA\x52\xF9\x3F\xF5\xAF\x56\x42\xFA\xCD\x75\x2E\x06\x80\xBC\xD8\x87\xC3\x7B\x1C\x4C\x24\x30\x22\x55\x48\x40\x71\xA5\xB3\x3C\x3B\x9B\xAB\xBB\x43\x4D\x66\x79\xAF\xFA\x26\x7B\x64\x32\x4D\x29\x0A\xEB\x63\x88\x46\x5A\x36\xD2\x46\x09\xF0\xFF\xF6\xF6\xB6\xBC\xB5\x65\xCD\xE1\xBD\x96\x89\x23\xAD\x5B\xE0\x7B\x5E\x7B\x87\x57\x23\x49\x0C\xFC\x80\xFC\x6A\xEC\x17\x80\xE4\x18\x1A\x57\x01\xC1\xB8\x26\xC2\x1B\x6E\x27\xAB\x50\xDD\xD3\x83\x15\x66\x1C\x7B\x14\xC4\xB2\xFB\x9C\x10\xCA\xB4\xBA\xB9\xE3\xB0\xC2\xA2\xBC\x8B\xA1\x8D\xD3\xB4\x36\xA0\x54\x48\x86\xF9\x5E\x26\x8D\xED\xC0\x2D\xA5\x08\xCB\x24\x7C\xCE\x84\x30\x44\xCF\x45\xA7\x84\xB9\xBA\x2B\x1C\x87\x53\x9C\x3E\xAE\x95\xE4\x22\x89\xFA\x1B\x5A\x26\xDF\x5F\x66\xAD\x99\x22\xE8\xAD\x24\x8A\xE5\xD1\x43\x57\x34\xF7\x28\x9F\xF3\xAE\x36\xF6\x3D\x7C\x54\xAC\x7D\xF4\xF6\x70\x1C\x3E\xB5\x94\x35\x97\x72\x03\x8D\x01\x6B\x60\xB6\x86\xC9\xD0\x3E\x5F\xCA\x7E\x5E\x49\x38\x83\x04\x21\x60\x25\xD6\x2F\xB0\x5A\xC2\x43\x3B\x34\x14\x82\x58\x49\x5D\x70\x6E\x75\xCE\x36\xFF\x3A\x2A\x42\xE0\x65\xA7\xBA\xCE\x70\xAA\x0A\x76\xF1\x06\x6F\xEA\x36\xB7\x24\x38\x0F\x71\x19\xA7\xA7\xBA\xA6\xF2\x9B\x6B\xCD\x1E\x5E\xAD\xCB\xF1\x0A\x7D\x5D\xD5\x15\x41\xE7\x87\x07\x93\xC6\xB3\x54\xAC\xB1\xFA\x2C\x56\x81\xD3\x6E\x68\x45\xF7\x2F\x78\x8D\x4A\xB4\x65\x1E\xA7\x70\xD4\x6D\xF6\xE7\x23\x67\x52\x15\x86\x2F\x32\x7C\x91\xE1\x50\x1A\xBF\x13\x5E\x64\x52\x0B\xC5\xA1\x62\x9A\xE7\x0B\xCD\xC4\x88\x17\xC2\xBF\xC9\xC5\xF6\x0D\xF0\x8F\x0D\xE6\xA2\x22\xCE\x7F\x35\x0E\xF6\x22\xE3\x5D\x2F\x2A\x46\x42\xEA\x8B\x0A\x51\x13\x0C\xFE\x0B\x9F\x86\x73\x51\xFE\x80\xB0\xB3\x0E\x4A\xCA\xBB\x2C\x7C\xA4\x5D\x11\xFE\xB1\xAE\x42\xA4\x05\xCA\x35\x1F\xAB\xF4\x72\x16\xA6\x69\x26\xB7\x5E\xD4\x8B\xB0\x29\x97\xC4\xEF\x3E\x60\xC4\x39\x29\xF8\x75\xF7\xAB\x72\x19\x1B\xF6\x59\xC6\x1F\x59\xFF\x87\x47\x5F\x20\x88\x9B\x29\x30\x06\xF1\x4A\xE6\x91\xD3\x50\x6B\xC2\x29\xF8\x87\xC7\x08\x47\x62\xC2\x3B\x82\x71\x92\xEF\x0B\x17\x66\xE8\x24\xA2\xCE\x84\xD9\xA6\xCA\x1E\x71\x25\x99\x9A\xE3\x9A\x5B\x4A\xF6\x29\xCE\xE8\x99\
xE0\x9D\x17\x0C\xB0\x0F\x1B\x22\x4A\x14\xE2\x88\xC2\x6D\x04\xB3\x03\x63\xDB\xB3\x78\x0A\xB1\x04\xE1\xA9\x86\x05\x38\x33\xE4\x1C\x42\x3E\x51\x1B\xB3\xF3\xE9\x5F\x6E\xE6\x98\x7A\x09\xEC\xBF\x95\xE8\xFD\x85\x6B\x24\x31\x75\xCB\x6D\xB9\xFC\x4E\x89\xA9\xF9\x4A\x62\xEA\xD6\x3C\x31\x15\x71\x78\x11\x53\xCF\x32\xCE\x7E\x31\xC7\xD4\xB3\x53\xAE\x77\xBE\x92\xEB\x6D\x88\x72\x22\x66\xB4\xA1\x2B\x6E\x25\xD7\x7B\xE5\xB4\xC8\x4E\xE7\xB0\x54\x99\xE9\x67\xA0\xB1\x4E\x75\x39\x4F\xB9\xB8\x76\xB5\xCB\x36\x58\x96\x37\x86\x85\x5B\x24\x6A\xAB\xCC\xB5\x6B\x5D\xB5\x67\x77\x15\x53\x17\x73\x70\x6B\xDA\xE5\xEC\x6A\x3D\xE5\xE0\xE6\x91\xD0\x2A\x68\xC9\xA9\xAB\x35\xBB\xBA\x60\x57\xED\x69\x40\x17\x61\x83\x07\x5D\x62\x25\xE2\x75\x35\xB9\x31\xD8\x39\x7B\x14\xBA\x96\xF5\xB9\xCB\xB6\x4C\xC1\xAB\x9A\x8A\x5C\xF8\x2A\x19\x55\xB0\x48\x85\xAA\x97\x2E\x69\xD5\xAB\x15\xB3\x4B\x0C\xFD\x78\x32\x97\xE5\xB1\xCC\xB8\x5E\x0C\xCF\x52\x79\xE3\x3F\xF2\xCB\x6B\xE9\xA7\x53\x23\xE0\x2D\xF5\xE6\xBA\xB3\xA3\x7F\x1E\xC2\x0C\xB2\x11\xFF\xC2\x38\x30\x07\xC3\xC6\xD7\x66\x0E\x2C\xFE\x06\xAC\xFB\xDD\xBF\xA6\x86\xC2\x6D\x33\xA9\x7F\x41\x5C\x66\xE4\xF0\x2C\x5F\x30\x50\x21\x9C\x6E\xF7\xD3\x97\xCE\xA6\xA8\xB3\x29\x6A\x33\xDB\x6E\x9C\x39\xC5\xCD\x98\x69\x73\x92\x6E\xB9\x06\xF8\x4E\xE0\xC2\xD1\x7D\x66\x4E\xC3\x28\x86\xED\x6E\xEB\xCD\x8B\x85\x36\x59\x66\xAD\xCD\x8B\x05\xFF\xA8\xAA\xAC\xC5\xE6\x55\xF7\x69\xF3\x8C\xBF\xF9\x9A\x3A\xDE\xDF\x16\xA5\xB2\x67\x3A\xF8\xE0\x0E\x97\xD5\xBC\x8C\x0E\x25\xE4\x20\x21\xDF\x17\x78\x60\xA5\x0C\x02\x07\x68\xA5\xEB\x99\xB5\xCE\xDB\x7B\x54\x70\x9F\x2E\x30\x61\x07\xDB\x2B\x4D\x60\x0B\xD6\x3E\x59\x66\xC2\x73\xB6\x52\xE3\xE1\xD2\x4E\xAD\x3C\x38\xDD\xEC\x53\x0F\x1C\x48\xC3\xE3\x03\xEC\x05\x43\x33\x71\x87\xB7\x67\x2C\x1D\x61\x24\x2C\xF1\xD9\x75\xF6\x31\xFC\x74\xC8\x84\x21\x3A\x0D\x9C\x49\x3D\xF1\x24\xE9\x6C\x5E\x31\xC8\x76\x8C\x27\x7C\x72\xC7\x06\x41\x72\xFA\xEA\x32\x79\x5E\x32\x1F\x5A\x63\xAE\x0F\x12\xA0\xE3\xF4\x28\x0C\x13\x72\xC9\x65\xE2\x56\x37\x58\x59\xE7\x85\xD3\xE3\x04\xD4\xAD\x48\x8B\xE1\x4A\x98\x63\xC1\
xDC\x25\x93\x5D\x99\x96\xBB\x99\x2B\x37\xB3\x62\x43\x1B\x4E\xC2\x8C\x5A\xB4\x00\xB1\xC5\xE9\x36\x21\x6F\x08\x0B\xDF\x4C\x2B\x3E\x0B\xFA\x65\x46\x61\x67\x98\x6B\x28\x04\xE9\xD9\xFC\xAE\x6D\x22\x16\x3D\x95\xA4\xF8\xFD\x76\xE5\xF6\x6A\x6D\x9B\xC3\xA2\x26\x37\xBB\x8E\xF9\x4D\xDA\x6F\x3D\xBE\xEB\xCD\x75\xFF\xC3\x61\xB9\x92\x51\xA6\xFB\x10\x4C\x7F\xC4\x8B\x9A\xCD\xF1\xA2\x13\xB7\x4F\x6A\xA3\x15\x94\x04\xD0\xEE\x09\xC9\x05\xE3\x45\xC1\x6A\x34\xF6\xB9\x04\x8B\xE6\xC1\x76\x9B\x01\x0C\x48\xB0\x28\x4D\xD2\x6E\x4C\x61\x80\xD8\x53\xBB\xD8\xD4\xDC\xE5\xA7\x9B\x5A\x48\x53\xF3\x79\x53\x73\x36\x35\x5F\x6B\x6A\xC6\x4D\x02\x0A\x54\x9A\x2A\x57\x84\x86\x6A\x1B\x6C\x23\x68\xAE\xF1\xFA\x1A\xC8\x63\x6C\xB0\x61\xE1\x9E\x2B\x66\xB3\x93\xB9\x5C\xC8\xE1\x0D\x38\x89\xF6\x60\x2D\xEA\x11\xB1\xAE\x43\xEE\xF5\xF1\x90\xED\x52\xE7\xE5\x5E\x3F\xEB\xB2\xDD\xEE\x4F\x44\xF8\x97\x59\x38\xAC\x99\x5D\x87\xA3\xE3\x9F\x83\xC6\xFC\x10\x43\xBD\x51\xB5\x96\xF5\x53\x49\x05\x4C\x8E\x90\x51\x12\xF3\x1B\x31\xDF\x1D\x62\xEC\x4D\xBA\x43\x82\x70\x48\xC0\xA7\x33\xF1\xA7\x9B\xC1\x80\x9D\x82\xD4\x27\xE2\xBE\x7B\xA5\x3F\xDA\x5F\x48\x14\x7D\x3B\xF1\xA7\xF0\x4F\xEB\xB4\xFF\x95\x5F\x89\x07\xEC\x47\x5F\xBB\xF5\x7B\x1E\xC8\xF6\xC8\x7F\xA3\x70\x51\xF5\xA1\x5B\x91\x4E\xFC\xAD\x3D\xAD\x9A\xEE\x7B\x62\x52\xC6\xFD\xD3\xC5\xD6\x77\x46\x5A\x68\x18\x5C\x5E\x09\xAF\x87\xEA\xBE\x9B\x5B\xB6\xC4\x35\x21\x48\x89\xD7\xF4\xE2\x4F\x32\x0C\x70\x92\xA1\xC3\x45\x33\x92\x76\xD4\xE8\x72\xB6\x1C\x87\x10\x03\x51\x1B\xD4\x44\x82\x19\x56\x5A\xAC\x9F\x2A\x20\x4C\x52\x31\xAF\x48\x31\x4A\x5A\x8F\xBD\xD0\x1F\x87\x15\x89\x8C\x83\x60\xFD\xFD\xD0\x8B\x41\xDA\xFA\xE9\x15\xF1\x80\x83\x9A\x12\xD6\xCB\xBC\x42\x9D\x2A\x84\xEC\x28\xA7\x8F\x52\x17\xC3\x21\xEF\x83\x0C\x3E\xFC\x90\xD5\x35\x07\x63\x2D\x48\x22\x4E\x86\x12\xEB\x44\x33\x8A\x9E\xB7\x08\xB2\x38\x50\x26\x7A\xA4\x88\xC8\x99\x8C\x4A\x62\x73\x22\xCD\x02\x31\x28\xD9\x23\xAE\x10\x7B\x12\xBE\xBA\x22\x99\x92\x65\x90\xA2\x50\xC3\x3D\x41\x07\xCB\xD9\xB5\x72\xF9\xD5\xDE\xC2\x98\xE8\x2B\x67\x41\x59\xBF\xCD\xB7\xE3\x5E\
xA1\x88\x61\x8F\xA9\x2E\xA2\x53\x10\x9D\x2A\xD4\xF6\x47\x18\x77\xA6\x1F\x17\xB7\x5F\xC5\x8D\x1A\xF5\xD7\xAE\xC2\xDC\x16\x7D\x93\x2C\xDC\x60\x68\x26\x0B\x16\x57\x5F\xD6\xB5\x80\x06\xF7\xC1\xEA\x2B\x47\x6F\x47\x29\xEA\xAA\x23\x6C\x5E\xF3\xB3\x72\x06\xF8\xF9\x31\x79\x81\xBE\x01\x98\x7D\x7D\x1D\xA3\xDD\xC2\xC8\xF0\x16\x38\x1D\x63\x4A\x1E\x5E\x4A\x1E\x5E\x4A\x9E\xD6\x32\x27\x70\x54\xE1\x05\xE1\x34\x53\xA6\xC3\x4E\x19\x0F\x34\x8D\x5B\x84\xB7\xBB\x63\x49\xBD\x43\x7F\x33\x42\x17\x74\x23\x02\x9F\xC4\x08\xDC\x11\xE0\x9E\xC5\x9E\xE9\xC2\x68\x97\x5E\x1D\x3A\x78\xBE\xBB\xDF\x10\x16\xBA\xA0\x8C\x16\x97\x94\xF1\xFA\xF8\x21\x9C\x90\x9B\x7F\xAE\xE3\x02\xB9\x6B\x99\xC8\x92\x30\x44\xA3\xC5\xCE\x04\xC2\x32\xF2\xB3\x1A\x85\xF3\x1C\x93\xC6\x18\x68\x66\x50\x0F\x34\x9B\x1A\x67\xE7\xD3\xC6\x31\x76\xC7\xC2\x13\xC5\x43\x4C\x46\xE0\xD7\x6E\xEC\xAD\x17\x4E\x42\x31\x03\xC8\xFB\xD3\xD1\xB0\x53\x87\x92\x10\x10\xBB\x09\x02\xF7\x59\x37\xB3\xE6\x77\x6B\x7D\x42\x2F\x73\x35\xC5\x86\x77\xA3\xC4\xA3\x73\x53\x69\x9D\x6A\xFE\xEA\x42\x97\x27\xB2\x2D\x1B\x17\xD6\x25\xF6\xA2\x2F\x2A\xD2\x14\xFD\xB8\xE2\x27\xC6\xFF\x43\x85\xBF\xAE\xEC\x0E\xD6\x69\xFF\xE3\x0A\x7F\x3D\xB6\x3B\xE4\x02\x5F\xBC\x67\x94\x3F\x09\xCD\x3A\xF2\x7A\xBC\xEE\x4F\x9E\x71\xB9\xFF\xD4\xC9\x78\x9D\x6C\xFA\xC6\x7F\x51\xE1\x2F\x79\xFE\x17\x14\xFE\x5A\x79\xDE\x9E\x7A\xFE\x07\xF8\xBC\x65\x26\x63\x47\xEE\xD1\x41\x96\x88\x75\xA5\x70\x8F\x85\x9D\x4B\x6C\xA5\x2B\x43\xB5\x0B\x96\xE0\x04\xF4\x9F\xD3\x41\x6D\x5D\x71\x10\xE1\xC5\x10\xEC\x66\xE1\xCC\x7D\x62\xC9\xC0\x98\x2B\xCC\xEF\x4C\x58\x02\x9B\xAB\xCF\x36\x57\xBF\x5A\xF9\x13\x91\x8A\xAA\x1B\x70\x8F\xB3\x33\xD4\xFE\xE4\x19\x10\x11\x4F\x78\x1C\xB8\x78\xC8\xF9\x4C\x05\x44\x4D\x67\x0F\x96\x1A\xD9\x4F\x89\x83\x78\x00\x78\x6B\x26\xB7\xB2\x28\x35\xDD\x64\xE7\xC4\xE1\xCC\x71\x06\xA9\x9D\x79\x0C\x67\xD5\x60\xF9\x91\x2F\x49\xB9\xEA\xB2\x69\xF1\x95\xCB\x77\x41\xE3\x35\x85\x83\x9C\x8C\x5C\x86\x56\xAE\xB7\xED\x14\x5E\x6E\xFC\x97\x6F\xFF\x4D\xCC\xFE\x34\xD8\x92\x59\x3F\x0D\x4A\x8E\xC3\xDA\x4E\x84\xDA\x7A\x4C\x06\xC5\
x99\x77\x6C\x1A\x17\x84\x42\x77\x57\xD2\xB8\x70\x54\xCC\xBF\xEF\x51\xD9\x38\x1C\x77\x37\x0A\xE6\x91\xE9\xC6\x81\x29\xBA\x61\x4D\xD6\xAE\xDC\x47\x33\xBE\x2A\xE1\xE3\x28\x18\xDE\x2E\x74\x57\x96\x66\x3E\x0A\xBF\x35\x63\x80\x9B\x0C\x74\x7F\xE7\x74\xF7\x63\xAF\xFB\xCC\x95\x4B\x82\x28\x1B\xA7\x8F\xD8\x4A\x86\x06\x48\x64\xCD\xDA\xA7\x61\x43\x3E\x5E\xCE\x7C\x34\xCE\xB8\x72\xCF\x74\x4F\x88\x6F\x65\xC8\xC3\x0E\xD8\x17\x2D\xC2\x51\x65\xCC\x9E\x58\x66\xDC\xB5\x80\x2F\x28\xC1\x22\xB6\x2F\x1B\x57\xF6\xB5\xCB\xFB\x12\xB7\x86\x65\x5F\x84\x0F\xC1\xD1\xDE\x17\xAE\x26\xCD\x4E\xD1\x97\x38\x88\xA2\xF6\x99\xBF\x10\x20\x5A\x55\xC4\x35\xC9\x9D\xF9\xF8\xA3\xEE\xDB\x9C\xFE\xF8\xAD\x07\x4C\xE7\x4A\x38\x32\x64\x5C\x70\x54\x49\x77\x7D\x2D\x5D\x40\xB9\x1C\xB4\x24\xD2\x88\x4E\x29\x0E\x59\xE8\xE7\x4E\x50\xAF\xDF\x21\xD7\x55\x64\x2E\xA4\x87\x2D\x3B\x10\xA3\xC6\x0C\x69\x7E\xBB\x2F\xDD\x4A\xAC\x3C\x36\x46\xAF\xAC\xDC\xF6\xCB\xCC\x9E\x11\xBD\x62\xD6\xA2\x57\x4C\x8A\x5E\x09\xB6\x35\x23\xEA\xFE\x9B\x64\xF1\xC1\xCF\x87\x1D\x67\x7F\x22\x33\x77\x6A\x3A\x4A\x5E\x61\xB0\x8D\x9A\x82\x6D\x14\x83\x6D\xCC\x41\x84\x65\xD5\xEF\x00\x0F\x0E\x4F\x58\xE6\xB0\xF9\xC1\x1C\xC4\x07\x19\xDC\x8A\xD8\x1B\x44\x3F\xE0\xE7\xDF\x48\x3F\xFD\x82\x9A\x29\xF2\xDB\xB7\x6F\x7F\x63\xD0\xF8\xFA\xE8\xD9\x63\xD9\x4E\xB1\x17\x05\xD5\xFF\x0F\xD2\x23\xAF\x41\xCD\xA8\x94\xCB\x28\xBB\x08\x1E\xFE\x87\xF1\x61\x5C\x26\x76\x73\xBE\x37\x2D\x36\x47\x58\x92\xB9\xD3\x61\x72\x81\x70\x6D\xFA\xD2\x65\xF0\xED\x55\xAE\x74\x95\xA8\xA7\xDA\xE9\xC7\x76\x61\x73\xD5\x4E\x27\xF5\x54\x85\x6E\x97\xB8\x3A\x83\x8C\xFA\xDB\xE8\x94\x76\xE5\x45\xD3\x0D\x8B\x47\xDD\x4B\x3D\xCD\x4E\x08\x65\x45\x09\xAE\x86\x3A\x82\x7F\x31\x8B\xB5\xCD\x1E\x71\x90\x21\xA4\x86\xB4\xAE\x72\x55\x2C\x35\x4D\x1A\x7F\xC7\x1B\x1B\xD7\xBC\x12\xEC\x95\x57\x80\x2D\x8C\x85\x59\x72\x61\xD6\xA7\x17\x66\xE6\x4A\x59\x98\x65\x5C\x98\xAE\x0A\xFB\x62\x2B\x1A\x63\xC8\xA0\x33\x5C\x8B\x0D\x2D\x85\x75\xA1\xB5\xA6\x0F\xA7\xAE\x92\x84\xBE\xA5\x2C\xC7\x4A\xE6\x57\x09\xCA\xF0\x52\xBB\xAA\x2F\x83\x71\x27\x83\xE5\xDA\
x27\x96\x06\xC0\xA5\x61\xC0\x90\x2D\x12\x96\x63\x09\xF0\xE8\xBE\x74\x35\x89\x33\x38\x2C\xF3\x15\x57\x13\xC4\xAE\x8E\xF8\x60\x8B\x47\xC3\x19\x7E\xFB\x03\xAE\x90\x35\x57\xAF\xAE\xB9\x6A\x3F\xA9\xA3\x44\xCC\x81\x35\x57\x4B\x20\xD2\x7C\xBD\x91\x74\x0C\x8E\x06\xEB\xDA\x23\x70\xE6\x56\x8D\x84\xE8\x62\x05\xFC\x6A\x6B\x96\x93\xCB\xBB\x4B\x07\xAF\x89\x75\xB5\x12\x0A\xF4\x53\xEE\xA5\x18\xEC\x4C\x3C\x27\xE6\xC6\xD8\x09\x07\x4D\x93\xF6\x3C\x9B\x88\x51\x6D\x8C\xA1\x01\xA0\x0D\x27\x81\xE4\x72\x11\x40\xE2\x10\x89\x23\xE6\xF1\xA0\xF3\x11\x95\x04\x85\xC6\x64\x01\x2B\xA9\xD9\xD9\xEA\x23\x07\x4B\xD3\x48\x4A\x54\xBE\x67\x3A\x46\x61\xEE\x80\xFF\xD9\x24\xB8\x43\xCB\x8F\x2B\x38\xEF\x5C\x1E\x5E\x63\x11\x35\x85\xC0\xAA\x5C\x02\xAB\x70\x2A\xC3\xF6\x80\x95\x80\x4F\xF1\x5B\x8C\x02\xC9\x29\x9D\x8D\x80\xEB\x53\x69\x85\xB7\xE2\x96\x4F\xF4\x76\x46\xB7\x5F\x05\xCD\x59\x06\xCD\xC9\x0B\xA3\x6A\xC4\x18\x48\xBC\xA0\x78\x7A\x6A\x9F\xDD\x18\x90\xE3\x9D\x18\x8A\x8B\x58\x8F\xB3\x47\x43\xE6\xCD\x8D\x21\x9C\x20\x4C\x82\xA0\xCB\xE6\x10\x74\x4C\xA7\xC8\xE8\x9F\x90\x0A\x2A\xDC\xD7\xCF\xBD\x3D\x0B\x02\xAE\xAD\xB6\x29\x43\x31\xDE\x07\xCA\xFD\x75\x3D\xF9\x7B\xEA\xE4\x51\xD0\xBC\xC9\xD4\x4E\x66\x90\xC1\xDF\x73\x6F\x0F\x46\x7C\x3E\xDC\xD9\xEA\x70\x87\x45\x8F\x61\xCD\x39\x5C\x41\x64\x2A\xA4\xC2\x33\x1A\xD1\xAE\x2C\xFA\x8C\x61\xEA\x99\xE0\xFD\x6A\x64\xBC\x87\xC1\xA6\x5E\x47\xA0\x1F\xA3\x66\x4B\x7F\xF3\xE6\xEB\xEA\x09\x84\xF2\x4B\x5C\x9A\x0C\x7E\xF9\x71\xE4\x68\xBE\x71\xFB\x53\xD5\xA9\xF1\xCF\xE6\xE3\x5F\x85\xF1\x37\xD3\x25\x82\x91\x2C\x64\x38\x82\xE2\xF8\xD7\xAE\x4E\x97\x08\x6B\xE3\x9F\x4F\xE3\x9F\x4F\x9C\xC0\xF9\xDA\xF8\x1B\x21\x4F\x5E\x6F\x56\x46\xCC\xAB\x3E\x17\x4A\x59\x62\xE0\xC5\x29\xA8\x66\x53\x90\x6F\x9C\x02\x13\xA7\x20\xEC\xEE\x26\xD5\xBD\x41\xE2\xCA\xD0\x99\x32\x74\x16\x6E\xF8\x89\xBE\x55\x3C\x8F\x70\x2A\xC2\xED\x18\xAC\xAD\x2C\x75\x36\xFF\x4A\x9D\xCD\x4E\x75\x96\xE4\x79\xD5\x4A\x6B\xA6\x8E\x56\xB1\xA3\xE5\xD4\xD1\xF2\x2B\x76\x74\x26\x6B\xCA\xB7\xE3\xD0\xC2\xC2\x96\xA4\x0A\x20\xAA\xB5\xE2\x29\x56\x41\xEB\x15\
xE2\x87\xE0\x34\x37\xC1\x84\xC4\x96\xB7\xF4\x20\x2F\x08\x07\x72\x34\x67\x11\x93\xE2\xC8\x80\x2F\x87\xE3\x3D\x63\xC3\xA2\xE0\x01\x79\x99\x02\xBD\x01\x94\x09\x8B\x2C\x8B\x16\x59\xD6\x43\x9C\x32\xD9\x02\xAA\xB0\x05\xCC\xE2\xE5\xA3\x45\x16\x8A\x00\x21\x2E\x93\x2D\xC0\xAE\x6C\x01\x7D\xEE\x16\x17\x8D\xEA\xB7\x82\xD0\xAD\x58\x5F\x5B\x0F\x20\xDC\x5B\xAC\xAE\x56\x02\x52\xB1\x93\xB7\x29\x09\xE3\x94\xD5\xB5\xBA\x03\x68\xC2\xBD\x23\x5A\xCE\x84\x8E\xEB\x71\x08\x07\x9B\xE6\xC8\x67\x37\xC2\xFC\x15\xCC\x14\x04\xB7\xF3\x69\x60\x39\x2C\x31\x6C\x23\x47\xF4\xFA\xF2\xB4\xFA\x0C\x4C\x8D\x63\xFA\xF8\x78\x00\x4D\x1F\xD9\x88\x10\xAE\xC9\x29\xD5\x4C\xAC\xA4\x09\xA2\xE6\x07\x8D\x2E\xC2\xD9\x59\xA7\x5D\x24\xAC\x11\xDF\x8D\x72\xD7\x0F\x4F\x52\x4B\x87\x92\x65\x4C\x0C\x01\x2A\x3A\x5C\x05\x56\x91\x95\x74\xDA\x05\x82\x68\x6A\xDE\xFD\x87\xB9\x75\xF9\x45\xD3\x01\x85\xE3\xD4\x1E\x60\xCE\x50\x4A\x96\x4A\xC9\x4E\x46\xE6\xA4\x94\x56\xA2\x6F\x61\x45\x81\x90\x55\x0E\x69\x20\x31\x96\xC8\x7B\xCC\xC5\x0E\x63\x7F\x34\x49\x96\xB5\x33\xEF\x58\x2A\xFF\x85\x5F\x5E\x47\xCD\xAA\x18\x0E\x80\x9B\x88\x30\xAE\x32\x84\xFB\xE8\x6C\xF3\x26\xE5\x54\x74\x54\xC6\xEC\xE0\xDF\x95\x32\xB0\x9B\xC3\x68\x33\xDE\x8B\x99\xBE\xB7\x71\xA6\xB1\xCA\x99\xE6\x4F\x1B\x9D\x45\x4F\xEA\x4F\xAB\xF5\x1D\x1D\x08\x18\x74\xD7\xBC\x2D\xF9\x53\xBF\x69\xEE\x4F\x7D\x6B\xB0\x2F\x6F\xFD\x9E\xCB\xE6\x8A\xD3\xFE\xC3\xBF\x0A\x3F\xD6\x1E\xAC\xCE\xBD\xE8\x51\xFD\x63\x2F\x22\x20\x60\x3F\x26\x84\xBF\x1D\x46\x09\x63\x9B\x78\x3B\x6D\xE0\xAF\x8D\x6F\x4A\xFE\x5A\xDF\xCE\xDF\x64\xE3\x9B\x9C\xD3\xFE\x13\x7C\x53\xCC\x1D\x93\x97\x84\x45\xCD\x7A\x25\x62\x97\x2F\xBC\x80\x94\x1F\x43\xD2\x4A\xA6\xBA\x3B\xDD\xBD\xF4\x62\xE4\xAE\xDF\x1B\xE5\x17\xB2\xA7\x8B\x8F\xD7\x34\xDF\xBF\xD0\xC5\xEC\xAE\xDD\xC6\x4B\x08\xE4\xDA\x63\x59\x47\x9E\x03\xDC\xE7\x14\x92\xA1\x8C\x39\x1D\x87\xD2\xBF\xAA\x7F\xFF\x50\x25\x0C\x75\x57\xF9\x57\xF5\xD3\x47\xFE\xFC\xD3\x63\x38\x96\x0B\x3E\x6B\x0A\x5F\x3B\x3F\x7B\xB6\x2F\x69\x8B\x15\x0D\x2F\x21\xCE\x27\x5E\x5D\x57\x1C\x0D\x85\x37\x37\x42\x55\xFE\x84\xA9\
xA4\xAE\x3C\xF2\x9F\x86\xD7\xB1\xE2\x25\x64\x32\x63\x71\x3F\xF5\x3C\x70\x4A\x01\xDC\x96\x37\x2E\x9E\x56\xF5\xBE\x80\xA2\x21\x4C\xA2\x46\xA7\x5A\xF1\x33\x28\xFA\xE4\x87\x72\xC3\xBB\xE2\x0A\x44\xF0\x55\xDD\xFD\x99\x17\xA5\xF9\xB0\x1E\xCF\x23\x02\x80\xA4\xBB\xE8\x40\x29\xE0\xE2\xCC\x04\x2D\x30\x22\x11\xF9\xD0\x85\xF1\x8A\x23\x82\xA6\x51\x1F\xE7\xBC\x32\xCA\xFD\x09\x73\x0B\x75\xD2\x1B\xE7\xB1\xF5\x5E\x36\xE7\x19\x23\x58\x39\x19\x2E\x8C\x1C\xEA\x4E\x84\xAD\xB9\x2B\x53\xDD\x43\x2E\xF1\x94\x09\x57\x1F\x6B\xDC\x9F\xA4\x45\xE1\x06\x83\xC3\x80\x23\x3C\x22\x4F\x4B\xFA\x21\x75\xC1\x85\x66\xE7\x7B\xB4\x3C\xC3\xBB\x24\x8D\x1C\xF9\xA5\x8C\xB5\xDA\x06\xF8\x94\x94\xB2\x7E\x7B\x94\xA2\xD5\xB9\x6F\x47\x51\x92\xFC\xDB\x4B\x6A\x7B\xA9\xE3\x5E\x13\x7B\xEA\x77\x22\x7C\x4D\x79\x9F\x2E\x86\x6A\xCB\x18\x25\xF7\x37\x5D\xA4\x23\x35\xF0\x72\xA4\x20\xCC\x66\x56\x3C\xFA\x99\x21\xD9\x38\x28\x69\x1A\x05\x11\x3C\xB7\xD7\xA4\x43\x19\x4C\xD8\x6E\x54\xBA\xE9\xE5\x66\xBA\x94\x30\x55\x89\x1B\x9E\x85\xAD\x7A\x73\x7D\x28\x79\x92\x4D\xDF\x0F\xC2\xFD\x1E\xCC\x37\x7A\xC8\xA5\xA3\xDD\x84\xF5\x24\xD4\x43\x46\xA8\xEF\x51\x94\xF1\x97\x70\x47\x53\x06\x41\x2F\x05\xD4\xE6\x78\x36\xC3\xEA\x29\x69\x52\x4C\xC2\x96\xEF\x12\x0F\x90\xAB\x29\x3F\x43\xDE\xF2\x28\x68\x61\x0A\x56\x57\x5C\x3E\x5B\x71\x43\x12\xAE\x62\x45\xB8\xCC\x69\xE1\x22\x3E\x97\x4C\x68\x64\x95\xD8\x71\x46\xBA\xDB\x9E\xFB\x0E\x11\x01\xCC\x02\xDD\xFD\xD1\x47\x27\x4C\xF2\x31\x38\x77\x5A\xD7\xB3\x35\x5C\x70\x83\x1C\xCA\x95\xDE\x96\xBB\x51\x9A\xE3\x62\x2E\xEE\x62\x31\xEF\x8B\x4D\xB6\xBE\x1A\xA8\x7B\x8A\x49\xF7\x14\xD3\x48\x94\x93\xF6\xE1\xA9\xBB\x48\x01\xD6\x34\xFE\x10\x70\x56\x0A\x62\x74\x78\x14\x50\xF2\x4C\x3D\x9B\x62\x0E\x4C\xF3\x63\x99\xB6\x27\x4E\xF9\x4F\x8B\x2E\x96\x73\xDE\xF9\x98\x5C\x14\x35\xCA\x8C\x50\x9E\xD9\x12\x0C\xC9\x92\x89\xB5\x68\x6A\x94\x32\x84\xE7\xCD\xD5\xA4\x80\xD2\xAE\xAA\x49\x79\x96\xBE\x7A\x39\xB2\xEA\xD9\x10\x1B\x97\xD1\x1A\x9F\xC6\x37\xDB\x75\x36\x8E\xAC\xD9\x3C\xB2\x5C\xA9\x18\xDB\xB0\x12\xE8\x93\x0B\x62\x73\x9E\xE9\x2F\xA2\x15\x35\xA2\
x4F\xAC\x0F\x47\x58\xB9\xD9\x9B\xD7\x04\x3A\x12\x9D\x0E\xAB\xC6\xC5\xF2\xE6\xF1\x50\xA3\xFF\x8C\x9A\xE0\x3F\xFD\xC9\xC8\xE8\x5A\xFF\xAA\x4E\x37\xFC\x3B\x0D\xAD\x80\x83\xF5\xEA\x0D\x2E\xEE\x06\x4D\xD4\xE0\x50\xB9\x96\x40\x82\x03\x21\x40\xD4\xCE\x1C\xF9\xE7\x47\x6F\x9E\x0D\x16\x96\xB9\x1E\x84\xA5\x49\xB0\xCA\x4C\x3E\x4E\x39\xC5\x7F\xC5\x68\x73\xE2\x35\x33\x63\xCC\x4A\x46\xF4\xC9\xB8\xF5\xE6\x3C\xCF\x73\xA5\xB2\x2C\xB3\x26\x97\x3F\x3A\x17\xEC\xBE\x88\xB9\x15\xB4\x90\xE2\x0A\x30\x49\x85\xA8\xFD\x65\x1E\xAC\x20\x83\x0B\x39\xC0\xF6\x49\xF4\x76\xA8\x38\xD8\x01\xB9\x58\xB6\xC9\x9A\x8B\x97\xC8\x83\xF1\xEA\x10\x67\xD3\xD7\xD5\xE1\xB1\xC4\x5E\x20\x75\x60\xFD\xF3\x78\x30\x50\xA7\x82\xA8\xE5\xB5\x5E\x1D\x7E\xE5\x72\x83\x41\x03\x55\x18\x05\x5E\x68\x13\x67\x62\x30\xBE\x7E\x72\xA9\xFD\xDB\x9C\x79\xD6\xEB\x63\x5C\x19\x06\x91\x68\x16\x61\x04\x71\x33\x37\xFF\xF1\x3B\x6B\x24\x22\x20\x52\x65\x60\x74\xFF\x2F\xDE\x9A\x65\xB1\x31\xE0\xBC\xC4\x5D\x16\x80\x0C\x6C\x5F\xBB\x82\x5B\x51\x84\x8E\x63\xBC\x53\x09\xBE\x2E\xC2\x9E\x03\xE2\xA7\xFB\xAE\x17\x19\x29\x1C\xEA\xEE\x0B\xB9\xB6\x04\x6B\x73\xDD\x16\x4D\xBC\x1E\xAC\x5C\xC1\xEB\xC7\xF2\xEA\x12\xF0\x25\xF8\xA5\xD8\x4F\xA1\x04\x05\x37\x85\x12\xC0\x09\xFA\xF1\xDD\xBE\xE6\xF9\xB8\x70\x96\x5C\x20\x56\x10\xDA\xAA\xC3\xA5\x6E\xC1\xCD\x5E\x09\x8F\x96\x3A\x5C\xEA\x98\x92\x50\x11\xB7\xBC\x06\xA6\xF7\xFE\x32\x5B\xB9\x89\xAC\xD0\x4F\x44\xE5\x74\xFF\x04\x38\x56\x8B\x98\x2A\x51\x0C\x35\xC5\xA4\x9E\xA2\x9F\x0B\x57\xB9\xFA\x61\x1D\xFE\x29\xC4\xD7\x2D\x1F\xD4\xA0\x12\xC5\xD5\x6F\x23\x51\x84\x45\x4C\x19\x88\xD1\xB9\xA9\x41\x18\xB7\xFA\xEA\x52\xB5\x45\x13\x4A\xD7\xA4\xEC\xA3\x83\x42\x48\x12\x95\x80\xCE\x20\x37\x32\x8D\x2A\x55\x60\xD1\x7D\xF7\x8B\xA4\x12\x93\xBE\xC9\x34\x5E\xC5\x6D\x71\x98\x28\x26\xE1\x86\x56\xD4\xA8\x27\xC8\x89\x39\xC0\xB0\xF3\x91\x22\x0C\x47\x41\xBC\x6D\xAF\xE0\x23\x0D\xAD\x95\xE8\xFD\xD4\x60\x3D\x0E\x8C\x66\x62\x3C\xDB\xBA\x90\x14\x8C\xA3\xD5\xE2\x44\xC2\xD9\xB3\x16\xAC\x1C\x75\xB8\x34\x69\xAE\xD7\x5E\x9A\x35\x0D\xE7\x72\xC3\x43\x7A\x9A\x31\x3C\
xCB\x87\x22\x08\xCE\xE4\x55\x2C\x4F\xCD\x5A\x9C\xB7\xCA\x8B\x65\x39\x9B\xB7\xDA\x55\x98\xA6\x34\x6F\xF2\x41\xB5\xBF\xD4\xE4\x55\x2C\x9A\x3B\xCE\xD9\xC9\x48\x37\xED\x74\x3D\xFC\xEC\xB8\x65\x94\xB6\xA1\x2A\x8D\xB3\x58\xE7\x16\xDB\xF8\x6D\xCF\x74\x97\xF9\xDB\xDD\x0E\xED\x34\xB2\x77\x16\x4D\x3C\xE0\xEA\x23\x10\x3A\x20\xAE\xFC\x73\xBF\xBA\x82\xA5\x5F\x30\xAE\xBC\xF2\x27\x08\x81\xC2\xB2\xC2\xCD\x7D\x11\xF3\x59\xE0\x20\xAD\x5C\x81\xDE\x87\x8E\xF3\x31\xC4\xBA\x42\xDB\x34\x8C\x03\x90\x35\xB0\x9F\x20\x23\x0B\xF1\x13\xC0\x7B\x55\x41\xA2\x0E\x71\x1E\x48\x79\x44\x05\xB9\x22\x1A\x92\x5D\x90\xEB\x84\x0E\xAE\x66\xAD\xDF\x35\xDE\x47\xA1\x5A\x34\x7F\x2B\xEC\xCA\x93\x2A\xFF\x83\xA3\x27\x34\x51\x52\xDB\x51\x71\x13\xDD\x59\x82\x19\x19\xEF\xB3\xF3\xF8\x52\x7B\xED\xF2\xEB\xFE\xA5\x97\x5F\x53\xC7\xFB\xCC\x77\x31\x92\xBD\xA6\x52\x21\x35\x2B\x44\x4A\xC5\xE5\x21\xF3\x16\xF5\xEC\x9D\xC8\xA4\x61\xBA\x92\x84\x69\x40\xB6\xF3\x95\x20\xCC\x7C\x0A\xF5\x0D\xF3\xFA\x2B\x53\x84\x72\xBE\x1E\xF1\xB7\x94\xB4\x7A\xEA\x72\xB4\x5D\xE2\x9D\x24\x63\x2B\x3C\x43\x3D\xC6\x7A\xA2\x71\x84\xE8\x81\xF8\x44\x9E\x9E\xC8\xC9\x80\x91\xC2\x1E\xD0\x76\xCB\x35\xB8\x13\xF1\xD5\x72\xD9\xA9\x37\xBC\x2F\x4E\x9F\x45\x0D\x07\x4B\xE5\xFF\xC5\xBF\x9E\x1D\xDA\xD1\x96\x8E\x10\x1C\xA7\xB6\x9F\xA9\x7F\xE1\x83\x18\x33\x49\xC8\x15\x60\x2A\x7F\xA9\xD4\xC5\x09\xC3\x1A\x0C\xEF\x20\x75\xC2\xCC\x77\x26\x8D\x40\x26\x09\xFC\xB3\xFE\x50\xF8\x43\x19\xE3\x95\x57\xDD\x1B\x90\xF7\x7C\x3D\xDC\x93\x13\xB5\x65\xAD\x20\x90\xFB\x9D\xD1\x87\xA3\x56\x6B\xE5\x17\xA4\x7C\xE9\xBE\x88\x1F\x20\xE5\x3F\xE5\x6D\xE1\xF9\x45\xE8\xF4\x87\xE7\x9D\x06\xC5\x4B\x01\x13\x9F\xA2\xBC\x3D\xAB\xAD\xF1\x27\xC8\x9A\x08\x7A\xA3\xDB\x10\x82\x6A\x19\x71\xB3\x75\x8F\xCD\x8B\x22\x34\xAB\xB0\xB6\xC0\x1F\x1B\x1E\x0B\x36\x7B\xD7\xA6\x9F\x50\xEF\xC0\x23\x87\xE0\x2E\x00\x17\x21\xDF\xD4\x32\xB2\xE5\x81\x01\xE5\xE4\x60\x09\xFE\x1F\xE6\x89\xC5\xDE\x49\x45\x31\xE7\x7E\x94\x38\xC5\x8D\xBD\xE4\xC4\x77\x3C\x40\x4A\xB3\xEC\x99\xAF\x26\x53\xDF\x80\x17\x67\x67\xD5\x99\xDA\x97\x53\x6D\x76\x68\x9B\
xCC\x8B\x24\x18\xB3\x93\x77\xD7\x43\x33\xEB\x21\x58\x43\xCD\x7A\x60\x74\x4C\x76\x2F\x36\x4B\x6F\x11\x6A\x2B\xBD\x45\xCA\x38\xD9\x8F\x6D\x4A\x19\xFC\xDB\xAB\x83\xC1\x16\xE3\x80\x79\x46\x21\x46\x81\x64\x89\xB0\x2D\x86\x79\x87\x26\x82\xEE\x49\x3A\x4A\x02\x1F\x57\xDC\xA9\xA3\x4D\xC4\xD1\x28\x89\x57\x92\x87\x33\x4A\xE8\x68\x73\xC7\xD7\x77\xBC\x15\x50\xE2\x20\x4D\x65\x0F\x63\x8C\x4A\x36\x92\xFE\x61\xF6\xE0\xF4\xC5\x59\x15\xE7\x52\xF5\x91\xB3\x61\x6F\xF9\x65\xAE\x35\x00\xF8\x46\xF9\x9F\xB2\x9F\xBC\xF1\x8A\xDB\xCF\x37\x08\xB3\x87\x38\xB1\xC3\x09\x47\x76\x34\xF1\x4F\x6D\x36\x34\xA3\x43\x4F\x87\x7D\x64\x5B\x92\x7C\xA7\xAF\xFF\x42\x21\xAA\x7F\xC3\xDA\xD2\x0C\x16\x9C\x2C\x7B\x63\x4C\xA6\xA3\x65\x6F\xF3\x42\xF6\xA8\x72\x7F\x59\x87\x7D\xE6\x70\x59\x9C\x69\x8A\x54\x6B\xA6\x48\x89\x23\xA7\x10\x5C\x6B\x81\x1D\xDE\x13\xE4\xF5\x49\xD5\x93\xD7\x50\x3C\x42\x74\x98\x4E\x1B\x5E\xE8\x89\xDE\xB0\x73\x5B\xDF\xCC\xB7\xED\xD2\x59\x5E\x81\x22\xEB\xAD\x48\xFE\xCD\x30\x31\x1F\x5F\x99\x71\x56\x68\x93\x01\x51\x6E\xAF\x28\xEC\x60\xD8\x60\xEA\xB7\x63\x93\xCF\x68\xAB\x4D\x6D\xB5\x67\xB4\x15\x2B\xA0\x4A\xC9\xA4\x41\x10\xA4\xE6\x29\x48\xCF\x15\xF3\x5E\x68\xE6\xD8\x55\x8C\xDA\x43\xF0\x1E\x6A\x5C\x6D\xE2\xD4\x40\x9B\xE2\xE3\x81\xE5\x81\x9A\xBE\xE7\x45\xD6\xB4\xDE\xAA\xD0\x9F\xEE\xC3\x2F\x72\xEC\x38\x06\xDB\x82\x49\xA4\x67\xA6\xD1\x5D\x9B\x8C\x9C\xDC\x75\x93\xB1\x5A\xED\x50\x3D\x37\x19\x4B\x49\xF3\x99\x4C\x46\x1D\x66\x9C\xA6\xBE\x4E\xA6\x3E\x3E\x00\xD6\x3F\x2C\x27\xD0\x60\xEA\x75\x93\x51\x42\x55\xA3\x84\x81\xEE\x13\x9D\xAA\x61\x0F\x96\x1B\xED\xC1\x60\x26\x0D\xE5\xDC\xD4\x5E\x99\xB1\xE6\x0B\x56\xE7\x27\x53\xDA\x43\x42\x95\x71\x3A\x18\xA1\xA6\x71\x71\x0F\xCD\xBA\xFF\xE7\x56\x8C\x41\x8E\x9F\xFC\xCB\x38\x7E\x53\xCE\x29\x62\xD9\x2C\xC2\xE3\x13\x02\xAF\x3C\xD0\xE7\xCE\xF4\x85\xD7\x68\x69\xC1\xB5\x2E\x66\xF0\x54\x42\x4B\x8B\x71\xB0\x37\x8C\x19\x01\x26\xCA\xBF\x94\xB8\x50\xF0\x39\x64\xDD\xE7\x49\x80\x1E\x39\x7A\x5C\x41\xBC\x03\xA7\xAF\x4D\xF7\x43\xDE\xB8\x8C\x93\x32\x4F\xCE\x33\x92\xA3\x37\x3F\x72\x84\xEF\x9E\x98\
xC8\x20\x57\xFA\x13\x3B\x32\x94\x7D\xE1\x34\x6E\x3D\x52\x83\x67\x5D\xA0\xBF\x9B\x21\x1E\x53\x03\xAB\x18\x48\x21\xC9\xB5\x8C\x03\xAC\x48\xEB\x37\x0D\xE4\xFF\xCB\x05\xCD\x86\x8C\xCE\x3C\xC1\xA8\xC0\x67\x9C\x19\x71\x37\x13\xC7\xC7\x4C\xF1\x46\x3E\x8F\x9D\xEB\xAD\xAF\xD2\x79\x24\xF7\xC8\xF2\x14\xDF\x5E\x21\x84\xAB\x11\x32\x20\xBD\x69\x4A\x05\x6C\x5C\x85\x70\x8B\x22\xE2\x17\xC6\xC1\xD3\xA9\xFE\xB0\x91\x6E\xA7\xAB\xAD\x59\x5A\xE3\xD4\x84\x7C\xD6\x04\xC5\x26\xA8\xB5\x26\x90\x2B\xB1\x79\xAF\xC0\xF0\x86\x45\xBD\x13\x54\xD5\xFB\x7F\x6D\x43\xCE\x3D\x9C\x34\xF0\x71\x1C\x30\x85\x6F\xD6\x2C\x95\xD6\xDA\xA4\x55\x7C\xBB\xF2\x71\x1C\x30\x5E\x05\xC9\x85\x46\xBC\x0A\xC2\x5D\xCF\xE6\x2F\x14\xBE\xCD\xC2\x5F\xBA\xF9\xEB\xF6\x2C\xD4\xB0\x04\x0C\xE0\x5F\xD5\x57\x09\x41\xE4\x7F\xE6\x05\xA6\xDA\x47\x74\x07\x84\x3C\x19\x26\xA3\x45\xB8\x38\x89\xA1\xFF\xC8\x8B\xBC\xED\x31\x33\x3F\x60\x78\x8E\xCE\xB1\x74\x3F\x07\xFE\x4A\xE3\xCD\x8D\x27\x09\x21\x88\x8B\x7C\xFF\xC6\x37\x46\xFC\xA8\x54\xDF\xF7\xBF\x28\x37\xD9\xDA\x9F\x1F\xE1\x69\x4C\x4C\x5B\x3B\x41\xDC\x6F\x78\xED\xCC\xEE\x30\xCB\x6A\x91\xDC\xF0\x59\x88\x49\x45\xC8\x43\x26\x6A\x85\x8E\x30\xD4\x04\x17\xA1\x8C\x53\x9B\xBF\xDA\x32\x4B\x20\x68\xA4\xEE\xCF\xBE\x28\x17\x41\xF3\x3C\x1E\x71\xD8\x31\xB1\xE1\xF9\x71\x30\xDD\x0F\xBC\x48\xBF\x1D\xBF\x8F\x7E\xBB\xAA\x8F\xB4\x30\xED\x03\x91\xEA\xCB\x12\xF5\xB7\x12\x37\x79\xCB\x7C\x94\x3D\xB3\x83\x56\x70\xB3\x80\xA9\xB0\xC3\xFB\xEE\x4E\x00\x23\x78\x1F\x4F\xD8\x36\x43\x22\x9E\x55\xFF\x5F\x87\xEC\x31\x9D\xEE\xFE\xB1\xD2\x37\x8C\xE7\xCB\xD1\xB1\x7E\x77\x33\x39\x87\xF9\x9A\x96\xC9\x17\x8C\xE0\x06\xCC\x06\xA6\x1D\x04\x86\x50\x08\xB0\x88\x3D\x86\xFC\x3B\x46\x21\x9B\x1B\x5E\x8F\xFE\x85\xF7\x86\xC9\x29\xFE\x7F\xF6\xDE\x03\x3C\x8A\x6A\xFD\x1F\xFF\xBC\x33\xB3\x21\xD9\x84\x10\x12\x21\x09\x24\x67\x77\x15\x24\xC0\x04\x36\x60\x23\x52\x12\x7A\x17\xC5\x0A\x62\x58\x92\x4D\xB2\xB0\xD9\x5D\x36\x1B\x8A\x57\x25\x8A\x05\x2B\xA8\xD7\xEB\xF5\x5A\xC0\xAB\xEE\x90\xC4\x02\xF6\x02\x82\x15\x2C\x28\x28\xCD\x1E\xEC\x35\x22\x16\x4A\x80\xF9\x3F\
x67\xE6\x4C\x98\x2C\xA8\xF7\xDE\xEF\xEF\xF7\x7F\xBE\xCF\xF3\xBB\xE7\xE1\xC3\xBB\xF3\x9E\xF7\x94\x39\x73\x66\xE6\x94\xCF\x3B\x99\x2D\x08\xD2\xCA\x1C\x73\x2D\xDD\xF4\x26\x5A\x66\x3A\xA6\x43\x7C\xC4\xC5\xA0\x08\xCC\x3E\x5E\x78\x16\x29\xC6\xE8\x4F\x7C\xB9\xD0\x31\x87\xCF\x6B\xF9\xC5\x97\xBB\x0B\xE2\x82\xF1\x48\x93\x8D\xDC\x8F\xBC\x8D\x8D\x61\x0B\xD7\xCE\x13\xC5\xA6\x88\x67\x95\x32\xC7\x20\xA4\xA4\x98\x6B\x99\x29\x7C\xF0\x27\xB9\x8D\xA5\x64\x29\xE6\x31\x87\x9A\xE6\xD0\x35\xC5\x58\x8C\x38\x6A\x8D\x43\x36\xFE\x4A\x8B\x61\xE1\xE0\x9D\xC0\xD1\xB1\xED\xEF\x7B\xA5\x5A\x4C\xAC\x36\xA7\xA8\x14\xD3\xE1\xC8\xD8\x1C\x11\x5B\x07\x6D\xD4\x50\xA3\x97\x48\x6D\xBE\xE9\x6D\x6B\xCB\xA7\x48\xC9\xFF\x6D\xF0\xFF\x36\xF8\xFF\x2B\x0D\x7E\x58\x3A\xF2\x29\x12\xF3\xE9\x64\xB6\xAF\xF1\xE7\x28\x25\xF1\xD1\x28\xF3\x62\x38\xC4\xFE\x9A\xF9\xEA\x48\x3A\xD2\xF2\x92\xD5\xF2\x8A\xF9\x07\xF3\xE6\x98\xAC\x18\xC5\x2D\xCF\x11\x9F\x05\x39\xD2\xFC\xC9\xE6\x59\x28\x66\xBD\x1D\x46\xCB\x27\x8B\x6F\x9C\xFD\x9B\x2D\x2F\xCF\x3E\xD2\xF8\x8A\xE0\x89\x49\x73\x4C\xCE\xB9\xF8\x83\xED\x6E\x07\x1F\x62\x3B\x44\xE3\xCB\x6D\x8D\x6F\x78\x8A\x19\x8D\x2F\xB5\xFF\x00\x8E\xF9\xA7\x9C\x14\xF3\x4F\xC3\x5C\x32\xDB\x20\xBE\x99\x8D\x2F\xD9\x1A\x3F\x45\xF4\x1B\x5B\xE3\xDB\xFF\xB8\x63\x5B\x37\x53\x4A\x2F\x99\xDD\xBE\xFD\x15\x63\x87\x51\xF9\x6F\xFB\xFF\x6F\x69\x7F\x1F\xC9\x8B\xCC\xBF\x18\x68\x3D\x5E\xA4\x49\xC2\xEF\xE9\x78\xD9\x6D\x54\xC4\xF8\xB2\x9B\x87\x0C\xEF\x38\xD9\x2D\xCD\x36\xFC\xA0\xDC\x64\x50\xDE\x0D\x97\x12\x63\xEB\x3E\xD5\xED\xF0\x20\xB5\x74\x11\x7F\xB6\xCC\xE1\x4D\x3B\xBA\xBB\xE9\x93\x7C\x90\xC8\xB1\xC8\x4D\xAA\xF1\xBD\x75\x9E\x93\x6C\xFE\xD1\x2F\x3E\x35\x1A\x6F\x51\x68\xA8\xB4\x9E\x6A\x8F\x77\x94\xAE\x37\x3D\x0F\x8C\x3F\xED\x38\xF7\x78\xA5\xF4\x13\x63\x15\x52\x29\x5D\x4F\xE3\x3A\x92\xF1\xF5\x6A\xB9\x34\x29\x56\xBA\xBE\xD3\x5C\xB7\x23\xCA\x2F\x12\xD7\x1B\xC3\xB4\x42\x48\x46\x1E\x49\x47\xE5\xF1\xA3\xF1\x57\x06\x0D\xD2\x5B\x69\x52\xCC\x2D\x97\xA6\xC5\x4A\xEB\x9B\xE5\xB9\x51\x77\x92\x91\x45\x86\xC8\x42\x29\xDD\x6B\x94\x46\x85\x90\
x8D\xAC\xC8\xCC\xAA\x54\xB1\x27\xE5\xA9\x8C\x2A\xE8\x3A\xE6\xBA\x89\xE7\xA0\xEB\xF2\x84\x8E\x94\x5A\x7A\x48\x97\xCD\x93\x13\x3B\xAD\x70\x2B\xA9\x5B\x8C\xCF\xF6\x98\x3E\xC4\xA5\xBA\xF1\x27\x73\xCD\xEF\x47\x97\x92\x98\x40\xE9\x9D\x84\xB2\x74\xD8\xDC\xD2\x7A\x8A\x8A\x4F\x4C\x97\x26\xCD\x2B\x5D\xCF\x8F\x50\x2A\x71\x4B\x9E\xBB\xF1\xF9\x52\x5D\x7F\x09\x13\xBA\x9B\x1F\x2C\x4D\x4C\x29\x19\x47\x69\xF3\x4A\x9B\x8D\x94\x56\x3E\x6D\x19\x97\xCA\xE2\x13\x90\x96\xC6\xF0\xB9\x2C\xCD\x9C\x57\xBA\xFB\x98\x09\xDA\xF2\x3B\x92\x83\x92\x7A\x36\xD1\xA2\x52\xF1\x8D\x48\xCB\xE7\x0A\xFC\xF2\x82\x5F\x50\x63\x7A\x67\xA8\x0C\x4F\xB8\x66\x7E\x81\xDC\x54\x2A\x9B\x7F\xE0\x55\x36\x3C\xD8\x8D\x98\xDD\x3C\xC6\x60\x67\xA5\xBA\xA5\xD4\xD4\x67\x56\xC9\x32\x4A\xEB\x93\x53\xEF\x6F\x92\x6B\x63\xD1\x40\xA8\xAA\xD8\x1D\x08\xCD\xF3\x05\x03\x15\xEE\xBA\x50\xA0\x3C\x5C\xE1\x87\xAF\xAA\x2A\xEA\xAF\xF2\xC5\xFC\xB5\x3C\x2E\x86\x70\xC4\x57\x16\xAA\xAB\x99\xE5\x8F\x96\xC5\xC2\x65\xB3\x2A\x8F\xA4\x30\xB5\x86\x81\x2F\x1A\x88\x55\x97\x45\xC3\x75\xA1\x8A\x3F\x88\x8F\x04\xEB\x6A\xFF\x20\xBA\x26\x10\xFA\xE3\xF8\xBA\x60\x2C\x10\x09\x2E\xFC\x03\x93\x8A\xC0\xBC\x40\x85\xFF\x98\x06\xB3\x02\xB1\xDA\xB2\xDA\xEA\x40\x65\x0C\xDE\xA2\x01\x03\x4F\x3A\xF9\x94\x53\x4F\x1B\x84\x23\xE1\x88\xD2\x37\xAB\xBC\xC2\x5F\x89\x84\x70\x24\xBE\x74\xF8\x88\x91\xA3\x46\x27\xC6\x9B\xDA\x31\x63\xC7\x8D\x9F\x30\x71\xD2\xE4\x33\xA6\x9C\x79\xD6\xD4\xB3\xCF\x39\xF7\xBC\xF3\x2F\x98\x66\xE6\x57\x55\x1D\x98\x3D\x27\x58\x13\x0A\x47\xE6\x46\x6B\x63\x75\xF3\xE6\x2F\x58\x78\xF1\x91\x3C\xFB\xF6\xFF\x3F\x9B\x5F\x61\x19\x7A\xD6\x16\x9B\xFF\xDC\x3D\x6B\x51\xEB\x8F\x15\x20\x54\x17\x0C\x22\x16\xAD\xF3\xA3\xD2\x17\xAC\xF5\x23\x10\x0C\xFA\xAB\x7C\x41\x77\x5D\xC8\x5F\x5B\xEE\x8B\xF8\x2B\xDC\xE5\xD5\xBE\xA8\xAF\x3C\xE6\x8F\x1E\x89\x8B\x55\x16\x9E\xD6\x76\x64\x76\x1A\xB7\x69\x6E\xB3\xEE\x59\x65\xB4\xF1\xEC\xDA\x70\xA8\x6C\x7E\x34\x10\xF3\x47\xCB\xFC\x35\x81\x98\xE8\x38\xC5\x6E\x2B\x7D\xD4\x1F\x89\xA2\xA7\x77\xC0\x02\xC3\xBC\xC6\x17\x0C\x86\xCB\x8F\xC4\xCE\xAA\x0B\x04\x63\
x81\x90\xBB\xDC\x57\x5E\xED\x77\x07\x42\x15\xFE\x05\xE8\x59\x79\x8C\x1E\x18\x9E\xE7\x8F\x56\x06\xC3\xF3\xFF\xBC\x73\xF6\xAC\x38\x96\x8D\xBD\x36\x7F\x96\x85\x7B\x41\x5B\xF7\x29\x76\xF3\xFF\xDD\xE5\xE1\xD0\x3C\x7F\xB4\x36\x10\x0E\xD9\x62\x7C\xB3\x7E\x2F\xA2\xA2\xC2\x1D\x0E\xF9\xDB\x6B\x6C\x47\x46\xAF\x4F\xB0\xA8\xA9\x0B\x1E\xE9\xB3\xBE\xD0\x11\xF3\xB2\x90\x71\x6B\xB6\x8B\x2C\x0B\x85\x63\x47\x14\x61\x5B\x6F\x5F\x60\x3F\x08\xF9\xAB\x50\x13\x31\x6E\xCF\x80\xFD\xAE\xAE\x2D\x8B\xFA\x42\x55\xFE\x62\x77\x79\xB8\x26\xE2\x8B\x06\x6A\x45\xED\x8F\x8A\x6D\x3B\xB7\xFE\xB8\xAC\x08\x97\x79\x71\xD9\xD1\xF7\x4C\xCF\x59\xE8\x19\xE6\x8D\xDE\x73\x01\xCC\xBE\x52\x5B\xEC\x8E\x45\xEB\x42\xE5\xBE\x98\xDF\x5D\xE9\x0B\x04\xFD\x15\x47\x22\xAA\xFC\x31\x77\x5D\x20\x14\x6B\x1F\x71\xF4\x23\x09\xE7\x24\x1F\xC1\xF9\xC9\xC0\x74\xDB\xF1\x45\xC9\x30\x3B\xF6\xAC\x70\x38\xE8\xF7\x85\x20\xAE\xBC\x99\x19\x7C\xD1\xA8\x6F\x21\xC2\xB3\x66\xFB\xCB\x63\xFC\x2E\x68\xBB\xBF\x52\xD0\x11\x04\x37\xAE\x82\x1F\x2B\x50\xDF\xB5\xB9\x0B\xDC\x4E\x37\xA1\xC0\xDD\xDB\x4D\xE8\xEF\x2E\x73\x7B\x01\x2F\xBC\x64\xDD\x8F\x00\xA6\x89\xDF\x6E\x21\x7D\x00\x2E\x06\xD0\xAC\xEB\x3A\x84\x7C\x02\x00\xC7\x57\x92\x69\xC3\xE5\x7A\x00\xDB\x13\xD2\xEE\x04\xF0\x71\x82\xAE\x19\xC0\x6F\x09\xF9\xED\x03\x70\x38\x41\xC7\x7F\x70\x2C\x14\xE9\x0C\x49\x40\x7F\xA3\xB2\x9D\xDB\xC0\x2B\x6F\x9D\xC0\x20\x91\xBE\x88\x4C\xE4\x88\x63\x2E\x07\x10\x70\x6A\x42\xDA\x41\x04\x8C\x4D\xD0\x8D\x27\x60\x7E\x82\x6E\x01\xC1\x00\x0F\xD7\x88\x3C\x17\x12\x70\x59\x82\xDD\x22\x82\x81\xAD\x87\x4D\x1B\x2E\xEB\x09\xE0\x78\x49\x9C\x07\x97\x97\x93\x09\x1E\xDE\x11\xFA\x2B\x08\xB8\x2A\x21\xBF\xAB\x09\x06\x78\x78\x4B\xD8\x5D\x43\xC0\x92\x04\xBB\x6B\x09\xB8\x4E\xD8\x6D\x12\x76\xD7\x13\x70\x43\x82\xDD\x4D\x04\x03\x3C\x9C\x21\xEC\x96\x92\x09\x1E\x5E\x17\xBA\x65\x64\x82\x87\x37\x84\xEE\x66\x02\x6E\x49\xC8\xEF\x56\x82\x01\x7B\xB9\x7F\x25\x13\x3C\xBC\x2D\x74\xB7\x91\x09\x9F\xAD\x4F\xFD\x8D\x4C\xF0\xF0\xAE\xD0\xDF\x4E\x26\x78\xD8\x22\x74\x7F\x27\xE0\x8E\x84\x72\xFF\x41\x30\x70\x9F\xB0\xE1\xF2\
x2E\x32\x61\xCF\xEF\x6E\x32\xC1\xC3\x36\xA1\xBB\x87\x4C\x5C\x61\xB5\x3B\x80\xE5\x64\x02\xB6\x3E\xBC\x82\x80\x07\x12\xCA\x8D\x13\x0C\xF0\xF0\xBE\xB0\xD3\x08\x58\x99\x60\xD7\x40\x30\x60\xB7\x7B\x90\x80\x87\x12\xEC\x1E\x26\x18\xB0\xDB\x3D\x42\xC0\xAA\x04\xBB\xD5\x04\x3C\x2A\xEC\xDE\x13\x76\x8F\x11\xF0\x64\x82\xDD\x53\x04\x03\x3C\x7C\x20\xEC\x9E\x26\xE0\x99\x04\xBB\x35\x04\xAC\x4D\xD0\xAD\x23\x18\x38\x4D\xA4\xE3\xF2\x65\x32\xC1\x83\x64\xDE\x7E\x78\x85\x4C\x18\xAE\x34\xC2\xF6\x55\x32\x71\x58\xDC\x1B\x3C\x6E\x03\x01\x1B\x12\xD2\x6E\x24\x13\xF6\xB4\xAF\x91\x09\x7B\xDA\xD7\x09\x78\x3D\x21\xED\x1B\x64\xC2\x9E\xF6\x4D\x32\x61\x4F\xBB\x89\x80\x0F\x13\xCE\xED\x23\x82\x81\xD5\xC2\x8E\xCB\x8F\x09\xF8\x31\xC1\xEE\x27\x82\x01\x7B\xB9\x7B\xC8\x84\xBD\xDC\x9F\xC9\x84\xBD\xDC\x5F\x08\xF8\x35\x21\xBF\xDF\x08\x06\x78\x58\x2E\x6C\xF7\x92\x09\x1E\x36\x0A\xDD\x3E\x02\x5C\x52\xFB\xB4\x6E\x09\x06\x78\xB8\x4C\xD8\x1D\x2F\x01\x03\x13\xEC\x8A\x25\x18\xE0\xA1\x6F\x1F\x53\x9E\x2E\x01\x83\x13\xEC\x86\x48\x30\xC0\xC3\x0C\x91\xDF\x50\xC9\x04\x0F\x05\x22\xED\x30\x09\x28\x91\x80\x61\xD6\x71\x1F\x3E\x73\x00\x86\x27\xE4\x37\x42\x82\x01\x1E\x86\x88\xFC\x46\x4A\x26\x78\x18\x25\xDA\x6A\x94\x64\x82\x87\x31\x42\x37\x5A\x02\xCE\x48\xC8\x6F\x8A\x04\x03\x2E\x51\x2E\x97\x67\x4A\x26\xF2\x84\x8E\xCB\xB3\x24\x13\x4C\xE8\xB8\x9C\x2A\x99\xE8\x27\xEA\xC1\xE5\xD9\x12\xC0\x31\x40\xE8\xB8\x3C\x57\x02\xCE\xE3\x6D\x28\x74\x5C\x5E\x20\x01\x1C\xA7\x08\x1D\x97\xD3\x25\x80\xE3\x64\xA1\xE3\xF2\x42\x09\xE0\x38\xE3\x01\xB3\x5C\x2E\x67\x4A\x26\xEC\xF9\xF9\x24\x80\x63\x82\xB0\xE3\xB2\x5C\x32\x51\x64\xBD\x9F\x74\x5D\xF7\x4B\x00\x47\x81\xB0\xE3\xB2\x52\x32\x31\x52\xE8\xB8\xAC\x96\x4C\xF4\x17\x69\xB9\x0C\x48\x00\x47\xA1\xD0\x71\x39\x5B\x02\x66\x27\xA4\x9D\x23\x99\xD8\xDB\xDB\xD4\x71\x19\x94\x4C\x94\x0A\x3B\x2E\xC3\x92\x09\x7B\x7E\x73\x25\x80\xE3\x90\x48\xCB\x65\x54\x32\xD1\x57\xD8\x71\x59\x27\x01\x1C\x7D\x84\x8E\xCB\x4B\x25\x80\xE3\x2B\x91\x96\xCB\x7A\xC9\xC4\x89\xC2\x8E\xCB\x2B\x24\x80\x63\x84\xA8\x0B\x97\x8B\x25\x13\x76\xBB\x6B\x24\
x80\xA3\x8F\xB0\xE3\x72\x89\x64\xC2\x6E\x77\xAD\x04\x70\x3C\x27\x74\x5C\x5E\x27\x01\xD7\x4B\x40\x2F\xA1\xE3\xF2\x06\x09\xE0\x78\x46\xE8\xB8\xBC\x45\x02\x38\x7A\x0A\x1D\x97\x77\x4B\x00\x47\x17\x51\x2E\x97\xF7\x48\x26\x32\x85\x8E\xCB\x51\xB2\x89\xB3\x45\xFF\xE6\x32\x22\x03\xB5\x72\xFB\x3E\x3E\x4F\x06\xE6\x27\xE8\xFE\x22\x03\x97\xCA\xED\xDF\x47\x8B\x64\x13\x3C\x58\xA3\xB9\xAB\x65\x13\xC6\xB9\x0A\xDD\x12\x19\xB8\x4E\xE8\x7A\x0A\xDD\x0D\xB2\x09\x1E\x4A\x84\xEE\x26\x19\x58\x2A\x74\xC3\x84\xEE\x66\x19\xB8\x57\xE8\xAC\xB1\xD9\x7D\x32\xD0\x94\xA0\x7B\x50\x36\xF1\xBE\x68\x17\x2E\x1F\x92\x81\x47\x64\xE0\x03\xA1\xE3\x72\xB5\x0C\xBC\x20\xB7\x1F\xC3\xBD\x28\x03\x1C\xBB\x84\x8E\xCB\x97\x64\xE0\x8D\x04\xBB\x37\x65\x80\x63\xBD\xD0\x71\xB9\x49\x06\xDE\xE2\x79\x0A\x1D\x97\x6F\xCB\xC0\xDB\xA2\x7E\xC9\xA2\x7E\x9B\x65\x13\x2F\x0A\x3B\x2E\xB7\xC8\x00\xC7\x06\xA1\xE3\x72\x9B\x0C\x70\x6C\x11\x3A\x2E\xB7\xCB\x00\xC7\xEB\x42\xC7\xE5\x0E\x19\xE0\xD8\x67\x3D\x9B\x75\x5D\xDF\x29\x03\x3F\x26\x5C\xB7\xDD\x32\x0C\x34\x0A\x3B\x2E\x7F\x92\x01\x8E\x55\x42\xC7\xE5\x1E\x19\xE0\xE8\x20\xEA\xCB\xE5\xCF\xB2\x89\x1B\x84\x1D\x97\xBF\xC8\xC0\x2F\xE2\xDC\x5E\x16\xFA\x5F\x65\x13\x2B\xC4\x31\x97\x7B\x79\xDD\x12\xEA\xB2\x5F\x86\x01\x1E\xF6\x0B\xDB\x03\x32\xD0\x9A\x60\x77\x48\x06\x74\x61\x67\xBD\x53\xA0\x00\x9D\x14\x53\x37\x45\xD4\x31\x43\x01\xFA\x2B\xED\xFB\x81\x57\x01\xCE\x50\xDA\x5F\xB7\x29\x0A\x50\xA6\xB4\x3F\xDF\x99\x0A\x70\xB9\xD2\xBE\xDC\xEB\x14\x60\x5D\x82\x6E\xBD\x02\x03\x3C\x74\x12\x65\xBC\xA0\x00\x6F\x25\xD8\xBD\xAD\xC0\xC0\x4F\xA2\x0C\x2E\x37\xF3\xFA\x39\xDA\xDB\x15\x39\x80\x73\x1D\xA2\xAE\x22\x3F\x9F\x03\xB8\xDA\x01\x6C\x16\x69\xB9\x5C\x91\x01\xBC\x92\x61\xC6\xCF\xCC\x33\xE5\x86\x0C\x13\x76\xDD\xA6\x0C\x13\x76\xDD\xE6\x0C\xE0\x40\x06\xB0\x3E\x15\x6D\xE1\x50\x06\xA0\x27\xE8\x56\x64\x01\x3F\x66\x89\x34\xB7\x9B\x72\x77\x16\xF0\x6B\x56\xFB\xBE\xBB\x2F\x0B\x38\x94\xD5\xBE\xAF\xD5\xE7\x01\x1C\xB7\x7C\x65\xEA\xB8\xBC\x3C\x0F\xE0\xB8\x55\xE8\xB8\xBC\x22\x0F\xE0\xB8\x4B\xE8\xB8\x5C\x9C\x07\x5C\x99\x07\xDC\x23\
x74\x5C\x5E\x95\x07\x70\xDC\x2D\x74\x5C\x5E\x9D\x07\x70\xDC\x2F\x74\x5C\x5E\x93\x07\x70\x7C\x20\x74\x5C\x2E\xC9\x03\x38\x5E\xBC\xD6\xAC\x2F\x97\xCB\xF2\x80\x67\x45\x7B\x94\xFC\x62\xDA\xAE\xCD\x03\xD6\x25\xE8\x16\xE6\x9B\x50\xAE\x33\xF5\x5C\x5E\x9A\x6F\xE2\xCB\x74\x53\xC7\xE5\x4D\xF9\x26\x4E\x13\x76\x86\x64\xC0\x6D\xAC\xFD\xF5\xBD\x93\xC1\xC0\x2B\xA2\xAD\xB8\xBC\x87\x01\xF7\x30\x33\x5D\xE9\xA7\xE2\x1E\x61\x80\x9E\x90\x16\x2E\xA0\x83\xEB\x48\xBB\x73\x99\xEC\x02\x3A\xB9\xC4\x75\x10\x79\x66\xB8\x80\x2E\x09\x76\x39\x2E\x20\x3F\xC1\xCE\xED\x02\x7A\x25\xD8\x15\xB8\x80\xFE\x09\x76\x5E\x17\x70\x6A\x82\xDD\x69\x2E\x60\x58\x82\x5D\x89\x0B\x18\x95\x60\x37\xD6\x05\x4C\x4A\xB0\x3B\xD3\x65\xC2\x6E\x37\xD5\x65\xC2\xAE\x3B\xC7\x65\xC2\xAE\x3B\xCF\x65\xC2\xAE\xBB\xC0\x65\xC2\x5E\xC6\x74\x97\x09\xBB\x6E\x86\xCB\x84\x5D\x57\xE6\x32\x61\xD7\xCD\x74\x01\x55\x09\x65\x54\xBB\x80\x70\x82\x5D\xC4\x05\xCC\x75\x01\xE3\x85\x1D\x97\x51\x17\x50\xE7\x02\xCE\x15\x3A\x2E\xE7\xB9\x80\xF9\x2E\xA0\x42\xE8\xB8\x5C\xE0\x02\x16\xBA\x80\x7A\xA1\xE3\xF2\x62\x17\xF0\x17\x17\x10\x11\x3A\x2E\x2F\x71\x01\x97\xBA\x80\xCB\x84\x8E\xCB\x7A\x17\x70\x4D\x42\xFD\x96\xB8\x80\xA5\x09\xF5\x5B\xE6\x02\x6E\x4F\xB0\xFB\xBB\x0B\x58\x9E\x60\xB7\xC2\x05\x68\x09\x76\x2B\x5D\xC0\x23\x09\x76\xAB\x5C\xC0\xEA\x04\xBB\xC7\x5C\x26\x52\x84\x8E\xCB\xA7\x5D\xC0\x33\x09\x69\x9F\x75\x01\xCF\x09\xDD\x93\x42\xB7\xC6\x65\x82\x87\xBD\x42\xF7\xBC\xCB\xC4\x07\x9F\x89\xFB\xF7\x33\x5D\x7F\xC9\x05\xBC\x94\x50\xC6\x46\x17\xF0\x86\x48\x6B\xBD\xBB\xDE\x74\x99\xB0\xE7\xB7\xD9\x05\x6C\x49\xA8\xF3\x4E\x17\xF0\x5E\x42\xFD\xDE\xE7\x65\x0A\xDD\x5D\xD6\xBB\xC1\x05\xEC\x4A\x48\xFB\x85\xCB\x84\xFD\x3D\xF8\xB5\x0B\xF8\x26\x21\xBF\x6F\x5D\xC0\x77\x42\xB7\x4C\xE8\xBE\x77\x99\x80\xED\xFD\xF6\xB3\xCB\x84\xFD\xDC\xF6\xB9\x80\xFD\xC2\xAE\xDE\x7A\x0F\xBA\x80\x56\xA1\xBB\x42\xE8\x0E\xBA\x4C\xD8\xCF\xF7\x44\x8F\x09\x1E\xEE\x13\xCF\x92\x3E\x1E\x13\x3C\x94\x7E\x22\xC6\xBC\x1E\x13\xC6\xB5\x11\xBA\x01\x1E\x13\x3C\x88\xC7\x20\x26\x7B\x4C\x7C\x2E\xF2\xE7\x72\xA6\x07\x08\x0B\
x3B\xF1\x6A\x41\xC4\x03\x2C\xF2\x00\xBB\x85\x1D\x97\x8B\x3D\xC0\x95\x86\xDD\x91\x67\xD8\x93\x3D\x80\xB7\x7B\x98\x69\xBA\x89\xB4\x9B\x7B\x00\xDF\xF4\x00\xBE\x14\x69\x0D\xA9\x02\xFD\x54\x33\xDE\x7A\x0F\x7A\x55\xE0\x22\xB5\xFD\x7B\x70\xA6\x0A\xF8\xD4\xF6\x65\xCC\x52\x61\xC0\x68\xCF\xED\xA6\x6D\xB9\x6A\xC2\x28\xF7\x27\x53\x57\xA1\x9A\xE0\x21\x57\xD8\xF9\x55\x13\xDB\xB6\x99\xC7\x5C\x56\xAA\x00\xC7\x4E\xA1\xE3\xB2\x4A\x05\x82\x09\xE5\xD6\xA8\x30\x60\xB4\x93\xB0\x0D\xA9\x26\x78\x90\x45\x19\x61\xD5\x04\x0F\xBB\x84\x5D\x44\x35\xC1\xC3\xA7\x42\x17\x55\x81\xDA\x84\x32\xEA\x54\x60\x5E\x82\xEE\x32\x15\x58\x24\xD2\xBE\x20\xD2\xD6\xAB\xC0\x67\x09\x76\xDF\xA9\x40\x4B\x82\x6E\x8F\x0A\xFC\x9C\xA0\x43\x21\xD0\xB3\x10\x58\x21\xEE\x41\x2E\x7B\x15\x02\xBD\x12\x74\x85\x85\x40\x61\x82\xAE\x24\x0E\xD4\xC4\xDB\xE7\x57\x1F\x07\xEE\x4C\xD0\x1D\xAF\x01\xFD\xB5\xF6\xBA\x01\x1A\x10\x4E\xD0\x2D\xD4\x80\x4B\x12\x74\x97\x6A\x30\x60\xDC\x03\x75\x66\xD9\x97\x69\xC0\x35\x09\x76\xD7\x6B\xC0\x0D\x09\xBA\x1B\x35\x18\x30\xAE\xE3\x34\xF1\x9C\xD4\x80\x5B\x13\xEC\xFE\xAA\xC1\x40\x5B\xDF\x03\xF0\x37\x0D\x68\x48\xB0\x6B\xD4\x60\x80\x87\x35\x22\xBF\x26\xCD\x04\x0F\xAB\x85\xEE\x41\xCD\x04\x0F\x4F\x08\xDD\x43\x9A\x09\x1E\xD6\x09\xDD\xC3\x9A\x09\x7B\x7E\xAB\x34\x13\x3C\xB4\x08\xDD\x6A\xCD\x04\x0F\xDB\x85\xEE\x51\xCD\x04\x0F\xDF\x09\xDD\x63\x9A\x09\xE3\xFA\x88\x31\xF3\xE3\x1A\xB0\x2E\xE1\x3C\x5E\xD4\x80\x97\x12\x74\x2F\x6B\x30\x60\xDD\x6B\x5C\xBE\xA2\x99\xE0\x61\xAD\x28\xE3\x55\xCD\x04\x0F\x1B\xC5\xF5\x98\xDA\x64\x62\xE6\x41\xF1\x4E\x3D\xA8\xEB\x91\x26\x60\x5D\x13\xE0\xAD\x16\xE3\x8A\x6A\x5D\xF7\xE8\x40\xB1\x39\x82\x6F\x1B\xA3\x97\xEA\xC0\x34\xBD\xFD\x18\x1D\x0A\xA1\x97\x62\x2E\x38\x15\x08\xBB\x02\x85\x70\x86\x42\xD8\x29\xEC\xB8\x5C\xA5\x10\xDE\x4D\xB0\xDB\xA9\x10\x5A\x13\xEC\xEA\xD3\x08\x8F\xA6\x99\x76\xD6\xFC\x72\x7D\x1A\x61\x4F\x1A\xB5\x9B\xBF\xAD\xC8\x21\xAC\xCB\xA1\x76\xF5\x5B\x9F\x43\xF8\x24\x87\xDA\xD5\xAF\x24\x44\x28\x0B\xB5\xB7\x9B\x19\x22\x2C\x0A\xB5\xB7\xC3\x37\x04\xCF\x37\xA6\xDD\xF1\xC2\xEE\xF8\x6F\x08\
x23\xBE\x21\x7C\x2C\xEC\xB8\x0C\x47\x7C\x65\xF3\x7C\xC1\x3A\x7F\x99\xB9\xD9\xE3\x17\xBB\x3C\x6D\xFB\x7B\x46\x24\x8E\x98\xD5\x56\xFB\x82\xC1\xF0\xFC\xB2\xF2\x70\x64\xE1\xB1\x37\xF3\xEC\xDB\x67\xD1\x85\x65\x81\x50\xEC\xF7\x0D\x7C\xB5\x65\x95\xC1\xB0\xAF\x9D\x45\xE5\x9F\x19\x44\xA2\xC6\x34\xCC\x78\x4F\xA6\x09\x5C\x84\xE9\x98\x81\x42\xF4\x41\xBF\x3E\x28\xC0\x25\xE8\x8D\x1E\x40\x3F\x4C\xBF\x08\x15\xFE\x60\xA0\x26\x10\xF3\x47\xDD\x81\x5A\x77\x28\x1C\x73\xFB\xDC\xB5\x81\x50\x55\xD0\xBE\x5B\x89\xF2\x70\x5D\xB0\xC2\x88\xAD\x0B\x45\xFD\xBE\x0A\x77\xB4\x2E\xE4\x47\xFB\x30\x8C\xBF\xE3\xF8\x98\x0F\xC0\x5F\x70\x74\x48\x8C\xBF\x14\x80\x78\x5C\xA2\x2E\xE4\x5F\x10\xF1\x97\xC7\xFC\x15\x6E\x7F\xA8\xC2\x1D\xAE\x74\x07\x42\x91\xBA\x18\xDA\xB4\xE5\xC1\x70\xAD\xDF\x6D\x6C\xAD\xD9\xEB\x35\x03\x47\x67\x10\x0B\xCF\xF1\x87\x12\x72\xB4\x1F\x06\xFD\xA1\xAA\x58\x35\x2F\x23\x18\xB6\xE5\x75\x4C\x8B\xEA\x80\xCD\xC2\x7E\xE0\xAE\xAD\x36\x9A\x64\x96\xDF\x5D\x15\xF5\xFB\xB8\x26\x56\xED\x0B\xB5\xCF\xF2\x48\xAB\x45\x7C\x51\xAB\xFA\x09\xAD\xB2\x5B\x2C\xA1\xF4\x10\xDD\xFF\x02\x53\x96\x2C\x35\xE5\x92\x97\x4C\x39\x7B\x9F\x21\xEB\xAF\xEC\x57\x6A\x3C\x07\x32\xCB\x0D\x79\xE5\x8E\xDB\x0D\xE9\xDF\xB4\xA9\xB4\x32\x14\xE8\x8B\xCA\x50\x00\x95\x91\x68\x20\x14\xAB\x2C\x36\x4A\x0E\xD4\x44\x82\xFE\x1A\x7F\x28\xE6\xAF\x40\xA5\xB1\xD3\x7C\x0C\x7D\xA4\x2E\x56\x7E\xB4\x1A\x21\xF7\x50\xB7\xD7\x7D\xE2\x89\xEE\x1A\xF7\xD0\x21\xEE\x10\x6A\xF8\xB1\xA9\x35\x7F\xF3\xB8\x50\x9B\xB2\xC8\x38\x8C\x04\xEB\x6A\xED\xF6\xC2\x66\x88\xBB\x06\xB5\x41\xBF\x69\xDD\xB3\xB6\xB8\x67\x45\xB1\xDB\x1F\x8D\x86\xA3\xC5\x6E\xD4\x46\xCB\xFB\x07\x03\xB3\x6A\x22\x15\xFE\xF2\xFE\xB1\x85\x11\xBF\xC1\x4D\xE8\x57\x8D\xDA\xBA\x59\x65\xB5\x81\x8B\xFD\x65\xB1\x82\xDE\x47\x36\xB1\x8B\xDD\xE5\xD5\xFE\xF2\x39\xEE\x58\xB5\xDF\x5D\x1E\x0E\xC5\xFC\x0B\x8C\x8E\xE2\x1E\x3C\xC4\x3D\x08\xE5\xBE\x68\x74\xE1\x74\xEF\x0C\xF7\x90\x21\x66\xD9\xA6\xA2\x28\x51\x31\xC0\x54\x20\x50\x1B\x09\xCF\xF7\x47\x07\x14\x84\x7A\xC3\xA8\xE6\x49\xC7\x52\x85\x78\xE6\x03\xFB\x4C\x9A\x32\xB2\x6C\
x52\xE9\xF9\x67\x9F\x55\x3A\x79\xEA\xE8\x33\xCE\x9A\x54\x36\x60\xB2\xB0\x38\xED\x8F\x4C\x6A\x27\xFB\x26\x63\x5C\xA8\x32\x10\x0A\xC4\x16\xA2\xC2\x5F\x5E\x38\xD4\x6A\x89\xF2\x88\x7B\xB0\xBB\xC2\x5F\x6E\x6E\xBE\xF6\xAD\xF1\xD7\xA0\x3C\x52\xD8\xA6\x70\x0F\x76\xF3\x0C\xA7\x4E\x1D\x37\x6D\x14\xCF\x16\xA1\xF9\xE1\x68\x45\x2D\x2F\x32\xEA\xAF\xAD\x0B\xC6\x0A\x87\x1A\x94\x80\xF6\x99\x7A\x6A\x22\x15\x65\x81\xDA\xF2\x70\xA8\x36\x56\x56\xE1\x8B\xF9\x0A\x4C\xE3\xDE\x56\x4C\x6D\xB5\x2F\xEA\xAF\x68\x1F\x65\x54\x7C\xDC\xE4\xD2\x89\x13\xCF\x18\xC1\x4F\xA5\x7D\xFE\x56\xC2\x88\xBF\x3C\xE0\x0B\x16\xF8\x44\xCB\x78\x13\x23\xAC\xDC\xF8\xFB\x5C\xF4\x70\xB2\x61\xF0\x07\x80\x99\x20\x10\x8A\xF9\xAB\xFC\x51\x9E\xD3\x2C\x5F\xAD\x9F\x67\x36\x00\x15\x81\xAA\x40\xAC\xD6\x38\x87\xA8\xA1\x1D\x3C\xC4\x5D\x50\x74\xCE\xE0\xC1\x45\xA7\xF4\xE6\xBD\xC4\x3A\xC1\xDA\x68\xF9\x51\xD1\x09\x9D\x8D\xFF\x32\xC8\x2B\x46\x82\xB6\xEE\x61\x15\x6E\x5C\x0B\x7F\xC1\xAC\xDE\x56\x5F\x9C\xEF\x8B\x86\x8C\xBD\xF4\xF6\xBD\xD1\xF8\x3F\x50\xE3\x0B\xF6\x2B\x87\xA5\x34\x68\x3E\xFE\x68\xC8\x17\x34\x7B\xB0\x3B\x10\x72\x97\xF1\x8C\x79\x9D\xCA\x42\x15\x81\x79\x35\xE1\x8A\x62\x77\x24\xE8\xE7\x75\x8C\xFA\x23\xE1\x68\xEC\x77\xDA\x23\xF9\x43\xE0\xE6\xDF\x69\x27\xB3\x29\xDD\x9E\x21\x6E\x1F\x6A\x7C\x0B\x22\x51\x7F\xB9\x75\x8A\xBC\xF2\xD6\x31\xEA\x8C\x56\xE1\xBD\xD4\xFA\x35\x2F\xE8\x0F\xFD\xCB\xF7\x98\xAF\xA2\xE2\x5F\xBC\xC7\x6A\xEA\x82\xFF\xA2\x65\xD0\xC7\xAB\x11\x9C\xC5\xEB\x1A\x9C\x65\xD4\x32\xE8\xE3\x17\x80\x77\xB2\x09\xA5\x67\x95\x9E\x3D\xF5\x9C\xE1\xA5\x65\xC3\x4B\xA7\x8E\x1A\x51\x3A\x75\x94\xFB\x92\x4B\xDC\xF3\xF9\x89\x4E\x3E\x67\xE2\x44\x44\x79\x19\xE6\x09\x99\x89\x06\xF6\x29\x38\xC6\x5D\xD5\x7F\x40\xEF\xF6\xE9\xE6\x8B\xDE\xC1\x0B\xAD\xB3\x7A\xCA\xFC\xA3\xFA\x49\xDD\x51\x1A\xFB\xFD\x5E\x1B\xA8\x0A\xF1\x07\x43\x61\x11\xCF\xDC\x3A\x2A\xC2\x94\x22\x9E\xA4\x26\x5C\x11\xAA\x33\xBA\x96\xF8\x35\x78\x88\x7B\xCA\xC0\xA3\x1F\x18\x45\xA7\x98\x8F\x83\x63\x3D\x0C\xDA\x8C\xA3\xE1\xF9\xB5\xB6\xC2\xCB\xC3\xC1\xDA\xDE\xFF\xF2\x55\xFB\xD7\xAF\x05\xCF\
x97\x9F\x83\x2D\xC5\x00\xD5\x6D\x16\x7E\x54\x90\xF8\xFC\x08\x80\x43\xCC\xA9\x53\x01\x74\x14\x23\xD1\x2C\x00\xD9\x00\xF2\x61\x2C\x81\x19\x7B\x04\x3C\x8B\xBE\x00\xFA\x03\x38\x19\xC0\xE9\x00\x86\x00\x18\x21\xF6\xE7\xC6\x09\x6E\xC0\x54\xFE\x66\x13\x7B\xF5\xFC\x0D\x58\x05\x60\x0E\x80\x1A\x00\x73\x01\x2C\x02\xB0\x18\xC0\xB5\x00\xAE\x07\x70\x1B\x80\xDB\x01\xDC\x2D\xF6\xE3\xF9\xB0\xF6\x21\x3E\x8E\x16\x5C\x91\x75\x7C\x8E\xC5\xC7\xC1\x00\x36\x88\x3D\x7A\xFA\xFF\xB1\xBE\x0B\x8F\x51\xE7\xA5\xBF\x53\xEF\x86\x84\xBA\x3F\x77\x8C\xFA\x6F\xE1\x73\xCF\xBA\xA8\xDF\x3D\x2F\x10\x8D\xD5\xF9\x82\x08\x54\x98\xEF\xAB\x4B\x2E\x71\x47\xA2\xE1\xAA\xB2\xC2\xA1\x81\x50\x6D\xAC\x20\x50\x51\x58\xD4\xBB\x70\x68\xD0\x57\x1B\x2B\xE8\x8D\x82\x70\xA4\x3C\x5C\xE1\x2F\xE8\xDD\x9B\x1B\x17\xCC\x19\x17\xAA\x8D\x8D\xF0\x45\x62\x75\x51\x7F\x6F\x58\x71\x3C\xCA\x88\x29\x0D\xC6\x78\x76\xC7\xD2\x4F\xF2\xC5\xCA\xAB\xED\xFD\x80\xB7\xDD\x71\x00\xBA\x00\xE8\x2A\xDA\x31\x07\x00\xAF\x44\xD9\xF4\x68\x38\x1C\x9B\xD1\xEF\xF7\x0A\xF8\x43\x9B\xE1\x0B\x63\xFE\xB3\x8C\x61\x50\x41\xA0\xC2\xAC\x76\xC8\x48\x50\x58\xD4\x1B\x05\x21\x53\x53\x73\xEC\x53\x6B\x4B\xFB\x9F\x9C\x5C\x41\x38\x52\x26\xF2\x3A\xCB\x5F\xE5\x5F\x10\x99\xC8\xC7\xBC\xBE\xE0\x54\xE3\x3D\xDB\xFB\xE8\xF8\xB6\x86\xB4\x42\xAE\x58\x2B\xE9\x2E\xD6\x65\xF2\xCD\xA5\x65\x18\xD5\xE6\xCF\x93\xB9\x85\x43\xF9\x0D\x56\xD0\x9B\x9F\x49\x6D\x6C\x8E\xA9\xAD\x8D\xF9\xCA\xE7\x94\xF5\xB3\x62\x3C\x81\x48\xDB\xF5\x6B\x1F\xAC\x79\xC8\x09\x42\x8A\xE5\x99\xB6\xBD\x38\x6B\x9F\xAE\x97\x90\xD6\xBC\xCA\xCA\xC7\xCB\x8B\x33\xEE\x70\x18\xFF\x1B\x0F\x2C\xDF\x82\x32\xB3\xE4\x63\xB6\xE8\xA8\x9A\x48\x6C\xE1\x79\x81\x8A\x58\x75\x6F\x93\xE1\xC8\x1F\x6D\xC7\x5B\x53\x09\x83\x4F\x78\x3C\x3C\xFC\x49\xE2\x0B\x84\x6A\x0B\x02\xBD\xAD\x9C\xFF\x2C\x63\xDB\xA5\x0A\x98\xE3\x84\x40\xFB\x44\x3E\xFE\xD8\x2E\x2B\x33\x79\x68\xB6\xDF\x05\x91\xA8\xBF\x32\x60\x9A\x95\xF5\xE6\x49\x0B\x8A\xFE\xD5\xE6\x2C\x88\xD6\x85\x8E\x5C\x03\xA3\x32\xDE\xDE\x88\xD9\x73\x8F\x15\x0E\x8D\xFA\x2B\xDB\xE2\xFE\xF0\x06\xFA\x93\xF3\xFA\xB3\xF6\
x3C\x56\xBC\xD5\x1B\xFF\x93\x0E\xFC\x27\x97\xF7\xD8\xCD\xFC\xFB\x17\xB5\xDA\x57\x5B\x66\xFC\xFE\x9D\xAB\xDA\x2E\xDE\xDE\xCA\xC7\x2E\xE8\x3F\xE8\x7C\xB6\xB6\x6C\x0B\x7D\xC4\xF3\x99\xCF\x0B\x0B\xC1\x67\xA8\xE6\xB3\xDA\x1E\xEF\x05\x50\x04\x60\x80\x88\xE7\xF7\x60\x69\x68\x21\x2E\xAC\xE0\x71\x83\x70\xE1\x48\x5C\x58\x8B\x14\x38\x91\x06\xF3\x5B\x87\x17\x4E\xC5\x85\xF3\x8D\xB8\x52\x4C\x43\x19\xCA\xE0\xC3\xC5\xB8\xF0\xBC\xF6\xF7\xDF\x8A\x22\xF3\x1D\x72\xBF\x90\x56\x58\x59\x64\x72\x72\x12\xF5\x4D\xE2\xF8\xE1\x22\xF3\x9D\x69\x85\x67\x85\x7D\xA2\x7E\xAD\xB0\x5F\x5F\x64\xCE\xCF\xAD\xB0\x59\xD8\x27\xEA\x93\xF8\x5C\xB9\xD8\x17\x0C\xD5\xD5\x14\xCF\x68\xAB\x3D\xAF\xF9\xF4\xE2\x8B\x2C\x35\x37\x88\x54\xFB\x8A\x67\xA0\x7D\xAC\xA9\x9B\x5E\xEC\xAB\x2D\x0F\x04\x8A\x8D\x69\xF2\x22\x23\x46\x1C\x4F\x2F\x9E\x15\xF4\x85\xE6\xF0\x98\x14\xA4\x18\xAD\x34\xBD\xF8\x22\x4B\x37\xBD\xB8\x3C\x14\x8B\x06\xCD\x74\x2E\x2C\x32\xD3\x5A\xBA\xE9\xC5\xC6\x38\x9D\xC7\xF2\x5A\x4D\x2F\xBE\xC8\x3A\x9E\x5E\x5C\x15\xF5\x45\xAA\x79\x8C\x07\x97\xF1\x18\xEB\x78\x7A\x71\x90\x0F\x73\x78\x8C\xA8\xA3\x75\x3C\xBD\xD8\x98\xAE\xF2\x18\xB7\x99\xC6\x3A\x9E\x5E\x1C\xA9\x0B\x95\xC7\x8A\xC5\x24\xDF\x83\xFE\x28\x46\x09\xA6\x63\x26\xFE\x22\x2C\x45\xFC\xF4\xE2\xDA\x88\xAF\xDC\x6F\x9E\x4D\x47\xEB\x6C\x2C\xDD\xF4\xE2\xBA\x48\xC4\x2C\x9B\xB7\xD1\xF4\xE2\x8B\xAC\xE3\xE9\xC5\x7C\x3A\x25\x0A\x48\xEC\x1F\xD3\x8B\x2F\x12\xB1\xD3\x8B\x17\xB4\x3B\xE3\x52\x8C\x86\x0F\x95\xDC\xE2\x48\x04\x62\x03\xCC\xEB\x7B\xD9\x80\xF6\xD7\xFD\xBA\x01\xE6\xF5\x4D\xD4\xDF\x26\xEC\x57\x0C\x30\xC7\x5D\x6D\xFD\x4D\xD8\x27\xEA\x1F\x13\xF6\xCF\x0F\x68\xDF\x0F\x5F\x14\xF6\x89\xFA\x4D\xE2\x78\x67\x42\x3E\xCD\xC2\x3E\x51\xFF\x9D\xB0\xFF\x2D\x41\x7F\x58\xD8\x27\xEA\x53\x06\x9A\xF6\xC7\x0D\x6C\x5F\x6E\xCE\x40\xD3\x3E\x51\x7F\x82\x38\xEE\x97\xA0\x1F\x20\xEC\x13\xF5\x43\xC4\xF1\xD8\x04\xFD\x44\x61\x9F\xA8\x3F\x4F\x1C\xCF\x4A\xD0\x57\x0A\xFB\x44\xFD\x5C\x71\x5C\x3F\xB0\xFD\x7D\xB7\x4C\xD8\x27\xEA\xEF\x14\xF6\xF1\x81\xED\xDB\xE1\x61\x61\x9F\xA8\x7F\x46\xD8\xBF\x9C\x50\xEE\x46\
x61\x9F\xA8\x7F\x57\x1C\x37\x27\x94\xBB\x5B\xD8\x27\xEA\x0F\x08\xFB\xA4\x93\xDA\xF7\xAB\xCC\x93\x4C\xFB\x44\x3D\x1F\xB3\x14\x94\x9B\xEF\x54\xFE\xEE\x2B\x37\x5F\xA8\x03\x4E\x3E\x99\x3F\x9F\xEB\x62\x65\xE6\x33\xBA\xEC\xC8\xDB\x31\x18\x3E\x62\x5D\x1D\x38\xF2\x9B\xEB\x8F\x24\xE5\x31\x47\x8E\xAC\xB8\xEA\x40\x6F\x14\xC4\xC2\x31\x5F\xD0\xCC\xAF\x36\xE6\x8B\x05\xCA\xCB\xCA\x7D\xB5\xB1\xC1\x81\x50\x6C\x68\x41\x65\xD0\x17\xB3\x5E\xE3\xBD\xC1\xE3\xA3\x31\xDB\x6B\xFB\x18\x03\x81\x01\xDC\x2C\x70\xB1\xDF\x3C\xB2\x67\x68\xCE\x6B\x86\x16\x44\x0A\x23\xDE\xDE\x7F\xFA\xB6\xF9\x93\x77\xD5\x7F\x34\xB6\xFC\xC3\xA1\x40\xBB\x77\xE9\xB1\xDF\x9F\x11\x5F\x2C\xE6\x8F\x86\xDC\xB1\x70\xD8\x1D\xF4\x45\xAB\xFC\xEE\x42\xC3\xBF\x20\x10\x6C\xF3\x02\x08\x47\xCA\x8C\xA2\x8F\x31\x7C\x35\x86\xA0\xA2\x89\xDA\x42\xE9\x20\x60\xFC\x20\x60\xFA\x20\xA0\x76\x10\x70\xFD\x20\xA0\x61\x10\xF0\xD8\x20\x60\xED\x20\xE0\xF5\x41\xC0\xB6\x41\xC0\xFE\x41\x40\xE7\x62\xA0\x77\x31\x30\xAC\x18\x98\x54\x0C\x14\xBB\xDB\x46\xB5\x47\x5D\x38\x63\x15\xB3\xB6\xEC\xC8\xB5\x33\x57\x82\x8C\x05\x17\x9B\x6D\x5D\x20\x14\x2B\x3A\x85\x5F\x15\x73\x54\x1F\x42\x28\x6C\xCE\x6B\xDB\xAD\xCD\x1A\x0A\xCB\x7F\x41\xB8\xC9\xD4\xFA\xE7\xD6\xF9\x43\xE5\xFE\x36\xFD\x91\x65\xD7\xF2\xA0\xAF\xB6\xF6\xF7\xF4\x62\x81\xB5\x26\x50\x5B\x1B\x08\x55\xB9\x67\xB4\xFD\xEA\x6D\x2F\xB2\x37\x62\x51\x5F\x20\xC8\xF5\x17\xF2\x3A\xF9\xA2\x55\x75\x35\xFE\x50\xCC\x5D\x19\x8E\xBA\xA3\xFE\x88\x3F\x16\x88\x05\xC2\x21\x77\x38\xE2\x8F\xFA\x62\xB6\xDA\xD9\xE2\xF8\xB9\x63\x96\xAF\xE2\x0F\xED\x23\xFE\x68\xF0\x68\xED\x39\x67\x8F\x2E\x3C\xAD\xED\x28\xE4\xAB\xF1\x57\xB8\xCB\xCD\x31\xA7\xBB\x2A\x1A\xAE\x8B\xD8\x6E\x59\x63\xBC\x33\x08\x40\xB1\x6D\xBC\x73\xBA\x2D\x7E\xB0\x98\xBF\x0E\x15\xEB\xEB\x25\xC2\xEF\xC2\x1E\x3F\x5C\xCC\x6F\x47\x0A\x1E\xED\xE8\x84\xF8\x31\x22\xFD\x58\x31\xFF\xE5\xE3\xA9\x7F\xF5\x92\x1E\x35\x6B\x3A\xCB\x1F\xF1\xFB\x62\x7F\x34\x9D\xFA\xB7\x27\x62\x22\xFE\x58\x79\x56\xFB\xA2\x23\xF8\x85\x37\x7A\xBD\xE5\x6F\xE2\x16\xF3\xF6\xD9\x62\xAF\x67\x8E\x98\xBB\xAF\x73\x03\x41\x00\
x51\xA1\xAF\x05\xC0\x91\x46\x26\x2F\xF2\xE2\x63\xF8\xA3\x58\x7E\x28\x6E\x9B\xFF\x09\xFF\xFD\x89\xC0\xBA\x7C\xD3\x07\xE5\x73\x91\xF6\x0B\x00\x1C\xA3\xDD\xC0\x97\x36\xDF\x14\xBB\x4F\x8A\x6E\xF7\x45\x11\x3E\x28\xFC\x39\x6E\xF9\x94\xF0\xDF\x96\x2F\x09\x37\xB7\x7C\x48\xB8\xDE\xF2\x1D\xB9\x46\xD7\x75\xCB\x67\x84\xDB\xD8\x7D\x45\xEC\x3E\x22\x96\x6F\xC8\x3B\x36\x9F\x10\x9E\x8F\xE5\x0B\xF2\x96\xCD\x07\x84\xE7\x63\xF9\x7E\x6C\xB2\xF9\x7C\x70\xBD\xE5\xEB\x71\x86\xCD\xC7\xE3\x75\x9B\x6F\xC7\x1B\x36\x9F\x0E\x6E\x6F\xF9\x72\x6C\xB2\xF9\x70\xBC\x9D\xE0\xBB\x61\xF9\x6C\xBC\x6B\xF3\xD5\xD8\x62\xF3\xD1\xE0\xF5\xB4\xFB\x66\x58\x3E\x19\xEF\xDA\x7C\x31\xB6\x25\xF8\x60\x58\xBE\x17\xDB\x6D\x3E\x17\xC6\x3B\x5B\xF8\x5A\xBC\x6F\xF3\xB1\xE0\xF5\xB4\x7C\x2B\xDE\xB7\xF9\x54\x18\x63\x7A\xE1\x4B\xF1\xBE\xCD\x87\x82\xDB\x5B\xBE\x13\xEF\xD9\x7C\x26\xB8\xDE\xF2\x95\xF8\xC0\xE6\x23\xC1\xF3\xB1\x7C\x23\xF8\x6F\xBB\x4F\x84\xE5\x0B\x91\xE8\x03\x61\xF7\x7D\xB0\x7C\x1E\xB8\xDE\xF2\x75\xA0\x04\x1F\x07\xCB\xB7\x21\xD1\xA7\xC1\xEE\xCB\x60\xF9\x30\x70\xBD\xDD\x77\xC1\xF2\x59\xE0\x69\x2D\x5F\x05\x6E\x63\xF9\x28\x50\x82\x6F\x82\xE5\x93\x60\x8C\x11\x85\x2F\xC2\x72\x5D\xD7\x2D\x1F\x84\x8D\xBC\x9F\x0B\xDF\x03\x6E\x63\xF9\x1C\x5C\xA6\xEB\xBA\xE5\x6B\xC0\xF5\x96\x8F\x41\xDF\x3E\x47\x7C\x0B\x78\xB9\x96\x4F\xC1\x0C\x5D\xD7\x2D\x5F\x82\x82\x3E\xED\x7D\x08\x2C\xDF\x01\x6E\x6F\xF9\x0C\x0C\xD1\x75\xDD\xF2\x15\x18\x65\xF3\x11\x18\x63\xF3\x0D\xE0\xE5\xDA\x7D\x02\xEC\xBE\x00\x76\x1F\x00\x3B\xF7\xDF\xCE\xF9\xB7\x73\xFD\xED\x1C\x7F\x3B\xB7\xDF\xCE\xE9\xB7\x73\xF9\xED\x1C\x7E\x3B\x77\xDF\xCE\xD9\xB7\x73\xF5\xED\x1C\x7D\x3B\x37\xDF\xCE\xC9\xB7\x73\xF1\xED\x1C\x7C\x3B\xF7\xDE\xCE\xB9\xB7\x73\xED\xED\x1C\x7B\x3B\xB7\xDE\xCE\xA9\xB7\x73\xE9\xED\x1C\x7A\x3B\x77\xDE\xCE\x99\xB7\x73\xE5\xED\x1C\x79\x3B\x37\xDE\xCE\x89\xB7\x73\xE1\xED\x1C\x78\x3B\xF7\xDD\xE2\xBC\xF3\x6B\x67\x71\xDD\xF9\x6F\x3B\xC7\xDD\xE2\xB6\xC7\x6C\x9C\xF6\x13\x6D\x5C\xF6\x9E\x36\x0E\x7B\x89\x8D\xBB\x3E\xCC\xC6\x59\x77\x0B\xAE\x3A\x87\x8B\xCF\xFF\x05\x6F\xDD\x9D\xC0\
x57\xB7\xF3\xD4\x57\xCB\xC0\x6A\xC1\x3B\x7F\x54\x06\x38\xF8\x7B\xF3\x31\x19\x78\x5C\xE8\x9F\x90\x81\x27\x44\xB9\x4F\xCA\xC0\x53\x42\xFF\xB4\x0C\x70\xE4\xF2\x39\x82\x6C\xC2\x91\x07\x3C\x2B\x9B\x38\x05\xC0\x73\xB2\x09\x6E\xBF\x46\x06\x38\xF6\x1F\xD4\xF5\xB5\x32\xB0\x4E\xE8\xD7\xCB\x00\x47\x57\x98\x5C\x79\x0E\x2F\x4C\x8E\xFC\x8B\xA2\xAD\x5E\x92\x81\x57\x84\xFD\xAB\x32\xC0\xD1\x09\xC0\x06\x19\xD8\x28\xF4\xAF\xC9\x00\xC7\x8C\x7C\xE0\x75\x1B\x9F\xDE\xCE\xA3\xB7\xF3\xE7\x2D\xDE\x7C\x72\x02\x5F\xDE\xE2\xC9\x9F\x00\x93\x1B\x6F\xF1\xE3\xED\xBC\x78\x3B\x1F\xDE\xE2\xC1\xF3\x7A\xDA\xF9\xEF\x76\xDE\xBB\x9D\xEF\x6E\xE7\xB9\x5B\xFC\xF6\xFB\x75\x5D\xB7\xF3\xDA\x2D\x3E\x3B\x7F\x3E\x58\x3C\xF6\xFD\xBA\xAE\x5B\xFC\x75\x5E\x96\xC5\x5B\x37\x38\xEB\x82\xAF\x3E\xC5\xC6\x53\x77\x0B\x7E\x7A\x91\xE0\xA7\x0F\x50\x00\x8E\xC9\x39\xC0\x40\xC5\x04\xD7\x9F\xA4\x00\x1C\x93\x72\x80\x93\x15\x60\x88\xD0\x0F\x55\x00\x8E\x91\x39\xC0\x30\x05\x28\x11\xFA\x52\x05\x18\xCE\x91\x03\x8C\x50\x80\x71\x42\x3F\x5E\x01\x38\x06\xE7\x00\x13\x6C\xBC\x78\x3B\x1F\x7E\xA6\x02\xCC\x52\xCC\xFA\x97\x2B\x26\x7A\xE4\x00\x15\x82\x1F\xCF\xF5\x16\x2F\xDE\x58\x63\x12\x7C\xF8\x4E\x36\x1E\x3C\x6F\x13\x3B\xFF\xDD\xE2\xBD\x73\x7B\x8B\xEF\xEE\x4D\xE0\xB9\x5B\xFC\xF6\x99\x79\x47\x78\xED\xFC\xB7\xC5\x67\xE7\xBF\xED\x3C\x76\x3B\x7F\xDD\xE2\xAD\x6F\xBE\xFD\x08\x5F\x3D\x39\x81\xA7\x6E\xE7\xA7\xDB\x79\xE9\x76\x3E\xFA\xE2\x3C\x60\xB1\xE0\xA1\x5F\x29\x38\xE9\xBC\xCE\x76\x2E\xBA\x9D\x83\x6E\xE7\x9E\xDB\x39\xE7\x16\xD7\xBC\xE4\x17\x5D\xB7\x38\xE6\xFC\xB7\x9D\x5B\x6E\xE7\x94\xB7\xE3\x92\x33\x60\x26\x33\xCB\xF5\x31\x13\x7C\x4C\x3E\x4B\x70\xCB\xB9\xDE\xCE\x29\xB7\xB8\xE4\xA5\x9F\xEA\xBA\xC5\x21\x37\xE6\xFA\x36\xEE\xB8\xC5\x19\xE7\xED\x60\xE7\x8A\x5B\x1C\x71\xAE\xB7\x73\xC3\x2D\x4E\x38\xD7\xDB\xB9\xE0\x16\x07\x9C\xEB\xED\xDC\x6F\x8B\xF3\xCD\xF5\x76\xAE\xB7\x9D\xE3\x6D\xE7\x76\xDB\x39\xDD\x16\x97\x9B\xA7\xB5\x38\xDC\xFC\xB7\xC5\xDD\xE6\xBF\x2D\xCE\x36\xFF\x6D\xE7\x6A\x5B\x1C\x6D\xAE\xB7\x73\xB3\xED\x9C\x6C\x3B\x17\xDB\xCE\xC1\xB6\x73\xAF\xED\x9C\x6B\
x3B\xD7\xDA\xE2\x58\xF3\xFC\xED\xDC\x6A\x8B\x53\xCD\xF5\x76\x2E\xB5\xC5\xA1\xE6\x7A\x3B\x77\xDA\xCE\x99\xB6\xB8\xD2\xDC\xC6\xE2\x48\x3F\xC9\x9F\xBD\x82\x1B\xBD\x57\xD7\x75\x8B\x13\x7D\x4D\x02\x17\xDA\xE2\x40\xF3\x67\x97\xC5\x7D\xE6\xF6\x76\xCE\xB3\xC5\x75\xE6\xF9\x5B\x1C\xE7\xBB\x74\x5D\xB7\x73\x9B\xED\x9C\x66\x8B\xCB\xCC\xED\x2D\x0E\xF3\x32\x5D\xD7\x2D\xEE\x32\x7F\xA6\xD9\x39\xCB\x16\x57\xB9\x9E\x3F\xEB\x04\x47\xF9\x0A\x5D\xD7\x2D\x6E\x32\xAF\x8F\xC5\x49\x5E\xFC\xA9\xAE\x5B\x5C\x64\xCF\x27\xBA\x6E\x71\x90\xFF\xF1\x89\xAE\x5B\xDC\xE3\xBC\x04\xCE\xB1\xC5\x35\xCE\x48\xE0\x18\x5B\xDC\x62\xFE\x6C\xB1\x38\xC5\xDD\x12\xB9\xC4\x82\x43\xEC\x4D\xE0\x0E\x5B\x9C\x61\x63\xCD\x4E\x70\x85\x53\xB6\xEB\xBA\xC5\x11\xEE\xF6\x93\xAE\x5B\xDC\xE0\xDC\xED\xBA\x6E\xE7\x04\xDB\xB9\xC0\x16\x07\x98\xD7\xC1\xE2\xFE\x7E\xBE\x4D\xD7\x2D\xCE\xAF\xBC\x5D\xD7\x2D\xAE\xEF\xAE\x6D\xBA\x6E\x71\x7C\x3F\xDD\xA6\xEB\x16\xB7\x97\xD7\xC1\xE2\xF4\x1A\x6B\xB5\x82\xCB\xFB\xC2\x36\x5D\xB7\x38\xBC\xDC\xC6\xE2\xEE\x1A\xE3\x64\xC1\xD9\x35\xEE\x6B\x1B\x57\xD7\xCE\xD1\xB5\x73\x73\x4B\xE2\xC0\xF8\xB8\x69\x3F\x21\x6E\x62\xC8\x3C\x5D\x9F\x28\xB8\xBA\xC6\x5A\xA4\xE0\xE8\xF2\xDF\x16\x37\xD7\x98\x1B\x0A\x4E\x2E\xFF\x6D\x71\x71\x79\x1D\x2C\x0E\xEE\xC1\x3A\x5D\xB7\xB8\xB7\xDC\xC6\xE2\xDC\x72\x1B\x8B\x6B\xBB\x73\x9A\xAE\x5B\x1C\x5B\x6E\x63\xE7\xD6\x5A\x9C\x5A\xAE\xB7\xB8\xB4\x6B\xA6\xE9\xBA\xC5\xA1\x5D\x3D\x4D\xD7\x2D\xEE\xEC\x13\xD3\x74\xDD\xE2\xCC\xAE\x9B\xA6\xEB\x16\x57\x96\xDB\x5B\x1C\xD9\x96\x69\xBA\x6E\x71\x63\xB7\x4F\xD3\x75\x8B\x13\xFB\xDD\x34\x5D\xB7\xB8\xB0\x2B\xE4\x23\x1C\x58\x5E\xAE\xC5\x7D\x35\xD6\x4C\x05\xE7\x95\xF7\x13\x8B\xEB\xBA\x76\x9A\xAE\x5B\x1C\xD7\x8D\x75\xBA\xBE\x41\x03\x5E\x17\xE7\xF8\xAB\x06\xFC\x26\x7E\xDB\x39\xAF\x76\xAE\xAB\xC5\x71\x75\x27\x72\x5B\x05\xA7\xB5\x20\x81\xCB\x6A\x71\x58\x0B\x12\xB8\xAB\x16\x67\xB5\x24\x81\xAB\x6A\x71\x54\xDD\x09\xDC\x54\x8B\x93\xEA\x4E\xE4\xA2\x0A\x0E\xEA\xF1\x09\xDC\xD3\x4A\x02\x4A\x2B\x82\xBE\x1A\x24\x84\x6F\x08\x13\xBE\x21\x4C\xF9\x86\x70\xC1\x37\x84\x8B\xBE\x21\
x94\x7D\x43\x28\xAD\x0E\x9B\xA6\xD9\x84\x6E\xD9\x84\xFC\x6C\x42\xDF\x6C\x82\x37\x9B\x30\x2C\x9B\x50\x1A\xF2\xC5\xC2\xC1\x80\x2F\x54\x36\x36\xE0\x8F\x86\xAB\x82\x0B\x23\xD5\xB5\xDC\x7E\x24\x61\xF4\x68\x42\x69\xD4\x37\x2B\x50\x0E\x20\x49\x49\x4A\x4A\x4A\x4D\xEA\x98\xD4\x2D\x29\x2F\x29\x2F\x89\x25\xB1\x24\x77\xD2\xB0\xA4\xD2\xA4\xF1\x49\xE7\x26\x85\x93\xE6\x26\x7D\x98\xF4\x71\x92\x9E\x34\xA5\xC3\xA2\x0E\x2B\x92\x1F\x4F\x7E\x32\x79\x43\xF2\xBB\xC9\xBB\x92\x3F\x4B\xD6\x93\xA7\xB4\xBE\xD0\xFA\x6E\xEB\x90\x43\x53\x0E\x2D\x3D\x74\xCB\xA1\x0D\x87\x76\x1F\x3A\x74\x28\x72\x38\x76\x78\xDE\xE1\x83\x87\xAD\xEA\xCF\x4C\x27\x5C\x96\x4E\x40\x0B\x41\x6E\x21\x38\x5A\x08\xAE\x16\x82\xA7\x85\x70\x7C\x0B\xA1\x87\x40\x2F\x81\xDE\x2D\x84\x01\x2D\x84\x93\x5A\x08\xA7\xB6\x10\x06\x09\x9C\x2E\x30\x5C\x60\x8C\xC0\x38\x81\x09\x02\x93\x5A\x08\x67\xB4\x10\xCE\x6C\x21\x9C\xD5\x42\x38\x5B\xE0\x3C\x81\x0B\x04\xA6\x0B\xCC\x10\x28\x13\xF0\xB5\x10\x66\xB5\x10\x2A\x04\xAA\x5A\x08\xB3\x5B\x08\xC1\x16\x42\xB4\x85\x10\x6B\x21\xCC\x6F\x21\x2C\x6C\x21\x5C\xD2\x42\xB8\x4C\xA0\xBE\x85\x70\x6D\x0B\xE1\xFA\x16\xC2\x9D\x2D\x84\x7B\x5B\x08\xF7\xB5\x10\x1E\x68\x21\x34\xB4\x10\x9A\x5A\x08\xCF\xB5\x10\x76\xB7\x10\x7E\x6A\xE1\xED\x5F\xE3\x0F\x05\x7C\xA1\xC4\xCB\x5D\xE4\x38\xD7\x71\x81\xE3\x3A\xC7\x8D\x8E\xA5\x8E\xAC\xD6\xEC\xD6\xD2\x79\xFE\xDA\x98\xDD\x2C\x95\x70\x72\x2A\x61\x50\x2A\x61\x58\x2A\x61\xB8\x2F\x18\x08\xF9\x6B\xFD\x00\xBA\x4F\xE8\x3E\xA5\xFB\x25\xDD\x87\xFB\x6A\xEA\x6A\xB0\x22\xBE\x37\x8E\x6A\xC2\x69\xB3\xB9\x4D\x6D\xAD\xAF\xEC\x5C\x5F\x35\x7F\x46\xCF\x26\xFC\x30\x9B\xB0\x7B\x36\xE1\x57\x23\x2E\xE6\x9B\x83\xF5\xDD\x7F\xEE\x7E\xB0\xBB\xDE\x7D\xB8\x3F\x54\xE5\x0B\x06\x6C\xF5\xA9\x4F\x59\x9C\x72\x55\xCA\x0D\x29\x4B\x53\x96\xA5\xDC\x9A\xB2\x32\xA5\x31\x65\x55\xCA\xA3\x29\x8F\xA6\x3C\x99\xF2\x4C\xCA\x9A\x94\x97\x53\x36\xA4\x6C\x4C\x79\x23\xE5\xAD\x94\x1D\x29\x3B\x52\x3E\x4C\xF9\x28\xE5\x93\x94\xCF\x52\xBE\x4C\x39\x9C\x32\xBC\xDA\x17\x98\x53\x5B\x37\x27\x90\x78\x82\x79\x84\xE4\x3C\x82\x33\x8F\x70\x4A\x1E\xE1\xB4\
x3C\xC2\xA8\x3C\xC2\x94\x3C\x42\x30\x8F\x30\x3C\x1C\x09\xD7\x84\x2B\xC3\xC0\xB7\xD2\x77\x92\xA3\xA8\x7F\xD1\x8A\xA2\x75\x45\xC3\xA3\xBE\xEA\x1A\x33\xAB\x0C\xC2\xA4\x0C\xC2\x59\x19\x84\x70\x06\x61\x91\xC0\xF0\xA8\x2F\x10\x0C\xFA\x81\x02\xBD\x60\x78\x5D\x95\xD5\x26\xDD\xBA\x77\x63\xDD\x5C\xDD\x86\xD7\x55\x07\x2A\x50\x92\x3D\x35\x7B\x44\x62\x7D\x8C\x9D\xCE\xE5\x78\x08\x0F\x01\x49\x0E\xE3\x1E\xF8\x28\xE9\xA3\xA4\x4E\x1D\x3A\x75\xF8\x34\xF9\xD3\xE4\xF4\x9C\xF4\x9C\x54\x77\x27\x77\x1F\x77\x3F\xF7\x4C\x77\x85\xBB\xD2\x1D\x76\x63\xA7\xBE\x4F\x3F\xAC\x1F\xDE\xAF\xB7\xEA\x6B\x33\x08\x1C\x9B\x04\xBC\x27\x11\x4E\x3B\x89\xB0\x62\x0D\xE1\xBE\x35\x84\xDA\x2D\x84\x8B\xB7\x10\x08\xE9\x06\xDC\x48\xC7\x22\xA4\x1B\x1E\xAB\x87\xF4\x4E\x86\x17\xCF\x21\x3D\x03\x23\x7C\x21\x5F\x05\xBF\x5D\x4B\x67\x85\xA3\x81\xAA\x40\xC8\x17\x04\x8E\x5B\xD4\x75\x55\xCE\xAF\x39\x23\x7C\x51\xA3\xA7\xAC\x90\x08\x9B\x25\xC2\x08\x5F\x5D\xB9\xAF\xD6\x30\x0E\xCE\xF2\xB5\xEF\x45\x5E\x07\xA1\xDC\x41\x08\x0B\x8C\x28\x3F\x72\x8E\x23\x2A\xF1\x6F\x9D\xA7\x75\x8E\xF6\xD6\xFA\x4F\xCF\x77\x44\xB5\x6F\x4E\x8D\x0F\x40\x67\xC2\x49\x9D\x09\xA7\x74\x26\x8C\xE9\x4C\x5C\xDF\xFE\x91\xD7\x78\x4A\x63\x49\xE3\xA4\xC6\x29\x8D\x17\x34\x5E\xD8\x58\xD6\x38\xA2\xDA\x1F\x0D\xCF\xF1\xFB\xF9\xCC\xE2\xD7\xAC\x7D\x59\x87\xB2\x22\x4D\xEB\x9A\x46\x54\x87\xA3\xBE\xDA\x1A\x71\xE6\xAB\x3A\x11\xDE\xE8\x44\x18\x11\x36\x16\x5B\xF7\xB5\xBF\xBE\x09\xED\x1C\xAE\xA9\x09\xB7\xBB\xEB\xAC\xBD\xE5\x06\x34\xE1\x19\x3C\x87\x75\xD8\x81\x1D\xD8\x8B\xBD\x78\x46\xFA\x44\xFA\x42\xFA\x46\xFA\x5E\xD2\xA5\x98\x1C\x93\x2F\x93\x2F\x93\xAF\x92\xAF\x92\xAF\x91\xAF\x91\x1D\x49\x8E\xA4\xB4\xA4\xB4\xA4\xEE\x49\xDD\x93\x5C\x49\xAE\xA4\x92\xA4\x12\xA3\x3D\x79\x5B\x56\xA4\xF8\x53\x86\xA5\x0F\x4B\xDF\xD6\x69\x67\xA7\xD6\x8C\xD6\x8C\xEF\xBA\xFE\xD0\xF5\xE4\xEC\x53\xB2\xA5\x1C\x39\xC7\x91\xE3\xC8\x79\x37\xEF\xDD\xBC\x5D\x79\xBB\xF2\xBE\xC9\xFB\x3E\xAF\x25\xEF\xE7\xBC\x5F\xF3\xF6\xE6\x1D\xC8\x3B\x90\x07\x77\xAA\x3B\xDD\x68\xFB\x88\x3B\xE6\xBE\xCC\x5D\xEF\xBE\xC9\xBD\xC2\xBD\xCE\x0D\
x4F\x4F\x4F\x2F\x4F\x6F\x8F\xEA\x29\xF2\x0C\xF4\x4C\xF2\x9C\xE1\x29\xF3\x5C\xEB\xB9\xDE\xB3\xCC\x73\x62\x8F\x92\x1E\xE3\x7B\xCC\xEC\xA1\xF7\x42\xEF\xDA\xBE\xF3\xFA\xDE\xD6\xF7\xF6\xBE\x7A\x5F\xF4\x3B\xAB\xDF\xEE\xFE\xAD\xFD\xE1\x55\xBC\x49\xDE\x24\x6F\xB2\xD7\xED\xF5\x7A\x4F\xF5\x0E\xF6\x0E\xF3\xDE\xE9\xBD\xCB\xBB\xC2\xBB\xC2\xDB\xEA\x3D\xE8\x5D\x56\xB4\xBC\x68\x7D\xD1\x67\x45\xEE\x01\x65\x03\x16\x0D\x78\x7B\x80\x3E\x40\x1F\x70\xFE\x40\x7D\xE0\xFA\x49\xFA\x24\x68\x1E\x6D\x89\x76\x9D\xE6\x5D\x39\x68\x65\xBF\x86\x7E\x0D\x6F\x37\xBC\xDD\x30\xBD\x69\x7A\xD3\xEC\xA6\x39\x4D\x43\x0F\x0D\x3B\x94\x71\x38\xF7\xB0\xF7\xF0\x59\x87\xCF\x3E\x5C\x79\xB8\xFA\xF0\x9C\xC3\xBC\xAF\x90\xEE\xD6\x4F\xD7\x4B\xF4\xE9\xBA\x5F\x8F\xE8\x11\xFD\x1E\x7D\xB9\xDE\xAC\x7F\xA9\x7F\xAD\xB7\xE8\xFB\xF5\x43\x66\x3F\x22\x82\x44\x84\x0E\x44\x18\x48\x84\x53\x89\x30\x8C\x08\xCB\x88\x70\x17\x11\x36\x13\xE1\x20\x11\x76\x49\x84\x56\x89\xF0\x69\x98\xF0\x59\xF8\x48\xFF\xC2\x66\xC2\xAF\x9B\x09\xD8\x42\x38\x71\x0B\xA1\xF7\x16\x42\xE5\x16\xC2\x6C\xD1\xEF\x16\x6F\x21\x5C\xB9\x85\x70\xC3\x16\x42\xC3\x16\xC2\xC3\x5B\x08\x5F\x6F\x21\x34\xBF\x43\xF8\xF9\x1D\x02\xDE\x25\x9C\xFB\x2E\x61\xE6\xBB\x84\x05\xEF\x12\xB0\x95\x70\xF6\x56\xC2\xB9\x5B\x09\x77\x6D\x25\xDC\xB3\x95\xB0\x7C\x2B\xE1\x9F\x02\x0F\x6C\x25\xC4\xB7\x12\x1A\xB6\x12\x1E\xDC\x4A\x78\x78\x2B\xE1\x99\xAD\x84\xE7\x04\xD6\x6E\x25\xBC\xB4\x95\xF0\xCA\x56\x82\x63\x1B\xA1\xC3\x36\x82\x73\x1B\xA1\xE3\x36\xC2\x71\xDB\x08\x5D\xB7\x11\xF2\xB6\x11\xD8\x36\xC2\xA0\x6D\x84\xD3\xB7\x11\x86\x6E\x23\x94\x6C\x23\x8C\xDC\x46\x18\x2D\x30\x7E\x1B\x61\xCA\x36\xC2\x59\xDB\x08\x0F\x6C\x27\xAC\xDC\x4E\x78\x63\x07\xE1\xAD\x1D\x04\x7D\x07\x61\xEE\xF7\x84\xC7\xBF\x27\xD0\x0F\x84\x21\x3F\x10\xB0\x9B\xD0\x77\x37\xC1\xBB\x9B\x70\xEB\x6E\xC2\x8A\xDD\x84\x87\x77\x13\x56\xEF\x26\xAC\xDB\x4D\x78\x61\x37\xE1\xED\xDD\x84\x2D\xBB\x09\xBF\xEE\x36\x17\x7A\x1F\xFA\x89\xF0\xE5\x4F\x04\xFD\x27\x02\xED\x21\x48\x7B\x08\x19\x7B\x08\xA7\xEF\x21\x94\xEC\x21\x8C\xDD\x43\x98\xB2\x87\x70\xE6\x1E\xC2\xCC\x3D\
x04\xFF\x1E\x73\x21\x78\xC7\x6F\x84\xE6\xDF\x08\xDF\xFF\x46\xD8\xFD\x1B\xE1\xE0\x6F\xE6\xE2\x6F\xED\x5E\x42\xFD\x5E\xC2\xCE\xBD\x84\xE6\xBD\x84\xEF\xF6\x92\xF1\x21\x9A\xD4\x7D\x84\x8C\x7D\x84\x31\xFB\x08\x53\xF6\x11\x2E\xD8\x47\x98\xB9\x8F\x70\xCD\x3E\xC2\xB2\x7D\x84\x87\xF6\x11\x56\xED\x23\xAC\xDE\x47\xC0\x7E\xC2\x82\xFD\x84\x8B\xF7\x13\xDE\xD8\x4F\xD8\xB4\x9F\x30\xF5\x00\x61\xE6\x01\x42\xCD\x01\x42\xE4\x00\x21\x76\x80\xB0\xE0\x00\xE1\xE2\x03\x84\xFA\x03\x84\xAB\x0F\x10\x96\x1D\x20\xAC\x3C\x40\x58\x75\x80\xF0\xE4\x01\xC2\xFA\x03\x84\x17\x0F\x10\x36\x1F\x20\x6C\x3F\x40\x40\x2B\xE1\x96\x56\xC2\x5F\x5B\x09\xAF\xB7\x12\x76\xB7\x12\xF6\xB7\x1E\xE3\xB9\x14\x8E\xC4\xF8\x38\xE8\x53\xF9\x47\xB9\x5E\xFD\x59\xDD\xAF\xEA\xEA\x88\x5A\xC3\x51\xE9\x93\x11\x75\x21\x7F\xA0\x32\x1C\x6D\xF7\x80\x72\x13\xEE\x38\x81\x80\x1E\x84\x50\x0F\x42\xA4\x07\x21\xD6\x83\x50\xDF\x83\x30\xA2\x27\x61\xC4\xC2\x48\x34\x10\x3E\xF2\xFD\x34\x24\x13\x1C\xC9\x84\x64\x01\x67\x32\xE1\xE4\x64\xC2\xA9\xC9\x84\xD3\x92\x09\x83\x05\x86\x09\x8C\x58\x18\x0D\x04\x83\xC6\xC8\xCC\x0C\xCA\x95\xCA\x35\x4A\x7F\x47\x7D\xDE\x92\xBC\xBE\xF9\x7D\xF3\x17\xE4\x2F\xC8\x6F\x2E\xD4\x0B\x4B\xE2\xCB\xE3\xFD\x0E\xF7\x3F\x3C\xD2\x5F\xEB\x8F\xFA\x63\x80\x18\xC3\x8E\xF4\xCF\xF3\x85\x7C\x55\xBE\xA8\xF5\xE2\x4D\x99\x92\x72\x4E\x4A\x79\x4A\x65\xCA\xA2\x94\xE6\x95\xFA\xCA\x91\x81\x79\xFE\xDA\xB2\xD2\x39\x75\xD1\x3A\x33\x3E\x97\x90\x94\x4B\x48\x11\x48\xCB\x25\x64\xE5\x12\xBA\xE4\x12\xBA\xE6\x12\x72\x72\x09\x27\xE7\x12\x4E\xCD\x25\x9C\x96\x4B\x38\x3D\x97\x30\x3A\x97\x30\x25\x97\x70\x41\x2E\x61\x64\xB8\x2A\x6A\x7C\x93\x2B\x87\x70\x7A\x0E\x61\x64\x5D\x24\x18\x5E\x68\x7B\x23\xAD\x21\xCC\x5E\x43\x88\xAC\x21\x5C\xB2\x86\x50\xBF\x86\xB0\x64\x0D\x61\xD9\x1A\xC2\x1D\x6B\x08\x77\xAD\x21\x2C\x5F\x43\x18\x55\xB5\x30\x12\x3B\xC6\x70\x15\x5E\x42\xBF\x93\x8E\xBC\x67\x46\x05\x67\xF9\x6A\x79\xF6\x0E\x42\x2F\x07\x3F\x5E\x58\xE3\x0B\x94\xA3\xB9\x13\xE1\xB7\x4E\x84\x51\xB1\xEA\x40\x38\x62\x6B\xBF\xCC\xB1\x99\xE3\x33\x27\x65\x4E\xC9\x3C\x37\xF3\xFC\xCC\xF3\x33\
xA7\x65\xCE\xC8\x9C\x99\xB9\x24\xF3\xBA\xCC\x1B\x33\x97\x65\xAE\xCA\x7C\x34\xF3\x89\xCC\xA7\x33\x9F\xCF\x5C\x9F\xB9\x3E\xF3\xC5\xCC\x57\x32\x37\x66\x6E\xCF\xDC\x99\x99\x91\x95\x99\xD5\x25\x2B\x27\x6B\x5A\xD6\x8C\xAC\x4B\xB2\xEA\xB3\xEE\xC8\xAA\x2F\xFC\x5B\xE1\x8A\xC2\x78\xE1\xCA\xC2\x87\x0B\x57\x15\x3E\x59\xF8\x74\xE1\xF3\x85\xEB\x0B\x5F\x2D\xDC\x58\xF8\x56\xE1\xE6\xC2\xED\x85\x3B\x0B\x3F\x2E\xA4\xA6\xA4\xA6\x94\xA6\xF4\xA6\xCE\x4D\x5D\x9B\xDC\x4D\x27\x36\x15\x34\xF5\x6B\x1A\xE3\x0F\x47\xAB\x6C\x6F\xE8\x15\x19\xAF\x64\x6C\xC8\xD8\x90\xB1\x29\x63\x53\xC6\xE6\x8C\x03\x19\x07\x33\xF4\x8C\x65\x79\xCF\xE6\xAD\xCD\x5B\x97\x87\xC2\x9E\x85\xBD\x0A\x7B\x15\xF2\x30\x26\xE8\xAB\x0A\x07\x03\x46\xDF\x84\xDA\x4F\xF5\xAA\x17\x99\x6E\x53\xCD\x84\xA4\x66\x42\x72\x33\x21\xA7\x99\xD0\xBD\x99\xE0\x69\x26\x9C\xD0\x4C\xE8\xD1\x4C\x38\xB1\x99\xD0\xA7\x99\x30\x26\x1C\xAB\xE6\x69\xBD\x32\x61\xBC\x4C\x18\x13\xF5\x85\x62\xD5\x3E\x20\x8B\x20\x67\x11\x1C\x59\x84\xB4\x2C\x42\xA7\x2C\x42\x46\x16\x21\x2B\x8B\x50\x90\x45\xE8\x93\x45\xF0\x66\x11\x06\x64\x11\x06\x66\x11\x4E\xCE\x22\x0C\xCA\x22\x0C\xCE\x22\x8C\xCC\x22\x8C\xC9\x22\x8C\xCD\x22\x4C\xC8\x22\x4C\xCA\x22\x4C\x11\x38\x4F\x60\x46\x16\xA1\x3C\x8B\x50\x99\x45\x08\x66\x11\x22\x59\x84\x18\x4F\x17\xF5\xFB\xE7\x18\xEB\xFB\xB5\x72\x9D\x3C\x5F\xBE\x58\xBE\x54\x5E\x24\x2F\x92\xAF\x94\xAF\x94\xAF\x96\xAF\x96\x97\xC8\xD7\xC9\x37\xC8\x37\xC8\x37\xC9\xF7\xCA\xF7\xC9\xBB\xE4\xDD\xB2\x2E\x9F\x98\xDF\x27\x7F\x46\xBE\x2F\xBF\x32\x7F\x76\xFE\xBA\xFC\x75\xF9\x70\x75\x71\xE5\xB8\xF2\x5D\x6E\xD7\x28\xD7\x58\xD7\x24\xD7\x14\xD7\x79\xAE\x0B\x5C\x17\xB8\xA6\xBB\xA6\xBB\x66\xB8\x66\xB8\xCA\x5C\x97\xBA\xEA\x5D\x8F\xBB\x9E\x74\xBD\xEC\x7A\xD5\xF5\xAE\x6B\xBB\xEB\x03\xD7\x47\xAE\x1F\x5D\x7B\x5C\xBF\xB8\x7E\x73\x1D\x76\x9D\xE8\x39\xD1\xE3\x6F\xF2\x37\xB5\x7B\xFB\x13\xE1\x26\x22\xAC\x10\xC0\x3B\x84\x51\xEF\x10\xC6\xD4\xCD\xF6\x45\x7D\xB1\xA3\x86\xAF\x97\x3B\x17\x3B\xAF\x72\xDE\xE8\x5C\xEA\xBC\xD9\x79\xAB\x73\xA5\xB3\xD1\xB9\xCA\xF9\xA8\xF3\x31\xE7\x13\xCE\x67\x9C\x6B\x9C\
xAF\x38\x37\x38\x5F\x73\xBE\xE1\xDC\xE4\xDC\xEC\xDC\xEC\x6C\x76\x7E\xE6\xFC\xD2\xF9\x93\x73\xBF\x53\x77\x8E\xA9\x0B\xCD\xF6\x05\x7D\x65\x63\xC2\xA1\x0A\x7B\xA6\x33\xF3\x09\xFE\x7C\x42\x55\x3E\xA1\x3A\x9F\x30\x3B\x9F\x70\x53\x3E\x61\x59\x3E\xE1\xE6\x7C\xC2\xAD\xF9\x84\xBF\xE7\x13\x56\xE4\x13\x1A\xF2\x79\xBD\xA2\x35\x75\x73\xAA\xDB\xB2\x20\xA7\xEC\x74\x38\x9D\xCE\x4E\xCE\x0C\x67\x96\xB3\xC0\xD9\xC7\xE9\x75\x0E\x70\x0E\x74\x9E\xEC\x3C\xC5\x79\x9A\x73\x90\x73\xB0\x73\xB0\x73\xA8\x73\xB8\x73\x8C\x73\xAC\x73\x82\x73\x92\xF3\x4C\xE7\x99\xCE\x0B\x9C\x17\x3A\x2F\x72\x5E\xE4\xAC\x74\xCE\x73\x8E\x4D\x98\x82\xD4\xF7\xBB\xA3\xDF\x9D\xFD\x7E\xEE\x87\xFE\xDB\xFA\x3B\xBC\x0E\x6F\x07\x6F\x07\xAF\xC7\xDB\xDB\x7B\x9A\xF7\x74\x2F\x4E\x5A\x37\x09\x93\x0F\x2E\xC7\xFE\x9A\x03\x91\x03\xEF\x1D\xE0\xF6\xBB\xC3\x84\x9F\xC2\x64\x78\x11\x7C\x14\x97\x00\x4D\xC2\x49\x4F\x49\x28\x79\x4A\x42\xFE\xD3\x12\xDC\x4F\x4B\xB8\xF7\x2D\x09\xAB\xDE\x92\xD0\xFC\x9D\x04\xEC\x93\x90\x7F\xC0\xF4\x38\x18\x9F\x25\x63\xAC\x2F\x54\x55\x17\xB4\x55\xA0\xB3\xDE\xB9\x9F\xB7\xBF\xB7\xA8\xE8\xA6\x22\x0C\x60\x03\x66\x0E\xB8\x6C\xC0\xCC\x86\x4B\x1A\xF0\xE0\x7D\x3B\x56\xED\x78\x75\xC7\x1B\x3B\x5A\x77\xAC\xD0\x9F\xD7\x5F\xD4\x37\xE8\xAF\xEB\x6F\xEB\xEF\xE8\x3B\xF4\xF7\xF5\x0F\xF5\xB1\xBE\x50\xA0\x32\x50\x76\x56\xB8\x3A\x10\xAA\x5A\xE8\x3B\x92\x5F\x47\x42\xAF\x8E\x04\x6F\x47\xC2\xA0\x8E\xC4\xCB\xAB\x0B\x85\xC3\x70\x67\x9F\x94\x3D\xD6\x17\x8B\x26\xCE\xC0\x9A\x93\x09\x7B\x92\x09\xBF\x24\x13\x7E\x4D\x26\xB4\x26\x13\xF4\x64\xC2\x58\xFF\xAC\xA8\x7F\x3E\x70\xB3\x63\x83\x63\xB3\xE3\x5B\xC7\x8F\x8E\x5F\x1C\xF9\xAD\xA7\xB4\x9E\xD6\x3A\xB8\x75\x68\xEB\xD0\xD6\x92\xD6\xD2\xD6\x11\xAD\x23\x5B\x47\xB7\x9E\xD1\x3A\x36\x10\xF5\x55\xF9\x42\x3E\xA0\xD4\xFB\x37\xEF\xDD\xDE\xE5\xDE\x23\xF9\xD3\x2A\x02\x5B\x4D\x98\xB2\x9A\x70\xD6\x6A\x6A\xFB\x38\xDC\xB8\x9A\x88\x3F\x1A\xF0\x05\xCB\x4A\xA3\x3E\xE3\x81\x66\xF5\xCF\x64\xC2\x39\xC9\x84\xF3\x92\x09\x65\xC9\x84\x71\xA1\x6A\x7F\x34\x60\x38\xEA\x99\x41\x0E\xCB\x57\x29\x57\x2B\x13\x92\xCE\x49\x8A\x24\x45\
x92\xCE\x4C\x39\x3B\x65\x55\xB7\xF5\xDD\x36\xE7\xBD\x93\xB7\x35\xAF\x39\xEF\xD3\xBC\xAF\xF3\x7E\xC8\xFB\x21\xEF\x97\xBC\x5F\xF2\xF6\xE5\xED\xCF\x5B\x9F\xBF\x3F\xBF\x35\x5F\xCF\x4F\x73\x77\x74\x6F\x76\xEF\x76\xF7\xF1\x16\x7A\xEF\xF0\xFE\xC3\x8B\xC3\x9D\x0E\xBB\x0F\x17\x1E\x3E\x44\x04\x8E\x66\xC9\xC4\xE9\x59\x26\xAA\xB6\x10\x02\x5B\x08\x7F\xD9\x42\xB8\x62\x0B\xE1\xAA\x2D\x84\xEB\xB7\x10\x1A\xB7\x10\x1E\xDA\x42\x00\xA5\xE3\x47\x4A\xC7\xB8\x50\x6D\x79\x34\x10\x89\x05\xC2\x21\x5F\xB0\x6C\x8A\xAF\x3A\xE8\x9B\xD7\xAE\xBF\xA7\x12\xA2\xA9\x84\x05\xA9\x84\x45\xA9\x74\x94\x7D\x34\x56\xDD\x6E\x3E\x53\x92\x4A\x38\x27\x95\x70\x7E\x2A\xA1\x2C\x95\x30\xDE\x37\xCF\x67\xCE\xED\xEA\x1B\x36\x35\x6C\x6E\x78\xAF\xE1\xE3\x86\x4F\x1A\x26\xF8\x02\x31\xF3\x9E\xA8\xCF\x20\xBC\x60\x9B\x9F\x4C\xF0\x85\x42\xBE\x8A\x23\xFD\xA1\x3E\xED\x86\xB4\x9B\xD2\x96\xA5\xDD\x92\xB6\x32\xAD\x31\xED\xB1\xB4\x27\xD2\x9E\x49\x5B\x93\xF6\x72\xDA\xAB\x69\x1B\xD3\x5E\x4F\xDB\x94\xB6\x2D\x6D\x7B\xDA\xC7\x69\x1F\xA7\x35\xA7\x7D\x96\xF6\x65\xDA\x8F\x69\x3F\xA5\xED\x49\x9B\xC0\xE7\xC8\xC6\xF5\x04\xEE\xF5\x1E\xF0\x1E\xF2\xEA\xDE\xDD\x45\x7A\xD1\xE6\x01\x87\x07\x60\xE0\x79\x03\x2B\xF5\xB0\x3E\x57\xBF\xDB\x1C\xDF\xAE\x32\x89\x03\x15\xAB\x09\x55\xAB\x79\xF9\x0B\x7D\xD5\x65\x13\x79\xDD\x1A\x0A\x1B\xFA\x37\xF4\x6F\x98\x50\xED\x8B\x86\x6B\xAB\x45\x7D\xE1\x24\xC8\x4E\x82\xC3\x49\x48\x72\x12\xD2\x9C\x84\x2C\x27\xA1\x8B\x93\x90\xED\x24\xE4\x3A\x09\x27\x3B\x09\xA7\x39\x09\xC5\x4E\xC2\x30\x27\x61\xAC\x93\x30\xC5\x49\x38\xDF\x49\x98\x50\x1D\x88\xF9\x42\x65\x53\x6B\x7C\xC1\x60\xD9\x54\xA3\x29\xDB\xF5\xE7\xCF\xC3\x64\x00\xD7\x13\xB6\xDD\xC0\xED\x6B\xFC\x51\xFB\xFD\x9E\xFD\x51\x76\x73\xF6\x37\xD9\xBB\xB3\xF7\x67\x37\xE7\xEA\xB9\x13\xAA\xC3\xB3\xDB\x4D\xDA\x33\x09\x9D\x33\x09\x59\x99\x84\xA1\x99\x3C\x7D\x5D\x85\x6F\xBE\xAF\xED\x19\xB6\x2A\x93\xF0\x6D\x26\x61\x77\x26\x61\x7F\x26\x61\x62\xFB\xDB\xA9\x8D\x9F\xDC\x88\x46\x3C\x81\x27\xF0\x2C\x9E\xC5\x7A\x6C\xC7\x4E\xFC\x86\x7D\x78\x41\x7A\x55\xDA\x22\x35\x4B\x9F\x4B\xDF\x4B\xDF\x4B\x2D\x52\
x8B\x14\x91\x63\xF2\x3C\xDB\xFB\xE1\xE8\x77\xC3\xAF\xF2\x5E\xF9\x72\xE5\x3A\xA5\xBF\xC3\x5C\x2B\xB9\xC0\x31\xD3\xB1\x44\xDC\x93\x7B\x1C\xEE\xA4\xF1\x49\x21\x63\xDD\xEA\xDD\xA4\x6D\x49\xDB\x92\xBE\x48\xFA\x32\xA9\x25\xE9\xC7\xA4\x03\x49\x07\x93\xF4\x24\x3D\x29\xA3\x43\x46\x87\xCC\x0E\xFD\x3B\x4C\xEA\xF0\x40\x87\xD5\x1D\x56\x77\x78\xBD\xC3\xB7\x1D\x7E\xE9\xF0\x6B\x87\x03\x1D\x0E\x74\x40\x72\x97\xE4\x6E\xC9\xDD\x92\x7B\x24\xF7\x48\x2E\x48\x2E\x48\x2E\x49\x3E\x3F\x79\x66\xF2\xEC\x64\x6B\xDD\x4B\x49\x19\x94\x32\x24\x65\x48\xCA\x94\x94\x29\x29\xE7\xA7\xF8\x52\xE6\xA6\xD4\xFF\xEE\xDA\xC8\xDA\x94\xB5\x29\x6F\xA5\xBC\x25\xD6\x44\x76\xA5\xEC\x4E\xF9\x29\xE5\x60\xCA\xC1\x94\x3F\x7A\x56\x5B\xCF\xE5\xA8\x33\xF6\xBB\xEF\x98\xB5\xCE\xB5\xE2\xDD\xB2\xCB\xB9\xDF\xB9\xDF\xE9\x48\x4D\x4B\xED\x94\x9A\x91\x9A\x95\x5A\x90\xDA\x27\xD5\x9B\x3A\x20\x75\x60\xEA\xC9\xA9\x83\x52\x87\xA4\x0E\x49\xBD\x30\x75\x46\x6A\x59\xAA\x2F\x75\x6E\xEA\xDC\xD4\xC5\xA9\x8B\x53\xAF\x4A\xBD\x2E\xF5\xA6\xD4\x65\xA9\xB7\xA4\xDE\x96\x7A\x47\xEA\x3F\x52\xEF\x4A\xBD\x2B\xF5\x9E\xD4\xE5\xA9\xF7\xA5\xDE\x9F\xBA\x32\xB5\x31\xF5\xE1\xD4\x67\x52\x37\xA7\x6E\x4E\x75\xA4\xA5\xA5\xA5\xA7\x65\xA4\x65\xA6\x15\xA4\xF5\x49\x1B\x94\x36\x24\x6D\x48\xDA\xF9\x69\xD3\xD2\x66\xA6\xF9\xD2\xEA\xD3\xEA\xD3\xAE\x3A\xC6\x3D\xB4\x36\x6D\xAD\xB8\x6F\x76\x19\xF7\x8C\xD2\x31\xAD\x63\x7A\xC7\x8C\x8E\x99\x1D\x8B\x3B\x0E\xE9\x38\xA4\xE3\xE4\x8E\x93\x3B\x9E\xDD\xF1\xDC\x8E\x65\x1D\x7D\x1D\x2F\xEE\xB8\xA8\xE3\x55\x1D\xFF\xD6\xF1\x1F\x1D\x57\x77\x7C\xAC\xE3\x73\x1D\xD7\x76\x5C\xDB\x71\x7D\xC7\x57\x3B\x52\xBA\x37\x7D\x40\xFA\xC0\xF4\x92\xF4\xD1\xE9\x97\xA7\x5F\x91\x7E\x65\xFA\x95\xE9\x57\xA7\x5F\x97\x7E\x43\xFA\x7D\xE9\x0F\xA4\x3F\x90\xAE\xA5\xAF\x4A\x7F\x34\xFD\xB1\xF4\xB5\xE9\x6B\xD3\xD7\xA7\xBF\x9C\xFE\x6A\xFA\xAB\xE9\x1F\xA6\x7F\x92\x8E\x4E\xE8\x54\xD2\x69\x4C\xA7\x71\x9D\x82\x9D\x96\x74\xBA\xA1\x13\x32\xFA\x64\x0C\xCB\x18\x96\x31\x25\xE3\x9C\x8C\x69\x19\x33\x32\x7C\x19\xBE\x0C\x7F\x46\x65\x46\x28\x23\x92\x51\x97\x71\x79\xC6\x4D\x19\x37\x65\x1C\
x3D\x0E\xFB\x9F\x8D\x13\xEB\xB3\x96\x66\x59\x2B\x14\x74\x5C\xB0\x6B\xB8\xEB\xA2\xAE\x97\x77\xFD\x47\xD7\x15\x5D\xBF\xED\xFA\x53\xD7\x7D\x5D\x91\x9D\x96\x9D\x9E\xDD\x39\xDB\x9D\x5D\x94\x5D\x92\x7D\x66\xF6\xCC\xEC\x60\x76\x28\x3B\x92\x5D\x9F\xFD\x58\xF6\x8E\xEC\x1D\xD9\x1F\x66\x7F\x98\xED\xCE\x59\x90\x53\x9F\x73\x65\xCE\x35\x39\x2B\x73\x1A\x73\x1A\x73\x56\xE5\xFC\x9A\x83\x5C\x96\x3B\x25\xB7\x26\x37\x92\x1B\xCB\xAD\xCF\x6D\xCA\x5D\x95\xFB\x5A\x2E\xBA\x75\xED\xE6\xEE\x76\x76\x37\xAD\x9B\xD6\xCD\xD1\x7D\x60\xF7\x51\xDD\x27\x74\x5F\xDC\x7D\x45\xF7\x87\xBB\x3F\xD2\xFD\xD9\xEE\x5F\x74\x47\xDE\x09\x79\x93\xF2\xCE\xC8\x9B\x96\x77\x69\x1E\x9F\x37\x58\xE3\xCB\x23\xEB\x0D\xBF\x99\xEB\x0D\x7C\x4C\xC5\xFE\xD5\x31\xD5\x1A\xD7\xF3\xAE\xE7\x5D\x2F\x1A\x63\xAB\x37\x5D\x9B\xC5\xF8\xAA\xD9\xF5\xBD\x18\x5F\x1D\x74\xCD\x75\xCF\x75\x2F\x72\x2F\x72\x2F\x73\xDF\xE5\x96\x3C\x92\xA7\x83\xA7\x83\xC7\xE9\xC9\xF2\x74\xF1\x74\xF1\xE4\x7A\xF2\x3D\x3D\x3C\x3D\x3C\x7C\x0C\x56\xE0\x29\xF0\xF4\xF1\x14\x7A\xFA\x7B\x06\x79\x06\x7B\x86\x79\x46\x79\xC6\x79\x26\x7B\x26\x7B\x16\x7B\xAE\xF4\x58\x63\xDD\x99\xEA\xE7\xEA\x77\x6A\x8B\xBA\x47\xFD\x59\xB5\x8F\x8B\xBD\x85\x55\x85\xE1\xC2\x70\xE1\xBF\x32\x1E\xEF\xDF\xAF\x7F\x3F\x87\x37\xC9\x5B\xE4\x3D\xD9\x7B\xBA\x77\xB0\xD7\x7A\x1B\xF3\x67\xF8\x41\xAF\xEE\x75\x14\xF5\x2F\xE2\xE3\x8C\x15\x45\xEB\x8A\xF8\xF3\xBC\x6D\x7C\xB3\xE2\x86\xFB\x37\xDF\x7F\xE8\x7E\x3C\x90\x16\xCF\x88\xBB\xE2\x7D\xE2\x7D\xE3\x25\xF1\x50\x7C\x51\xFC\xEE\xF8\x8A\xF8\x17\xF1\x6C\xCD\xA5\x1D\xAF\x2D\xD1\xAE\xD7\xD6\x69\x2F\x6A\xAF\x6B\xBF\x6A\xB4\x52\x5E\xE9\x58\xD9\x61\xA5\x73\x65\xDA\xCA\xE3\x57\x96\xAC\xAC\x5D\x79\xC5\xCA\xC7\x56\xEE\x59\xB9\x77\x65\xEB\xCA\xD6\x95\x87\x56\x1E\x5E\xE9\x6C\xE8\xD9\xE0\x6D\x18\xDD\xC0\xC7\x32\x57\x36\x3C\x6A\xAC\xA7\x34\x37\x7C\xDE\xF0\x65\xC3\x8F\x0D\x07\x1A\x0E\x37\xA0\xB1\xA0\xB1\xA4\x71\x78\xE3\xC8\xC6\x09\x8D\x33\x1B\xE7\x35\x5E\xDC\x78\x71\xE3\x65\x8D\x8F\x34\xAE\x6E\x5C\xDD\xF8\x44\xE3\x93\x8D\xCF\x34\xAE\x6D\x5C\xDF\xB8\xBE\xF1\xC5\xC6\x17\x1B\x3F\x68\xFC\
xA8\xB1\xB9\xF1\xDB\xC6\x3D\x8D\xBF\x34\x1E\x6B\xCE\xE1\x6D\x9A\xD6\x74\x61\x53\xA0\x29\xD2\xF4\x69\x93\x7D\xDC\xD4\x36\x6E\x6B\x4D\x6A\xCD\x6A\xCD\x6E\xCD\x6F\xCD\x6F\x75\xB5\x16\xB4\xF6\x39\xE6\x38\x66\x75\xC2\xDA\x7E\x6B\xDB\xDA\xBE\x47\x2F\xD6\x4B\xF5\x69\x7A\xE5\x31\xC6\x61\x00\x21\x15\x64\x7C\x35\xFC\x44\x10\x0A\x40\x28\x06\x61\x30\x08\x43\x40\x18\x06\xC2\x24\x10\xA6\x80\x30\x03\x84\x7A\x10\x0E\x70\x29\x11\xEE\x92\xA8\x6D\x8D\x13\x32\xC1\x25\x13\x0A\x65\x42\x89\x4C\x18\x2E\x13\xC6\xC9\x84\x29\x32\xA1\x4E\x26\xD4\xCB\x84\xBB\x65\xC2\x0A\x99\xF0\x92\x4C\xD8\x28\x13\xDE\x96\xC9\x98\x07\xDF\xAD\x10\xAC\x7D\x1C\x6B\x0F\xC7\x9A\x2F\x5A\xEB\xA3\x48\x22\x9C\xD2\x81\x50\xD2\x81\x70\x4E\x07\xC2\xCC\x0E\x84\xAA\x0E\xF4\x6F\xCD\xD7\xF9\xB8\x6C\x66\x32\x61\x5E\x32\xA1\x3E\x99\x70\x4F\x32\x1D\x35\x7E\x44\x0A\xA1\x4B\x0A\xC1\x9D\x42\x18\x94\x42\xA8\x4F\x21\x3C\x95\x42\x78\x3E\x85\xB0\x2E\x85\x8C\xF7\x3F\x47\xC6\xEF\xBC\xF7\x67\x3A\x09\x97\x38\x09\xF5\x4E\xC2\x5D\x4E\xC2\x7A\x27\x61\x83\x93\xF0\x9A\x93\xF0\x39\x4F\x2B\xF6\x03\xAC\x71\x92\x35\xBE\xAA\x4F\x25\xDC\x9C\x4A\x06\xD1\x75\x6C\x1A\xC1\xDA\xBF\xB2\xF6\xAE\xF8\xB8\xF8\x84\x8E\x84\xFA\x74\x42\x43\x3A\x61\x55\x3A\x61\x75\x3A\x01\x9D\x08\x79\x9D\x08\xBD\x04\xBC\x7C\x4E\xDD\x89\x8C\x35\xD6\x97\x3B\x51\xDB\x3C\x5B\xCE\x20\x9C\x9A\x41\x58\x9C\x41\x78\x24\x83\xB0\x39\x83\xF0\x75\x06\x41\xEE\x4C\x38\xB1\x33\x61\xA4\xC0\x18\x81\x29\x9D\x09\xD1\xCE\x84\x79\x02\x8B\x3B\x13\x1E\xED\x4C\x78\xA1\x33\xE1\xE5\xCE\x84\xF7\x05\x3E\x14\xB0\x8F\x3B\xFA\x66\x12\xEA\x33\x09\x57\x67\x12\x96\x08\x5C\x97\x49\xB8\x31\x93\xB0\x34\x93\x70\x77\x26\x61\x79\x26\x61\x65\x26\x19\xE3\x91\x8F\x33\xFF\xFD\xF9\xED\x10\x01\x6B\x3E\xCB\xE7\xB1\xBE\x2C\x02\x8E\x23\x9C\x74\x1C\x61\xCC\x71\x84\xF1\xC7\x11\xCA\x8E\x23\xF8\x8E\x23\xD4\x1F\x47\x78\xE4\x38\xC2\xCB\xC7\x11\x5E\x39\x8E\xB0\x41\xA0\xBE\x0B\xE1\xE1\x2E\x84\x9D\x5D\x08\x1F\x74\x21\xA0\x2B\xA1\x7F\x57\xC2\x48\x81\xFA\xAE\x84\xC6\xAE\x84\xA7\x05\xAC\xFD\x3B\xE4\x10\xFA\xE6\x10\x56\x88\x3D\x44\x3D\xE7\xCF\
xD7\x65\xFA\xE7\x12\x86\x09\x94\x0A\xAC\xC8\x25\x68\xB9\x84\xC6\x5C\xC2\xE6\x5C\xC2\x2E\x81\xCF\x04\xD0\xCD\x44\x6A\x37\xC2\x80\x6E\x84\x62\x81\x29\x02\x17\x76\x23\x5C\xDB\x8D\x70\xB7\xC0\xFA\x6E\x84\x7D\x3C\x8D\x6D\xDF\xA6\x5F\x1E\xA1\x44\x20\x9A\x47\x58\x9A\x47\x40\x3E\x21\x29\x9F\x90\x9C\x4F\x48\xC9\x27\xA4\xE6\x13\xBC\xF9\x84\xD1\x02\xC7\x9A\xE7\x5E\x2B\xE6\xB6\x1C\xCD\x8C\xB0\x87\x11\x56\xB9\x4C\x58\xEB\x6C\xD6\xBA\x9A\xB5\x0E\x64\xED\x5F\x5A\x7B\x5B\x25\xB3\x09\x17\xCD\xA6\xB6\x7D\x2D\xCC\x21\xF4\x9F\x43\x28\x99\x43\x18\x31\x87\x50\x3E\x87\x30\x7F\x0E\xE1\xD2\x39\x84\xA5\x5C\x2F\xF6\x62\x11\x26\x8C\x0F\x13\xA6\x08\xDC\x1A\x26\x2C\x0F\x13\x9A\xC3\x84\x5D\x62\xCD\x99\x03\x11\xC2\x5E\x41\x96\xE6\xE3\x67\xDC\x48\x48\xBE\xD1\x1C\xE3\xDB\xE7\x71\xD6\x78\x3F\xB2\x9A\xD0\xFA\x28\xFD\xE9\xBA\xD7\xFF\xE6\x75\xE7\xF5\xDB\x09\x2F\x6E\x27\xBC\xBF\x9D\xF0\xE1\x76\xC2\x81\xED\x84\x83\xDB\x09\xC7\xED\x20\x74\xDD\x41\x38\x69\x07\xE1\x94\x1D\x84\xC9\x3B\x08\x53\x76\x10\x42\x3B\x08\x91\x1D\x84\x25\x3B\x08\xD7\xED\x20\xAC\xDC\x41\x68\xDC\x41\x78\x71\x07\xE1\xE5\x1D\xE6\x3A\x36\x76\x11\xD4\x5D\x84\x53\x77\x11\x86\xEC\x22\x4C\x16\x58\xFF\x29\xE1\xBB\x4F\x09\xF8\x9A\xF0\xF2\xD7\x64\xEC\x77\x8F\x10\x7B\xDE\x1C\xFF\xDD\x37\x36\xF7\x8D\xFF\xD3\xF5\x95\x89\xBE\x70\xBB\xF9\xD8\xB1\xC7\xD5\x47\xC6\xD3\x1B\xD3\x37\xA5\x6F\x4E\x7F\xCF\x18\x57\x4F\xF4\xC5\x02\x89\x1B\xD4\xF6\xF9\x5C\xFB\xB9\xDC\xD3\xC6\x3C\x0E\xF9\x3D\xF3\xD5\xFC\x0B\xF3\x67\xE5\xFB\xF3\xE7\xE4\xCF\xCF\x5F\x98\xFF\x7C\x3E\x98\xCE\xEC\x63\xC3\x3E\x9E\xBE\x9E\x01\x9E\x01\xC6\x98\x6F\xA6\x67\x89\x67\xA6\xBA\x48\x3D\x5E\xBB\xC6\x36\x9E\xD2\x35\x73\x0C\x53\xD1\x54\xD9\x14\x68\xE2\xE3\x15\x6B\xCC\x31\xD1\x1F\x29\xAF\xE6\x33\xF1\xBC\x53\xF3\x4E\xCF\x1B\x67\x8C\x73\x27\x06\x6A\x66\xD5\x59\x15\xCC\x65\xB9\xEE\xDC\xBE\xB9\xDE\xDC\xD3\x73\x4B\x72\x4B\x72\x47\xE6\x9E\x91\x3B\x31\x10\xF2\xFB\xA2\x65\xA6\x8B\xCC\xEF\xBC\xF7\x85\xCD\x70\x1C\x15\xFE\xB3\xF1\xCC\xC4\x40\x6D\x1D\xB0\xF9\x7E\xFD\x7E\xB4\x3D\xD3\x26\x06\x4D\x3F\x58\x3E\x0F\xFE\
xC4\x68\x33\x1D\x44\x44\x32\xC9\xE4\x20\x07\x75\xA0\x0E\x94\x42\x29\x94\x4A\xA9\xD4\x91\x3A\x52\x27\xEA\x44\x9D\xA9\x33\x65\x51\x16\x75\xA1\x2E\x94\x4D\xD9\x94\x4B\xB9\xD4\x9D\xBA\x53\x3E\xE5\x93\x8B\x5C\xE4\x21\x0F\x9D\x40\x27\x50\x4F\xEA\x49\xBD\xA8\x17\xF5\xA6\xDE\xD4\x97\xFA\x52\x21\x15\x52\x7F\xEA\x4F\x45\x54\x44\x03\x69\x20\x9D\x4C\x27\xD3\xA9\x74\x1A\x15\x53\x31\x0D\xA6\xC1\x34\x94\x86\x52\x09\x95\xD0\x70\x1A\x4E\x23\x69\x24\x8D\xA6\xD1\x34\x96\xC6\xD1\x04\x9A\x40\x93\x68\x12\x9D\x41\x67\xD0\x99\x74\x26\x4D\xA5\xA9\x74\x0E\x9D\x43\xE7\xD1\x79\x74\x01\x5D\x40\xD3\x69\x3A\xCD\xA0\x19\x54\x46\x65\xE4\x23\x1F\x95\x53\x39\xF9\xC9\x4F\x55\x54\x45\x01\x0A\xD0\x1C\x9A\x43\x35\x54\x43\x61\x0A\xD3\x5C\x9A\x4B\xB5\x54\x4B\x75\x54\x47\xF3\x69\x3E\x5D\x4C\x17\xD3\x25\x74\x09\x5D\x46\xF5\xB4\x98\x16\xD3\x55\x74\x15\x2D\xA1\x25\x74\x03\xDD\x48\xB7\xD0\x2D\x74\x1B\xDD\x46\x77\xD0\x9D\x74\x0F\xDD\x43\xF7\xD2\xBD\x74\x1F\xDD\x47\x0F\xD0\x03\xB4\x92\x56\x52\x23\x35\xD1\x43\xF4\x10\xAD\xA2\x55\xF4\x38\x3D\x4E\x4F\xD2\x93\xF4\x0C\x3D\x4B\x6B\x69\x1D\xBD\x4A\xAF\xD2\x6B\xF4\x1A\xBD\x49\x6F\xD2\x5B\xF4\x16\x6D\xA6\xCD\xF4\x0E\xBD\x43\x5B\x69\x2B\x6D\xA7\xED\xB4\x93\x76\xD2\xFB\xF4\x3E\x7D\x48\x1F\xD1\x27\xF4\x09\xED\xA2\x5D\xF4\x19\x7D\x46\x5F\xD0\x17\xF4\x15\x7D\x45\xDF\xD0\x37\xF4\x1D\x7D\x47\x3F\xD0\x0F\xF4\x23\xED\xA6\x9F\xE9\x67\xFA\x95\x7E\xA5\xFD\xB4\x9F\x5A\xA9\x95\x0E\xD1\x21\xD2\x49\x27\x92\x48\x92\x25\x59\x72\x48\x0E\xA9\x83\xD4\x41\x4A\x91\x52\xA4\x54\x29\x55\xEA\x28\x75\x94\x3A\x49\x9D\xA4\xCE\x52\x67\x29\x4B\xCA\x92\xBA\x48\x5D\xA4\x6C\x29\x5B\xCA\x95\x72\xA5\xEE\x52\x77\x29\x5F\xCA\x97\x5C\x92\x8B\x4F\x89\xA4\x13\xA4\x13\xA4\x9E\x52\x4F\xA9\x97\xD4\x4B\xEA\x2D\xF5\x96\xFA\x4A\x7D\xA5\x42\xA9\x50\xEA\x2F\xF5\x97\x8A\xA4\x22\x69\xA0\x34\x48\x1A\x2C\x0D\x96\x86\x49\x25\xD2\x70\x69\xB8\x34\x46\x1A\x23\x8D\x93\xC6\x49\x13\xA4\x09\xD2\x24\x69\x92\x74\x86\x74\xAB\x74\x9B\xF4\x88\x34\x57\x9E\x2B\xD7\xCA\xB5\xF2\x7C\x79\xBE\xFC\x17\xF9\x52\x79\x99\xBC\x4C\x7E\x50\x7E\x4B\xDE\
x2C\x6F\x91\xB7\xC9\x3B\xE4\xF7\xE4\xF7\xE4\x0F\xE4\x0F\xE4\x8F\xE4\x8F\xE4\x4F\xE4\x4F\xE4\x5D\xF2\x2E\xF9\x33\xF9\x33\xF9\x0B\xF9\x0B\xF9\x2B\xF9\x2B\xF9\x1B\xF9\x1B\xF9\x3B\xF9\x3B\xF9\x07\xF9\x07\xF9\x47\xF9\x67\xF9\x57\xF9\x57\x79\x9F\xBC\x4F\x6E\x95\x0F\xCA\x5E\xA5\x4C\xF1\x29\x3E\xA5\x5C\x29\x57\xFC\x8A\x5F\xA9\x52\xAA\x94\x80\x12\x50\xE6\x28\x73\x94\x1A\xA5\x46\x09\x2B\x61\x65\xAE\x32\x57\xA9\x55\x6A\x95\x3A\xA5\x4E\x99\xAF\xCC\x57\x16\x2A\x0B\x95\xBF\x28\x7F\x51\x2E\x55\x2E\x55\x16\x29\x8B\x94\xCB\x95\xCB\x95\xEB\x95\xEB\x95\x1B\x95\x1B\x95\xA5\xCA\x52\xE5\x66\xE5\x66\xE5\x56\xE5\x56\xE5\x36\xE5\x36\xE5\x76\xE5\x76\xE5\x0E\xE5\x0E\xE5\x4E\xE5\x4E\xE5\x6E\xE5\x6E\x65\xB9\xB2\x5C\xB9\x57\xB9\x57\xB9\x4F\xB9\x4F\x79\x40\x79\x40\xD1\x14\x4D\x69\x50\x1A\x94\x26\xA5\x49\x79\x48\x79\x48\x79\x44\x79\x44\x59\xAD\xAC\x56\x1E\x53\x1E\x53\x9E\x50\x9E\x50\x9E\x52\x9E\x52\x9E\x51\x9E\x51\x9E\x53\x9E\x53\xD6\x2A\x6B\x95\x75\xCA\x3A\xE5\x45\xE5\x45\xE5\x65\xE5\x65\xE5\x55\xE5\x55\x65\xA3\xB2\x51\x79\x5D\x79\x5D\x79\x53\x79\x53\x79\x4B\x79\x5B\xD9\xA2\x6C\x51\xDE\x55\xDE\x55\xB6\x29\xDB\x94\x1D\xCA\x0E\xE5\x3D\xE5\x3D\xE5\x03\xE5\x03\xE5\x23\xE5\x23\xE5\x13\xE5\x13\x65\x97\xB2\x4B\xF9\x4C\xF9\x4C\xF9\x42\xF9\x42\xF9\x4A\xF9\x4A\xF9\x46\xF9\x46\xF9\x4E\xF9\x4E\xF9\x41\xF9\x41\xF9\x51\xF9\x51\xF9\x49\xF9\x49\xF9\x59\xF9\x59\xF9\x55\xF9\x55\xD9\xAB\xEC\x55\xF6\x2B\xFB\x95\x56\xA5\x55\x39\xA4\x1C\x52\x74\x45\x57\xF8\xCD\x2B\x3B\x64\x07\x0F\x1D\x1C\x1D\x1C\x29\x8E\x14\x47\xAA\x23\xD5\xD1\xD1\xD1\xD1\xD1\xC9\xD1\xC9\xD1\xD9\xD1\xD9\x91\xE5\xC8\x72\x74\x71\x74\x71\x64\x3B\xB2\x1D\xB9\x8E\x5C\x47\x77\x47\x77\x47\xBE\x23\xDF\xE1\x72\xB8\x1C\x1E\x87\xC7\x71\x82\xE3\x04\x47\x4F\x47\x4F\x47\x2F\x47\x2F\x47\x6F\x47\x6F\x47\x5F\x47\x5F\x47\xA1\xA3\xD0\xD1\xDF\xD1\xDF\x5C\xDF\xCA\x38\x90\x71\x28\x43\xCF\xD8\x97\x75\x28\x8B\xCF\xE9\x91\xDF\x57\x3C\x5F\xFF\x91\x4F\x8C\x98\xCC\x64\xE6\x60\x0E\xD6\x81\x75\x60\x29\x2C\x85\xA5\xB2\x54\xD6\x91\x75\x64\x9D\x58\x27\xD6\x99\x75\x66\x59\x2C\x8B\x75\
x61\x5D\x58\x36\xCB\x66\xB9\x2C\x97\x75\x67\xDD\x59\x3E\xCB\x67\x2E\xE6\x62\x1E\xE6\x61\x27\xB0\x13\x58\x4F\xD6\x93\xF5\x62\xBD\x58\x6F\xD6\x9B\xF5\x65\x7D\x59\x21\x2B\x64\xFD\x59\x7F\x56\xC4\x8A\xD8\x40\x36\x90\x9D\xCC\x4E\x66\xA7\xB2\x53\xD9\x20\x36\x88\x9D\xCE\x4E\x67\x43\xD8\x10\x36\x8C\x0D\x63\xA5\xAC\x94\x8D\x60\x23\xD8\x28\x36\x8A\x8D\x61\x63\xD8\x38\x36\x8E\x4D\x60\x13\xD8\x24\x36\x89\x9D\xC1\xCE\x60\x67\xB2\x33\xD9\x54\x36\x95\x9D\xC3\xCE\x61\xE7\xB1\xF3\xD8\x05\xEC\x02\x36\x9D\x4D\x67\x33\xD8\x0C\x56\xC6\xCA\x98\x8F\xF9\x58\x39\x2B\x67\x7E\xE6\x67\x55\xAC\x8A\x05\x58\x80\xCD\x61\x73\x58\x0D\xAB\x61\x61\x16\x66\x73\xD9\x5C\x56\xCB\x6A\x59\x1D\xAB\x63\xF3\xD9\x7C\xB6\x90\x2D\x64\x7F\x61\x7F\x61\x97\xB2\x4B\xD9\x22\xB6\x88\x5D\xCE\x2E\x67\x8B\xD9\x62\x76\x15\xBB\x8A\x5D\xC3\xAE\x61\xD7\xB2\x6B\xD9\xF5\xEC\x7A\x76\x23\xBB\x91\x2D\x65\x4B\xD9\xCD\xEC\x66\x76\x2B\xBB\x95\xDD\xC6\xEE\x66\xCB\xD9\x72\x76\x2F\xBB\x97\xDD\xC7\xEE\x63\x0F\xB0\x07\x98\xC6\x34\xD6\xC0\x1A\x58\x13\x6B\x62\x0F\xB1\x87\xD8\x23\xEC\x11\xB6\x9A\xAD\x66\x8F\xB1\xC7\xD8\x13\xEC\x09\xF6\x14\x7B\x8A\x3D\xC3\x9E\x61\xCF\xB1\xE7\xD8\x5A\xB6\x96\xAD\x63\xEB\xD8\x0B\xEC\x05\xF6\x12\x7B\x89\xBD\xC2\x5E\x61\x1B\xD8\x06\xF6\x1A\x7B\x8D\xBD\xC1\xDE\x60\x9B\xD8\x26\xF6\x36\x7B\x9B\x6D\x61\x5B\xD8\xBB\xEC\x5D\xB6\x8D\x6D\x63\x3B\xD8\x0E\xF6\x1E\x7B\x8F\x7D\xC0\x3E\x60\x1F\xB1\x8F\xD8\x27\xEC\x13\xB6\x8B\xED\x62\x9F\xB1\xCF\xD8\x17\xEC\x0B\xF6\x15\xFB\x8A\x7D\xC3\xBE\x61\xDF\xB1\xEF\xD8\x0F\xEC\x07\xF6\x23\xFB\x91\xFD\xC4\x7E\x62\x3F\xB3\x9F\xD9\xAF\xEC\x57\xB6\x97\xED\x65\xFB\xD9\x7E\xD6\xCA\x5A\xD9\x21\x76\x88\xE9\xAC\x83\x2B\xC3\xD5\xC5\xE5\x76\xF5\x72\x79\x5D\xA7\xBA\x4A\x5C\xA3\x8C\x35\x9B\x99\xAE\x2A\x57\xC4\x58\xAB\xB9\xC6\xB5\xCC\x75\xBB\x6B\x85\x4B\x73\xAD\x32\xD6\x6D\x9E\xB2\xAD\xDB\x6C\x10\xEB\x36\x3B\x5C\xCD\xAE\xAF\xC4\xBA\xCD\x5E\x97\xD3\xE3\xF4\xA4\x7B\x3A\x79\xB2\x3C\x59\x9E\xFE\x9E\xFE\x9E\x93\x3C\x27\x79\x06\x19\x6B\x33\x43\x3C\xA3\xC5\xDA\xCC\x95\x9E\x2B\x3D\x5E\xF5\x22\xD5\xA7\xFA\x54\
xBF\x5A\xA9\x56\xAB\xD5\xEA\x6C\x75\xB6\x1A\x54\x83\xEA\x5C\x75\xAE\x5A\xAB\xC6\xD4\x79\xEA\x5F\xD4\xCB\xD5\xCB\xD5\xC5\xEA\x62\xF5\x2A\xF5\x2A\xF5\x1A\xF5\x1A\xF5\x5A\xF5\x5A\xF5\x7A\xF5\x7A\xF5\x46\xF5\x46\x75\xA9\xBA\x54\xBD\x59\xBD\x59\xBD\x55\xBD\x55\xBD\x4D\xBD\x4D\xBD\x5D\xBD\x5D\xBD\x43\xBD\x43\xBD\x53\xBD\x53\xBD\x5B\xBD\x5B\x5D\xAE\x2E\x57\xEF\x55\xEF\x55\xEF\x53\xEF\x53\x1F\x50\x1F\x50\x35\x55\x53\x1B\xD4\x06\xB5\x49\x6D\x52\x1F\x52\x1F\x52\x1F\x51\x1F\x51\x57\xAB\xAB\xD5\xC7\xD4\xC7\xD4\x27\xD4\x27\xD4\xA7\xD4\xA7\xD4\x67\xD4\x67\xD4\xE7\xD4\xE7\xD4\xB5\xEA\x5A\x75\x9D\xBA\x4E\x7D\x41\x7D\x41\x7D\x49\x7D\x49\x7D\x45\x7D\x45\xDD\xA0\x6E\x50\x5F\x53\x5F\x53\xDF\x50\xDF\x50\x37\xA9\x9B\xD4\xB7\xD5\xB7\xD5\x2D\xEA\x16\xF5\x5D\xF5\x5D\x75\x9B\xBA\x4D\xDD\xA1\xEE\x50\xDF\x53\xDF\x53\x3F\x50\x3F\x50\x3F\x52\x3F\x52\x3F\x51\x3F\x51\x77\xA9\xBB\xD4\xCF\xD4\xCF\xD5\xEF\xD5\xEF\xD5\x16\xB5\x45\xFD\x39\x61\xFD\xA9\x34\x5E\x1A\x1F\x11\x1F\x11\x1F\x15\x1F\x15\x1F\x13\x1F\x13\x1F\x17\x1F\x17\x9F\x10\x9F\x10\x9F\x14\x9F\x14\x3F\x23\x7E\x46\xFC\xCC\xF8\x99\xF1\xA9\xF1\xA9\xF1\x73\xE2\xE7\xC4\xCF\x8B\x9F\x17\xBF\x20\x7E\x41\x7C\x7A\x7C\x7A\x7C\x46\x7C\x46\xBC\x2C\x5E\x16\xF7\xC5\x7D\xF1\xF2\x78\x79\xDC\x1F\xF7\xC7\xAB\xE2\x55\xF1\x40\x3C\x10\x9F\x13\x9F\x13\xAF\x89\xD7\xC4\x2F\x8F\x5F\x1E\x5F\x1C\x5F\x1C\xBF\x2A\x7E\x55\xFC\x9A\xF8\x35\xF1\x6B\xE3\xD7\xC6\xAF\x8F\x5F\x1F\xBF\x31\x7E\x63\x7C\x69\x7C\x69\xFC\xE6\xF8\xCD\xF1\x5B\xE3\xB7\xC6\x6F\x8B\xDF\x16\xBF\x3D\x7E\x7B\xFC\x8E\xF8\x1D\xF1\x3B\xE3\x77\xC6\x4F\xD0\x4E\xD0\x7A\x6A\x3D\xB5\x5E\x5A\x2F\xAD\xB7\xD6\x5B\xEB\xAB\xF5\xD5\x0A\xB5\x42\xAD\xBF\x56\xA4\x0D\xD4\x06\x6A\x27\x6B\x27\x6B\xA7\x6A\xA7\x6A\x83\xB4\x41\xDA\xE9\xDA\xE9\xDA\x10\x6D\x88\x36\x4C\x1B\xA6\x95\x6A\xA5\xDA\x08\x6D\x84\x36\x4A\x1B\xA5\x8D\xD1\xC6\x68\xE3\xB4\x71\xDA\x04\x6D\x82\x36\x49\x9B\xA4\x9D\xA1\x9D\xA1\x9D\xA9\x9D\xA9\x4D\xD5\xA6\x6A\xE7\x68\xE7\x68\xE7\x69\xE7\x69\x17\x68\x17\x68\xD3\xB5\xE9\xDA\x0C\x6D\x86\x56\xA6\x95\x69\x3E\xCD\xA7\x95\x6B\
xE5\x9A\x5F\xF3\x6B\x55\x5A\x95\x16\xD0\x02\xDA\x1C\x6D\x8E\x56\xA3\xD5\x68\x61\x2D\xAC\xCD\xD5\x16\x68\x17\x6B\x17\x6B\x97\x68\x97\x68\x8B\xB4\x45\xDA\xE5\xDA\xE5\xDA\x62\x6D\xB1\x76\x95\x76\x95\x76\x8D\x76\x8D\x76\x83\x76\x83\x76\x93\x76\x93\x76\xB3\x76\xB3\x76\xAB\x76\x9B\x76\xBB\x76\xBB\x76\x87\x76\x87\x76\xA7\x76\xA7\x76\xB7\x76\xB7\xB6\x5C\x5B\xAE\xDD\xAB\xDD\xAB\xDD\xA7\xDD\xA7\x3D\xA0\x3D\xA0\xF1\xD0\xA0\x35\x68\x8F\x68\x8F\x68\x4F\x68\x4F\x68\x4F\x69\x4F\x69\xCF\x68\xCF\x68\xCF\x69\xCF\x69\x6B\xB5\xB5\xDA\x3A\x6D\x9D\xF6\x92\xF6\x92\xB6\x51\xDB\xA8\xBD\xAE\xBD\xAE\xFD\xA6\xFD\xA6\x1D\xD0\x0E\x18\x63\xC9\x99\x4D\xD5\x4D\x91\xA6\x75\x4D\xD6\xDA\x17\x1F\x4B\xC2\xCE\xFD\x15\xEB\x44\x6D\xDC\x5E\x31\x0F\xB7\xB8\xBB\xDD\xB6\x12\x06\x6E\x25\x4C\xB6\xCD\xDB\xAA\xB6\x12\xAE\xD8\x4A\xB8\x73\x2B\xE1\xC9\x3F\x98\x9F\xBD\xBD\x95\xF0\xED\x56\x82\x2C\xE6\x63\xA7\x8A\xF9\xD6\x9C\x6D\x84\xAB\xB7\x11\x96\x6F\x23\x3C\xBB\x8D\xF0\xEE\x36\x42\x0B\x9F\xCB\x6D\x27\x1C\xBF\x9D\x70\xFA\x76\xC2\xB9\xDB\x09\xE1\xED\x84\xEB\xB6\x9B\xF3\x33\xFB\xBC\x6C\x57\xC2\xBC\xAC\x7B\xC2\xBC\xEC\x9C\x84\x79\xD9\xD2\x84\x79\xD9\x6B\x62\x6E\xC6\x61\x71\x91\x27\x26\x10\x90\x57\x49\x47\xEF\xD1\xC4\xE4\x98\x7C\xB1\x7C\xB1\x7C\x81\xE3\x02\x47\x49\x52\x89\xB1\xC7\x62\xED\xA1\xD8\xF7\x4F\xE6\xA6\xCC\x4D\x19\x9D\x3E\xDA\x98\x57\x1C\xCC\x38\x98\xB1\x23\x7B\x47\xF6\x88\x9C\x11\x39\x5A\x37\xAD\xDB\x82\xBC\x4B\xF3\xD4\xFC\xD9\x06\x9F\xE5\xCE\xFC\x75\xF9\xF6\xF9\xC1\x25\xEA\xA5\x6A\xB8\x30\x2C\xD6\x6F\x1D\x62\xFD\xF6\x74\xEF\xDD\xDE\x7B\xBC\x07\xBD\x87\xBD\x5D\x56\x74\x59\xB1\xEF\xFE\x43\xF7\xA7\xC5\xD3\xE2\x8B\xE2\x8B\xE2\x77\xC5\xEF\x36\xD6\x64\x23\x5A\x44\x5B\xA2\x2D\xD1\xF6\x69\xFB\xB5\xB7\x1B\xDE\x6E\xF8\xB2\xE1\xCB\x86\x48\x63\xA4\xF1\xA3\xC6\x8F\x1A\x7F\x6E\xFC\xA5\xF1\xC2\xA6\xB2\xA6\x40\x53\xA0\xC9\xE2\xA6\x19\xFB\x8B\x62\x9D\xE0\xF7\xE6\xFF\xD6\x7C\xD5\x9A\x8F\x4E\x6C\x3F\x9D\x6A\x9B\x07\x3D\x47\xCF\xD1\x7A\x7A\x89\xFE\x2A\xFD\x55\x3A\xB2\x17\x65\xF2\xA8\xED\xFB\x51\x7F\xB6\x17\x85\xE4\x2E\x7F\xBA\xEF\
x14\xFD\xEF\xBE\xD3\xFF\xA5\x7D\xA7\x51\xFF\xD2\xBE\xD3\x7F\xBA\xE7\x84\xCE\xFF\xF3\x3D\xA6\x7F\x77\x6F\xC9\xDC\x57\x1A\x9E\x33\xF2\xDF\xDC\x5B\xFA\xE3\x7D\xA5\xF9\x47\xED\x25\x9D\xEC\x39\xCD\xE3\x2D\xAC\xFA\x97\xF6\x65\x92\xBC\x49\xDE\xC1\x62\x3F\x66\xB9\xD8\x8F\xD1\xFF\x70\x3F\xE6\xB8\x15\x5D\x8D\x3D\x99\xBD\xF7\xE3\x81\xD4\xB6\x3D\x99\x50\x3C\x64\xEC\xC7\x2C\xD5\x96\x6A\x7B\xB5\xBD\x5A\xEB\x7F\xB0\x0F\xD3\xDC\xF0\x79\xC3\x57\xC7\xDC\x83\x09\x37\xCE\xFD\xD3\x7D\x98\x0F\xC5\x3E\xCC\x9E\x63\xEE\xC3\xAC\xFF\x9D\xFD\x97\xFF\x74\xDF\xC5\xE2\x19\x1C\x8B\xFF\xF2\x3F\x5D\xAF\xF8\x3F\xB5\xFF\x32\x45\xEC\xBF\xFC\xBF\xBE\xDF\xF2\xDF\xFD\x95\xFF\x9D\xFB\x2B\xFF\xDD\x57\xF9\x9F\xED\xAB\x24\xEE\xA7\x24\xEE\xA3\xFC\x5F\xDB\x2F\x11\xFB\x07\xBF\xBB\x6F\xF0\xDF\x7D\x82\xFF\xD9\x3E\x81\xA0\x8D\xBD\x42\xAF\xD0\x46\xDA\x48\x6F\xD0\x1B\xB4\x87\xF6\xD0\x12\xD7\x52\xD7\xDF\x5D\xCB\x5D\x2B\x5D\x8F\xB8\xD6\xB8\xD6\xB8\xDE\x74\xBD\xE9\x3A\xE8\x3A\xE8\x9A\x58\x77\x64\x3F\xC0\xDC\x03\xF8\x98\xBF\x7A\x48\x22\x89\x14\x52\x28\x89\x92\x28\x99\x92\xC9\x49\x4E\x4A\xA3\x34\x4A\xA7\x74\xCA\xA0\x0C\xCA\xA4\x4C\x3A\x8E\x8E\xA3\xAE\xD4\x95\x72\x28\x87\xBA\x51\x37\xCA\xA3\x3C\x62\xC4\xC8\x4D\x6E\x3A\x9E\x8E\xA7\x1E\xD4\x83\x4E\xA4\x13\xA9\x80\x0A\xA8\x0F\xF5\x21\x95\x54\xEA\x47\xFD\xC8\x4B\x5E\x1A\x40\x03\xE8\x24\x3A\x89\x4E\xA1\x53\x68\x10\x0D\xA2\xD3\xE9\x74\x1A\x42\x43\x68\x18\x0D\xA3\x52\x2A\xA5\x11\x34\x82\x46\xD1\x28\x1A\x43\x63\x68\x3C\x8D\xA7\x89\x34\x91\x26\xD3\x64\x9A\x42\x53\xE8\x2C\x3A\x8B\xCE\xA6\xB3\xE9\x5C\x3A\x97\xCE\xA7\xF3\x69\x1A\x4D\xA3\x0B\xE9\x42\xBA\x88\x2E\xA2\x99\x34\x93\x66\xD1\x2C\xAA\xA0\x0A\xAA\xA4\x4A\xAA\xA6\x6A\x9A\x4D\xB3\x29\x48\x41\x0A\x51\x88\x22\x14\xA1\x28\x45\x29\x46\x31\x9A\x47\xF3\x68\x01\x2D\xA4\xBF\xD0\x5F\xE8\x52\xBA\x94\x2E\xA7\x2B\xE8\x4A\xBA\x92\xAE\xA6\x6B\xE8\x5A\xBA\x9E\x6E\xA2\x9B\xE9\x56\xFA\x2B\xFD\x8D\xFE\x4E\x77\xD1\xDD\xB4\x9C\x56\xD0\x3F\xE9\x9F\x74\x3F\xDD\x4F\x71\xD2\xA8\x81\x1A\xE8\x41\x7A\x90\x1E\xA6\x47\x68\x35\x3D\x46\x4F\xD0\x13\xF4\x14\x3D\
x4D\x6B\x68\x0D\xBD\x4C\x2F\xD3\x06\xDA\x40\xAF\xD3\xEB\xB4\x89\x36\xD1\xDB\xF4\x36\x6D\xA1\x2D\xF4\x2E\xBD\x4B\xDB\x68\x1B\xED\xA0\x1D\xF4\x1E\xBD\x47\x1F\xD0\x07\xF4\x31\x7D\x4C\xCD\xD4\x4C\x9F\xD2\xA7\xF4\x39\x7D\x4E\x5F\xD2\x97\xF4\x35\x7D\x4D\xDF\xD2\xB7\xF4\x3D\x7D\x4F\x2D\xD4\x42\x3F\xD1\x4F\xF4\x0B\xFD\x42\xBF\xD1\x3E\x3A\x40\x07\xE8\x20\x1D\xA4\xC3\x74\x98\x20\x41\xE2\x41\x91\x14\x29\x49\x4A\x92\x92\xA5\x64\xC9\x29\x39\xA5\x34\x29\x4D\x4A\x97\xD2\xA5\x0C\x29\x43\xCA\x94\x32\xA5\xE3\xA4\xE3\xA4\xAE\x52\x57\x29\x47\xCA\x91\xBA\x49\xDD\xA4\x3C\x29\x4F\x62\x12\x93\xDC\x92\x5B\x3A\x5E\x3A\x5E\xEA\x21\xF5\x90\x4E\x94\x4E\x94\x0A\xA4\x02\xA9\x8F\xD4\x47\x52\x25\x55\xEA\x27\xF5\x93\xBC\x92\x57\x1A\x20\x0D\x90\x8A\xA5\xD3\xA5\x21\xD2\x50\xA9\x54\x2A\x95\x46\x48\xA3\xA5\xB1\xD2\x58\x69\xBC\x34\x5E\x9A\x28\x4D\x94\x26\x4B\x93\xA5\x88\x1C\x91\xA3\x72\x54\x9E\x27\xCF\x3B\x06\xDF\x6F\xA9\x7C\xB3\xC1\xF9\x6B\x92\xDF\x96\xDF\x96\xDF\x91\xB7\xCA\x3B\xE5\x9D\xF2\xFB\xF2\xFB\xF2\x87\xF2\x87\xF2\xC7\xF2\xC7\x72\xB3\xDC\x2C\x7F\x2A\x7F\x2A\x7F\x2E\x7F\x2E\x7F\x29\x7F\x29\x7F\x2D\x7F\x2D\x7F\x2B\x7F\x2B\x7F\x2F\x7F\x2F\xB7\xC8\x2D\xF2\x2F\xF2\x2F\xF2\x5E\x79\xAF\xBC\x5F\x3E\x20\x1F\x92\xFB\x2B\x33\x95\x99\xCA\x2C\x65\x96\x52\xA1\x54\x28\x95\x4A\xA5\x52\xAD\x54\x2B\xB3\x95\xD9\x4A\x50\x09\x2A\x21\x25\xA4\x44\x94\x88\x12\x55\xA2\x4A\x4C\x89\x29\xF3\x94\x79\xCA\x02\x65\x81\x72\xB1\x72\xB1\x72\x89\x72\x89\x72\x99\x72\x99\x52\xAF\xD4\x2B\xD7\x29\xD7\x29\x37\x28\x37\x28\x37\x29\x37\x29\xCB\x94\x65\xCA\x2D\xCA\x2D\xCA\x5F\x95\xBF\x2A\x7F\x53\xFE\xA6\xFC\x5D\xF9\xBB\xF2\x0F\xE5\x1F\xCA\x5D\xCA\x5D\xCA\x3D\xCA\x3D\xCA\x0A\x65\x85\xF2\x4F\xE5\x9F\xCA\xFD\xCA\xFD\x4A\x5C\x89\x2B\x2B\x95\x95\x4A\xA3\xD2\xA8\x3C\xA8\x3C\xA8\x3C\xAC\x3C\xAC\xAC\x52\x56\x29\x8F\x2A\x8F\x2A\x8F\x2B\x8F\x2B\x4F\x2A\x4F\x2A\x4F\x2B\x4F\x2B\xCF\x2A\xCF\x2A\x6B\x94\x35\xCA\xF3\xCA\xF3\xCA\x7A\xE5\x05\xE5\x25\xE5\x25\xE5\x15\xE5\x15\x65\x83\xB2\x41\x79\x4D\x79\x4D\x79\x43\x79\x43\xD9\xA4\x6C\x52\x36\x2B\x9B\x95\x77\
x94\x77\x94\xAD\xCA\x56\x65\xBB\xB2\x5D\xD9\xA9\xEC\x54\xDE\x57\xDE\x57\x3E\x54\x3E\x54\x3E\x56\x3E\x56\x9A\x95\x66\xE5\x53\xE5\x53\xE5\x73\xE5\x73\xE5\x4B\xE5\x4B\xE5\x6B\xE5\x6B\xE5\x5B\xE5\x5B\xE5\x7B\xE5\x7B\xA5\x45\x69\x51\x76\x2B\xBB\x95\x3D\xCA\x1E\xE5\x17\xE5\x17\xE5\x37\xE5\x37\x65\x9F\xB2\x4F\x39\xA0\x1C\x50\x0E\x2A\x07\x95\xC3\xCA\x61\x05\x0E\x38\x24\x87\xE4\x50\x1C\x8A\x23\xC9\x91\xE4\x48\x76\x24\x3B\x9C\x0E\xA7\x23\xCD\x91\xE6\x48\x77\xA4\x3B\x32\x1C\x19\x8E\x4C\x47\xA6\xE3\x38\xC7\x71\x8E\xAE\x8E\xAE\x8E\x1C\x47\x8E\xA3\x9B\xA3\x9B\x23\xCF\x91\xE7\x60\x0E\xE6\x70\x3B\xDC\x8E\xE3\x1D\xC7\x3B\x7A\x38\x7A\x38\x4E\x74\x9C\xE8\x28\x70\x14\x38\xFA\x38\xFA\x38\x54\x87\xEA\xE8\xE7\xE8\x67\xF0\x37\xED\x3C\xBC\x15\x59\xBF\x66\xB5\xF9\x42\x30\xF0\x6E\xC6\x14\xA6\xB0\x24\x96\xC4\x92\x59\x32\x73\x32\x27\x4B\x63\x69\x2C\x9D\xA5\xB3\x0C\x96\xC1\x32\x59\x26\x3B\x8E\x1D\xC7\xBA\xB2\xAE\x2C\x87\xE5\xB0\x6E\xAC\x1B\xCB\x63\x79\x8C\x07\x37\x73\xB3\xE3\xD9\xF1\xAC\x07\xEB\xC1\x4E\x64\x27\xB2\x02\x56\xC0\xFA\xB0\x3E\x4C\x65\x2A\xEB\xC7\xFA\x31\x2F\xF3\xB2\x01\x6C\x00\x3B\x89\x9D\xC4\x4E\x61\xA7\xB0\xD3\xD8\x69\xAC\x98\x15\xB3\xC1\x6C\x30\x1B\xCA\x86\xB2\x12\x56\xC2\x86\xB3\xE1\x6C\x24\x1B\xC9\x46\xB3\xD1\x6C\x2C\x1B\xCB\xC6\xB3\xF1\x6C\x22\x9B\xC8\x26\xB3\xC9\x6C\x0A\x9B\xC2\xCE\x62\x67\xB1\xB3\xD9\xD9\xEC\x5C\x76\x2E\x3B\x9F\x9D\xCF\xA6\xB1\x69\xEC\x42\x76\x21\xBB\x88\x5D\xC4\x66\xB2\x99\x6C\x16\x9B\xC5\x2A\x58\x05\xAB\x64\x95\xAC\x9A\x55\xB3\xD9\x6C\x36\x0B\xB2\x20\x0B\xB1\x10\x8B\xB0\x08\x8B\xB2\x28\x8B\xB1\x18\x9B\xC7\xE6\xB1\x05\x6C\x01\xBB\x98\x5D\xCC\x2E\x61\x97\xB0\xCB\xD8\x65\xAC\x9E\xD5\xB3\x2B\xD8\x15\xEC\x4A\x76\x25\xBB\x9A\x5D\xCD\x96\xB0\x25\xEC\x3A\x76\x1D\xBB\x81\xDD\xC0\x6E\x62\x37\xB1\x65\x6C\x19\xBB\x85\xDD\xC2\xFE\xCA\xFE\xCA\xEE\x61\xF7\xB0\x15\x6C\x05\xFB\x27\xFB\x27\xBB\x9F\xDD\xCF\xE2\x2C\xCE\x56\xB2\x95\xAC\x91\x35\xB2\x07\xD9\x83\xEC\x61\xF6\x30\x5B\xC5\x56\xB1\x47\xD9\xA3\xEC\x71\xF6\x38\x7B\x92\x3D\xC9\x9E\x66\x4F\xB3\x67\xD9\xB3\x6C\x0D\x5B\xC3\
x9E\x67\xCF\xB3\xF5\x6C\x3D\x7B\x91\xBD\xC8\x5E\x66\x2F\xB3\x57\xD9\xAB\x6C\x23\xDB\xC8\x5E\x67\xAF\xB3\x37\xD9\x9B\xEC\x2D\xF6\x16\xDB\xCC\x36\xB3\x77\xD8\x3B\x6C\x2B\xDB\xCA\xB6\xB3\xED\x6C\x27\xDB\xC9\xDE\x67\xEF\xB3\x0F\xD9\x87\xEC\x63\xF6\x31\x6B\x66\xCD\xEC\x53\xF6\x29\xFB\x9C\x7D\xCE\xBE\x64\x5F\xB2\xAF\xD9\xD7\xEC\x5B\xF6\x2D\xFB\x9E\x7D\xCF\x5A\x58\x0B\xDB\xCD\x76\xB3\x3D\x6C\x0F\xFB\x85\xFD\xC2\x7E\x63\xBF\xB1\x7D\x6C\x1F\x3B\xC0\x0E\xB0\x83\xEC\x20\x3B\xCC\x0E\xB3\x64\x57\x27\x83\x87\x58\xE0\xEA\xEF\x3A\xCD\x35\xCC\xE0\x22\xB6\xE7\x21\x96\xB9\xAA\x5D\x61\xD7\xD3\xAE\xE7\x5C\x1B\x5D\x6F\xB8\x76\xBA\x3E\x70\x7D\xED\xFA\xDE\xB5\xCF\xD5\xEA\xB2\xB8\x86\xA9\x9E\x8E\x9E\x0C\x4F\xE6\xEF\xF2\x0D\xBD\x9E\x81\x9E\xA1\x06\xDF\x70\x94\x67\xB1\x67\xB1\xC1\x35\x9C\xA9\xCE\x54\x67\xA9\x15\x6A\x95\x5A\xA5\x06\xD4\x80\x3A\x47\x9D\xA3\xD6\xA8\x11\x35\xAA\x46\xD5\x3A\xB5\x4E\xBD\x4C\xAD\x57\xAF\x50\xAF\x50\xAF\x54\xAF\x54\xAF\x56\xAF\x56\x97\xA8\x4B\xD4\xEB\xD4\xEB\xD4\x1B\xD4\x1B\xD4\x9B\xD4\x9B\xD4\x65\xEA\x32\xF5\x16\xF5\x16\xF5\xAF\xEA\x5F\xD5\xBF\xA9\x7F\x53\xFF\xAE\xFE\x5D\xFD\x87\xFA\x0F\xF5\x2E\xF5\x2E\xF5\x1E\xF5\x1E\x75\x85\xBA\x42\xFD\xA7\xFA\x4F\xF5\x7E\xF5\x7E\x35\xAE\xC6\xD5\x95\xEA\x4A\xB5\x51\x6D\x54\x1F\x54\x1F\x54\x1F\x56\x1F\x56\x57\xA9\xAB\xD4\x47\xD5\x47\xD5\xC7\xD5\xC7\xD5\x27\xD5\x27\xD5\xA7\xD5\xA7\xD5\x67\xD5\x67\xD5\x35\xEA\x1A\xF5\x79\xF5\x79\x75\xBD\xBA\x5E\x7D\x51\x7D\x51\x7D\x59\x7D\x59\x7D\x55\x7D\x55\xDD\xA8\x6E\x54\x5F\x57\x5F\x57\xDF\x54\xDF\x54\xDF\x52\xDF\x52\x37\xAB\x9B\xD5\x77\xD4\x77\xD4\xAD\xEA\x56\x75\xBB\xBA\x5D\xDD\xA9\xEE\x54\xDF\x57\xDF\x57\x3F\x54\x3F\x54\x3F\x56\x3F\x56\x9B\xD5\x66\xF5\x53\xF5\x53\xF5\x3B\xF5\x3B\xF5\x07\xF5\x07\x75\x8F\xBA\x47\x2D\x89\x97\xC4\x87\xC7\x87\xC7\x47\xC6\x47\xC6\x47\xC7\x47\xC7\xC7\xC6\xC7\xC6\xC7\xC7\xC7\xC7\x27\xC6\x27\xC6\x27\xC7\x27\xC7\xA7\xC4\xA7\xC4\xCF\x8A\x9F\x15\x3F\x3B\x7E\x76\xFC\xDC\xF8\xB9\xF1\xF3\xE3\xE7\xC7\xA7\xC5\xA7\xC5\x2F\x8C\x5F\x18\xBF\x28\x7E\x51\x7C\x66\x7C\x66\x7C\x56\x7C\
x56\xBC\x22\x5E\x11\xAF\x8C\x57\xC6\xAB\xE3\xD5\xF1\xD9\xF1\xD9\xF1\x60\x3C\x18\xAF\x8F\xD7\xC7\xAF\x88\x5F\x11\xBF\x32\x7E\x65\xFC\xEA\xF8\xD5\xF1\x25\xF1\x25\xF1\xFF\x8F\xBD\x2F\x81\x8F\xA2\xC8\x1E\xAE\x9A\x99\x1C\x93\xC9\x64\x26\x10\x72\xCC\x04\x99\x18\xC8\xA4\x03\x8C\x43\x40\x84\x10\x42\x38\x45\xE5\x88\x04\x41\x0E\x19\x3A\x33\x9D\xA4\xC9\x4C\xF7\xD0\xDD\x03\x89\xAB\x10\x0E\x0F\xF0\x62\x5D\x45\x3C\x56\xF1\xD8\x74\x12\x3C\xF0\xC6\x5D\x84\xE8\x0A\xE8\xAE\xAE\xAC\x8B\xAE\xA2\xAE\x88\x8A\x27\x82\xF7\xC9\xCC\xF7\xEB\xEE\xEA\x99\xEE\x9E\x99\x24\xAC\x06\xF7\xF3\x2F\xFE\xCA\x49\xBD\x7A\x55\xF5\xAA\xEA\xD5\x7B\xAF\xAE\xD7\x1B\xDB\x36\xB6\x5D\xDD\x76\x75\xDB\xB5\x6D\xD7\xB6\x6D\x6A\xDB\xD4\x76\x7D\xDB\xF5\x6D\x37\xB4\xDD\xD0\xB6\xB9\x6D\x73\xDB\x96\xB6\x2D\x6D\xB7\xB4\xDD\xD2\x76\x3A\x7F\x3A\x3F\x98\x1F\xCC\x97\xF0\x25\x7C\x29\x5F\xCA\x97\xF1\x65\xFC\x30\x7E\x18\xEF\xE2\x5D\x7C\x39\x5F\xCE\x8F\xE2\x47\xF1\xA3\xF9\xD1\xFC\x18\x7E\x0C\x5F\xC1\x57\xF0\x95\x7C\x25\x5F\xC5\x57\xF1\xD5\x7C\x35\x3F\x89\x9F\xC4\x4F\xE1\xA7\xF0\xD3\xF8\x69\xFC\x74\x7E\x3A\x7F\x2E\x7F\x2E\x3F\x83\x9F\xC1\xCF\xE2\x67\xF1\x35\x7C\x0D\x3F\x87\x9F\xC3\xCF\xE5\xE7\xF2\xF3\xF8\x79\xFC\x85\xFC\x85\xFC\x42\x7E\x21\xBF\x98\x5F\xCC\x2F\xE1\x97\xF0\x4B\xF9\xA5\x7C\x1D\x5F\xC7\xFB\x78\x1F\x5F\xCF\xD7\xF3\x8D\x7C\x23\xBF\x8C\x5F\xC6\xFB\x79\x3F\x4F\xF1\x14\xDF\xC2\xB7\xF0\xBF\xE3\x7F\xC7\x5F\xCA\xAF\xE2\x5B\xF9\x56\x7E\x2D\xBF\x96\x5F\xCF\xAF\xE7\x2F\xE7\x2F\xE7\xAF\xE2\xAF\xE2\xAF\xE1\xAF\xE1\x37\xF1\x9B\xF8\xEB\xF9\xEB\xF9\xCD\xFC\x66\x7E\x0B\xBF\x85\xBF\x85\xBF\x85\xBF\x8D\xBF\x8D\xBF\x9D\xBF\x9D\xDF\xCA\x6F\xE5\xEF\xE2\xEF\xE2\xEF\xE1\xEF\xE1\xDB\xF8\x36\xBE\x9D\x6F\xE7\x3B\xF9\xFB\xF9\xED\xFC\x23\xFC\x63\xFC\x63\xFC\x0E\x7E\x07\xFF\x67\xFE\xCF\xFC\x4E\x7E\x27\xBF\x8B\xDF\xC5\x3F\xCD\x3F\xCD\x3F\xC3\xEF\xE3\x9F\xE3\x9F\xE3\xBF\xE2\xBF\xE2\x8B\x22\x15\xAA\xF7\xD7\xB2\xDF\x0A\xF9\x7E\xA3\x7C\xAF\x4F\xF6\x43\x11\xFD\x16\xDA\x01\x08\x0A\x0E\x40\x30\xEA\x00\x04\x33\x0F\x40\xD0\x78\x00\x82\x35\xE8\xAE\x51\x6F\xEF\
x1B\x3D\x7A\x00\x82\xFD\x07\x20\xF8\xF0\x00\x04\x86\x97\xBB\xBF\x67\x34\xA6\x97\xF7\x8C\xFC\x2F\x43\x70\xD9\xCB\x10\x6C\x7D\x19\x82\x27\x5E\x86\xE0\xC0\xCB\x10\x1C\x7D\x19\x82\xF4\x57\x20\x28\x7A\x05\x82\xCA\x57\x20\xB8\xE0\x15\x08\x82\xAF\x40\xB0\x41\x71\x1F\xE9\x30\xBA\x87\x54\x88\xEE\x1F\xCD\x43\xF7\x8E\x36\xA1\xFB\x46\x7F\x43\x41\xF6\xAF\x31\xA3\xC5\x2B\xBE\x23\x91\xF7\x35\x66\xB4\xF8\xC4\xB8\xBC\x2F\x30\x01\x85\x99\xDA\xEB\x1E\x7A\x5A\xBF\xCE\xB0\xC1\xF0\xFB\x94\x27\x53\x76\xA7\xEC\x4E\x79\x2A\xE5\xE9\x94\x67\x52\xF6\xA4\xEC\x4B\xD9\x97\x62\x4D\xB5\xA5\x9E\x97\xEA\x11\xDF\xD9\xBC\x92\xFA\x46\xEA\x5B\xA9\xEF\xA6\xBE\x9F\xFA\x41\xEA\x47\xA9\x47\x53\xB3\xD3\xB2\xD3\xDC\x69\xE7\xA6\xB5\xA5\x6D\x4F\xFB\x38\xED\x8B\xB4\x13\x69\x27\xD2\x72\xD3\x0B\xD2\xED\xE9\xC5\xE9\x43\xD2\x9D\xE9\x58\xFA\xF0\xF4\x05\xE9\x8B\x90\xEF\x0C\xBD\xB1\xC2\x58\x69\xAC\x32\xCE\x36\x9E\x6F\x9C\x6F\xAC\x33\x7A\x8D\x6B\x8C\xEB\x8C\x3B\x8D\x3B\x8D\xBB\xA2\x3E\x15\x5E\x10\x7D\x2A\x1C\x36\xBE\x63\x0C\x1B\xC3\x46\x98\xA1\x4F\xF8\x96\x2B\x98\xB1\x3C\x23\x94\x11\xCA\x58\x93\xB1\x2E\x63\x67\xC6\xCE\x8C\x5D\x8A\x37\x68\x87\x33\xDE\xC9\xF8\x3E\x23\x92\x01\x4D\x7A\x53\xA5\xA9\xD2\x54\x65\x9A\x62\x3A\xDB\x34\xDD\x74\x9E\x69\xA6\xE9\x02\xD3\x7C\x53\x9D\xC9\x6B\x5A\x6B\x5A\x6B\xDA\x65\x7A\xDA\xB4\xD7\xF4\xAC\xE9\x6F\xA6\x17\x4C\xFF\x36\xFD\xDB\x04\x32\x0D\x99\x55\x99\x53\x32\xA7\x65\x4E\xCF\x3C\x37\x73\x66\xE6\x05\x99\xF3\x32\xEB\x32\xBD\x99\x6B\x32\xD7\x65\xEE\xCC\xDC\x99\xB9\x4B\xF3\x5E\xE6\x70\xE6\x3B\x99\xC0\xAC\x37\x8F\x33\x57\x9A\xAB\xCC\x53\xCC\xD3\xCC\xD3\xCD\xE7\x9A\x67\x9A\xE7\x9B\xE7\x9B\xEB\xCC\x5E\xF3\x1A\xF3\x3A\xF3\xDF\xCC\x7F\x33\xBF\x68\x3E\x60\x7E\xC5\xFC\x8A\xF9\x55\xF3\x5B\xE6\xCF\xCD\x5F\x98\x47\x64\x8D\xC8\x1A\x95\x55\x91\x75\x76\xD6\xAC\xAC\x07\xB3\x1E\xCC\x7A\x24\x6B\xA7\x78\xFF\x28\xDF\x52\x60\x39\xD3\x72\xA6\xE5\x2C\xCB\x59\x96\xB1\x96\xB1\x96\x2A\xCB\x04\xCB\x72\xCB\x7A\xCB\xE5\x96\x2B\x2C\xD7\x58\x6E\xB2\xDC\x6C\xD9\x69\xD9\x6B\xD9\x6B\x19\x6A\xAD\xB2\xCE\xB3\x2E\xB0\x2E\xB1\x2E\
xB5\xD6\x59\x7D\xD6\x06\x6B\xC0\xBA\xDC\xCA\x59\xD7\x5A\xAF\xB1\x5E\x67\xBD\xCE\x7A\x8B\xF5\x8F\xD6\x8B\xFA\x7B\xFA\xF7\xCB\xCB\xC9\x2B\xCF\x1B\x95\x37\x27\xAF\x36\x8F\xC9\x63\xF3\x1E\xC9\xFB\x57\xDE\x9B\x79\x6F\xE6\x99\xF2\xCD\xF9\x97\xE5\x5F\x9E\xDF\x91\xDF\x91\x2F\xDF\x19\xCA\xB3\xD9\x6D\x17\xD8\x96\xD8\x96\xDA\x2E\xB1\xAD\xB6\xAD\xB6\x6D\xB7\x75\xD9\x80\xDD\x60\x1F\x65\x9F\x62\x6F\xB2\xB3\xF6\x56\xFB\x5A\xFB\x9D\xF6\xFB\xEC\x47\xEC\x5F\xD8\x07\x17\x9E\x55\x28\xBD\xB3\x8A\xBD\xB1\xFA\x46\xF1\xC6\x6A\xBF\xE3\xB8\xE3\xD8\xB0\xCF\x86\xAD\x1E\xBE\x7A\xF8\xA1\xE1\x91\xE1\x65\xEE\x33\xC4\xB7\x56\x74\x1B\xD3\xC6\xB5\x5D\xDA\x76\x7B\xDB\x1D\x6D\xC7\xDB\x3E\x6B\xD3\xB5\xEB\xDA\x53\xDB\x53\xDB\x4D\xED\xA6\xF6\xE2\x76\xA7\xA0\x22\xDA\x5B\xDB\xD7\xB4\x3F\xD2\xBE\xA7\xFD\x50\xFB\x67\xED\x91\xF6\x48\x7B\x49\xC7\xF0\x8E\xB3\x3B\x6A\x3B\x5A\x3B\xD6\x75\x3C\xDC\xD1\xD5\xF1\x5E\xC7\x7B\x1D\x58\xE7\xE8\xCE\xC9\x9D\x93\x3B\x67\x74\xCE\xEC\xFC\x5D\xE7\xA5\x9D\xDB\x3B\xB7\x77\x3E\xD4\xF9\x48\xE7\xE3\x9D\x3B\x3A\x77\x75\xEE\xEE\x7C\xAA\xF3\xA9\xCE\x8F\x3B\x8F\x75\x7E\xD5\xF9\x75\xE7\x3B\xDB\x3E\xDA\xF6\xC9\xB6\xA3\xDB\x4E\xFB\xE1\xB4\x1F\xA4\xB7\x5E\x67\x88\x3E\x5A\xB4\xEF\xBD\x56\xE8\x21\xB8\x58\x0F\x01\x4C\xF0\x2E\xC9\xA2\x79\x87\x24\x84\xF7\x32\x20\x38\x92\x01\xC1\x60\xF4\xCE\x6E\x5B\x16\x04\xF7\x66\x41\x30\xCD\x02\x41\x8D\x05\x8A\xFE\x3A\x74\x56\x08\xC6\x58\x21\x98\x86\x7C\x75\xAC\xB5\x42\xB0\xDD\x0A\xC1\x9F\xAD\x50\xF4\x07\xA1\xCB\x86\xC0\x89\xFC\x42\x4C\xCD\x86\x60\x5A\x36\x04\x2C\x0A\xAD\xD9\x10\xAC\xCD\x86\xE0\xE1\x6C\x08\xBA\xB2\x21\x78\x2E\x1B\x82\xE7\xB3\x21\xF8\x47\x36\x04\x2F\x66\x43\x30\xAC\x1F\x04\x67\xA1\xB7\x49\x42\x78\x0B\xBD\x47\x92\xDF\xB9\x8E\x43\x6F\x57\xAB\x92\xBC\x5F\x95\xDF\xAD\xD6\x25\x79\xB7\x7A\x66\x0E\x04\xD3\x72\x20\x58\x82\xC2\xF6\x1C\x08\xFE\x9A\x03\xC1\x03\x03\x20\x78\x74\x00\x04\x3B\x06\x40\xD0\x35\x00\x82\x37\x06\x40\xF0\xE6\x00\x08\xDC\xB9\x10\x54\xE7\x42\xB0\x2D\x17\x82\xC7\x73\x63\x7E\x79\x86\xE5\x43\x50\x91\x0F\x81\x3B\xC1\x7B\xEA\xAA\x02\x08\xAA\
x51\x98\x54\x00\xC1\xE4\x02\x08\xFE\x59\x00\xC1\xBF\x0B\x20\x38\x58\x00\xC1\xA1\x02\x08\xDE\x45\x01\xDA\x20\xC8\xB0\x41\x30\xD2\x06\xC1\x58\x1B\x04\xE3\x6C\x10\x54\xD9\x20\x38\x1B\x85\xF3\x6D\x10\x2C\xB2\x41\xB0\xD1\x06\xC1\xCD\x36\x08\xCE\x50\xF8\x59\x99\x50\x08\xC1\xF5\x85\x10\xF0\x85\x10\x74\x14\x42\xF0\x58\x21\x04\x23\x06\x42\x30\x7A\x20\x04\x15\x28\x54\x0E\x84\x60\xFC\x40\x08\x26\x0C\x84\x60\xEA\x40\x08\xCE\x46\x61\x63\x82\x77\xA9\x37\x0D\x84\xE0\x8B\xD3\x20\xF8\xFA\x34\xC9\x9F\xCC\x97\xCB\x20\x70\x37\x41\x30\xBA\x09\x82\xD9\xB4\x14\xCE\xA7\x21\xB8\x82\x86\xE0\x3A\x1A\x82\xEB\xD1\x5B\x34\x21\xC8\xEF\x46\xFF\xB8\x13\x82\xDB\x77\x42\x40\xA0\xF7\x85\x81\x7F\x42\xC0\x74\xF3\xCE\x70\xD2\x4B\x10\x4C\x79\x09\x02\x70\x10\x82\xD1\x07\x21\x18\x77\x10\x02\xFF\x41\x08\x42\x28\xAC\x47\xE1\xD6\x83\x10\xDC\x71\x10\x82\x3B\x0F\x42\xF0\xC0\x41\xD8\xEB\x77\xD3\xEE\xB7\x21\x18\xFD\x36\x04\x9F\x1C\x86\xE0\xD8\x61\x08\xF6\x7F\x00\xC1\x2B\x1F\x40\x30\xE5\x43\x08\xCE\xFD\x30\xF6\xCE\x71\x26\xDE\x88\x2F\xC3\x29\xF1\x19\x5C\x0D\xDA\x73\x9D\x89\x37\xE1\x2C\xCE\x88\x7B\x6E\xDF\x9E\x26\xC4\xFD\x78\x0B\xAE\xF1\xE9\x14\x3D\x9F\x92\xE5\xE4\x6C\xF3\x5C\xB3\xD7\x5C\x6F\x5E\x6D\x9E\x89\x53\x3E\x9C\xF4\x82\xEA\xF4\x45\xE9\x4B\xD2\x97\xA4\xCF\xC4\x29\xD2\xDB\x88\x13\xCA\xF7\x90\x5D\x68\xBE\x7D\x9C\x01\xC1\xD7\x19\x42\x1D\x8C\xB7\x91\x88\x21\x04\xD1\xBE\xA1\x76\x9C\x67\x0A\x94\xE1\x81\xB8\xF7\xC7\x89\xF6\x17\x93\xF1\x83\xC0\x07\x35\x03\x21\x58\x30\x10\x82\x99\xE8\x85\xAA\xDE\xA8\x37\x8E\x33\x8E\x33\x56\x19\xAB\x8D\xE7\x18\x67\x18\x67\x19\x67\x1B\xD7\x1A\xD7\x19\x77\x19\xBB\x90\x1E\x7B\x5E\xD4\x63\xFA\x0C\x7D\x46\x55\x46\x75\xC6\xBA\x8C\x75\x19\xBB\x32\xBA\x32\x9E\x13\x75\xD4\xF3\x19\x3A\x93\xDE\x54\x65\xAA\x32\x55\x9B\xAA\x91\x6E\x9A\x61\x9A\x6F\x9A\x6F\xDA\x65\xDA\x6D\x7A\x2A\xAA\x97\x9E\x17\xF5\x12\xCC\xD4\x67\x4E\xCC\x9C\x92\xB9\x36\x73\x5D\xE6\xAE\xCC\x5D\x99\x5D\x99\xCF\x64\xEE\x13\xF5\xD0\xDF\x45\x3D\xA4\x33\xEB\xCD\x55\xE6\x6A\xD4\xAF\x33\x44\xFD\xB3\xD6\xBC\xCE\xFC\xA2\xF9\x9F\x51\x9D\x23\
xE8\x92\xD5\x96\xD5\x96\xA1\xD6\x61\xD6\x11\xD6\x11\xD6\x31\xD6\x31\xD6\x71\xD6\x4A\xEB\x3C\xEB\xFC\xA8\x0E\x59\x67\x5D\x6F\xBD\xC2\x7A\x35\xD2\x21\xB7\x59\x1F\xCB\x7B\x2C\x6F\x57\xDE\x9E\xBC\x7D\x79\xCF\xE6\x15\x17\x94\x14\x60\xA2\xAE\x18\x51\x30\xB2\x60\x4C\x41\x81\xCD\x66\xBB\xC0\x76\x81\x6D\xBE\x6D\xBE\x0D\xB7\xE1\x36\xAF\xCD\x67\x0B\xD8\x18\x9B\xC1\x6E\xB0\x9F\x69\x3F\xD3\x3E\xCE\x3E\xCE\x3E\xDE\x3E\xD1\x3E\xD9\x3E\xC5\xBE\x56\xD4\x1D\x77\xDA\xDB\xEC\xBC\xBD\xD3\xDE\x69\x7F\xDF\xFE\xBE\xFD\x23\xFB\x27\xF6\x4F\xED\x9F\xDA\x3F\x17\xF5\xC9\xD0\xC2\x51\x85\x67\x8A\xBE\x61\xBE\x29\xFC\xA6\xD0\xE5\x3E\xC3\x5D\xDC\x3E\xB8\xDD\xD9\xEE\x44\xFA\xE0\xAF\xED\x73\x3A\x6A\x3B\xD6\x75\xAC\xEB\x78\xA4\xE3\xD1\x8E\x3F\x77\xFC\xA5\x63\x57\x47\x57\xC7\x19\x9D\xEE\xCE\x91\x9D\xA3\x3A\x67\x8A\x7A\x40\xD0\x04\x97\x76\x7E\xDC\xF9\x71\xE7\xA7\xA2\xEC\xFF\xAA\xF3\x9D\x6D\xEF\x6E\x3B\xB2\xED\xFD\x6D\x1F\x8A\x3A\xE0\x13\xD5\xFB\x77\x41\x3E\xCB\x32\x5A\x87\x64\xB3\x2C\x9F\x1F\xB2\x42\xF0\xB8\x15\x82\x1D\x56\x08\x86\x65\x4B\x41\x96\xCF\x6B\x15\x72\xF9\xD1\x6C\x08\x76\x23\xF9\xFC\x0F\x14\x04\xB9\xEC\xEA\x07\x41\x79\x3F\x08\x46\xF6\x83\xE0\x4C\x14\x0E\xF5\x83\xE0\x70\x3F\x08\x74\x48\x3E\x0B\x72\x79\x42\x7F\x08\x26\xF6\x52\x3E\x0B\xF2\xF8\xAC\x1C\x08\xAA\x73\x20\x98\x98\x03\xC1\x54\x14\x04\xB9\xFC\x50\x0E\x04\x4F\xA0\xF0\x97\x1C\x08\x76\xE5\x40\xF0\x14\x0A\x82\xBC\x7E\x10\xC9\xEB\xBF\x0C\x80\x60\x17\x0A\x82\xBC\x2E\xCF\x85\x60\x5C\x2E\x04\x95\xB9\x10\x54\xA1\x70\x2F\x0A\xF7\xE7\x42\xF0\x40\x2E\x04\x8F\xA1\xE0\xC8\x83\xA0\x28\x0F\x82\x12\x14\x04\xB9\xEE\xCA\x87\x60\x0C\x0A\x5A\xF9\x3E\x1E\x05\xA5\x7C\x9F\x84\xE4\xFB\xBF\x0A\x20\x78\xA3\x00\x82\xB7\x14\xF2\x7D\x2C\x92\xEB\xF3\x6D\x10\x5C\x68\x83\xE0\x26\x14\x04\x39\x2E\x84\x2A\x14\x3A\x50\x78\x10\x85\x47\x50\x90\xE5\xB4\x20\x9F\x6F\x18\x08\xC1\x66\x14\xBE\x42\x72\x5A\x96\xC7\xB2\xFC\x25\x90\xFF\x1C\x59\xEE\xCE\xD4\x7C\xAA\xF6\x4A\xC3\x06\xC3\x2E\xDB\x2E\xDB\x9B\x8E\x43\x8E\xC3\x8E\x77\x1D\xC1\x36\xA6\x6D\x26\xE1\x23\xEA\x71\xD2\xC7\x90\x94\xB8\
xDE\xB8\x85\x12\xF2\x11\x1C\x41\x7A\x66\xE2\x2D\x44\x13\x00\x87\x3A\xBF\xEE\xEC\xDA\x76\x74\xDB\xF1\x6D\xDF\x6D\x9B\x49\x50\x3E\xC2\x73\x1E\xD9\x84\x37\x85\xC8\xE8\x7E\xFD\x3E\x24\x57\x67\x12\x0C\x4D\x72\xA4\xD7\x33\x39\xC4\xB0\xE4\x0A\xED\x87\x72\xC1\x56\x74\x7E\xB7\xD3\x08\xC1\x8B\x46\x08\x5E\x32\x42\x10\x31\x2A\xF2\x69\xFC\x85\xB4\x1A\x21\xB8\x43\x48\x27\xD1\xC5\x77\x74\x3E\x31\x5B\xA1\x87\xEE\xA0\x21\x98\x49\x29\xED\xFD\x2B\xFA\xD8\xDE\xD7\x19\x2B\x44\x8B\xBF\xD2\x38\xD1\x38\xDD\x38\xD3\x38\x53\x61\xF7\xAF\x11\xED\xFE\xA7\x8C\xCF\x18\x5F\x30\xBE\xA0\xB0\xF7\x75\xA2\xBD\x3F\x31\xA9\xBD\xBF\x56\xB4\xF7\x9F\x12\xED\xFD\x67\x33\x5E\x50\xD9\xFB\x50\xB4\xF7\x27\x98\x26\x98\x26\x9A\xA6\x98\x66\x8A\xF6\xFE\xBC\xA8\xBD\xDF\x65\xEA\x32\xBD\x60\x7A\xC1\x04\x32\x05\x4B\x5F\xB0\xF5\xAB\x13\xD8\xFA\x6B\x44\x5B\x7F\x77\xE6\xEE\xCC\xBD\x99\x7B\x33\x9F\xCF\x7C\x01\xD9\xF9\x50\xB4\xF3\x27\x9A\xA7\x98\x67\x9A\x67\x22\xFB\x7E\x8D\x68\xDF\xBF\x84\xEC\xFB\xDE\xDA\xF5\xCB\x2D\xAB\x2C\xAD\x09\xEC\xFA\xE1\x56\xB7\xB5\xDC\x7A\x96\x75\xAC\xB5\xC2\x3A\xDE\x5A\x65\xBD\x10\xD9\xF8\x92\x6D\xBF\xD6\x7A\x99\xF5\x72\xEB\x35\xD6\x6B\xAC\x7F\x4C\x6A\xDF\x3F\x9A\xF7\x78\xDE\x93\x79\x7B\xF3\xF6\xE6\x3D\x97\xD0\xD6\x3F\xBD\xC0\x59\x50\x5A\x50\x5E\x50\x5E\x30\x56\xB4\xF9\xF3\x6D\x76\x9B\xDD\x36\xCF\x36\xCF\x76\xA1\x68\xFB\x2F\xB5\xD5\xD9\xEA\x6C\x84\xCD\x6F\x63\xA3\xEB\x80\x27\x6D\xBB\xC5\xB5\x80\xDE\x3E\xCA\x3E\xCA\x3E\xDA\x5E\x61\xAF\xB4\x57\xDA\x27\xD9\x27\xA1\x75\xC1\x1A\xFB\x5D\xF6\x3F\xD9\xDB\xED\x1D\xF6\x6D\xE2\xFA\xE0\x88\xFD\x03\xFB\x87\xF6\xA3\xF6\xA3\xF6\x63\xF6\xCF\xEC\xC3\x0A\x47\x16\x8E\x8E\xAE\x17\x92\xFB\x65\xD8\xEF\x78\xC3\xF1\xB6\xE3\x6D\xC7\x7B\x71\x6B\x87\xE1\x68\xED\x40\x27\x5D\x3B\x0C\x69\x2F\x11\xD7\x0E\xCF\xC4\xAD\x1B\xCE\xEF\x68\xED\x58\xDB\xF1\x70\xC7\xC3\x1D\x8F\x75\x3C\xD1\xB1\xB3\xE3\x49\xB4\x7E\x70\x75\x8E\xE8\x2C\xEF\x3C\x33\xBA\x8E\x98\xD1\x79\x49\xE7\x25\x09\xD7\x11\x9F\x74\x1E\xED\xFC\xBA\xF3\xEB\xCE\xF7\xB6\xBD\xB7\xED\x83\x6D\x1F\x6C\x3B\x9A\x60\x2D\x71\x2A\xD6\x13\
xD0\x2A\x05\xE5\x7A\x62\x8D\x15\x82\x87\xAD\x10\x3C\x66\x85\xE0\x89\x04\xEB\x8A\xA1\xD9\x10\x0C\x47\xEB\x0B\xE5\xBA\x62\x4D\x36\x04\x8F\x65\x43\xB0\x4B\xB1\xAE\x78\x11\x85\x33\xFA\x41\x30\xA2\x1F\x04\xA3\x50\x18\x9D\x60\x9D\x21\x84\x77\x14\xEB\x0D\xA8\x58\x6F\x54\xA3\xA0\x5D\x4F\x8C\xC9\x81\x60\x42\x0E\x04\x93\x72\x20\x98\x82\xD6\x16\xCA\xF5\xC5\xC3\x39\x10\xEC\xC8\x81\xE0\xCF\x28\xEC\xCE\x81\xA0\x2B\x07\x82\xA7\xD1\xBA\xE3\x21\xB4\xEE\xD8\x39\x00\x82\x27\x07\x40\xB0\x5B\xB3\xFE\x18\x99\x0B\x41\x45\x2E\x04\xE3\x51\x98\xA0\x58\x8F\x08\xE1\x3E\x14\xB6\xE7\x42\xF0\x28\x5A\xA3\xC8\xEB\x94\x41\x79\x10\x9C\x9E\x07\xC1\x90\x3C\x08\x9C\x68\xDD\x72\x46\x3E\x04\x67\xE5\x43\x30\x16\xAD\x5F\x84\xF5\x4A\x25\x5A\xB3\x54\xA1\xB5\x8A\x10\x0E\x28\xD6\x2B\xAF\xA3\x35\xCB\x21\xCD\x7A\x65\x4C\x92\xF5\xCA\x3C\x1B\x04\x0B\x14\xEB\x96\xCD\x36\x08\xB6\xA0\xF5\x8B\x5B\xB1\x7E\x19\x8F\xD6\x30\xCA\x75\x4C\x67\x21\x04\xDB\x0B\x21\x78\xA8\x10\x82\x87\x0B\x21\x78\xF4\x24\xD7\x35\xF2\x7A\xE6\x46\x14\x6E\x52\xAC\x6B\xBE\xEC\x66\x5D\xA3\x5D\xCF\xC8\xEB\x98\x9E\xFC\xA4\xFC\xCF\xAC\x5F\x68\xB4\x0C\x40\xF7\x26\x6A\x72\x21\x58\x90\x0B\xC1\x4C\x9A\x6A\x10\xFD\xC6\xCA\x13\x3A\x1F\xE6\x1B\xF2\x0D\xF9\xA9\xF9\x59\xF9\xD6\xFC\x82\x7C\xC9\x47\x41\x67\xFE\xD2\x5C\x08\xFC\x02\x3E\xA3\xBE\x10\x29\xDF\x1B\x58\xBA\x0C\x02\x72\x19\x04\xD4\x32\x08\xE8\x65\x10\xCC\x0C\xF9\x39\x61\xB9\xD4\x9B\x7B\x26\x1D\xFD\x20\x98\xD9\x82\x53\x01\x9C\x01\xC0\x7A\x87\xF5\x50\x47\xB8\x63\x69\xE7\xEA\xCE\x59\xE8\x7B\x7D\x0F\x81\x87\xC1\x13\xE0\x09\xB0\x13\xEC\x02\x4B\x53\xC9\xD4\xE3\xA9\xDF\xA5\x76\xA5\x3D\x97\x56\x6F\xA4\x8D\x47\x8C\xC7\x8C\x5F\x1A\xBF\x33\xD6\x67\xD0\x19\x47\x32\x8E\x65\xD4\x9B\x68\x13\x63\x5A\x69\x3A\x62\xFA\xDC\x54\x9F\x49\x67\x36\x67\xAE\x12\xFD\xBA\x5C\x68\x5E\x62\xAE\x37\x37\x9B\x8F\x98\x8F\x99\x6B\xB2\x16\x88\x6F\xDE\x1C\x96\x91\x96\x6A\xEB\x39\xD6\x4D\xD6\x9B\xAD\x64\xFF\x4B\xFA\x7F\x9A\x7B\x3C\x57\xF6\x8D\x22\xB4\x7D\x5A\xC1\xEC\x82\xFD\x05\x07\x0B\x5A\x6D\x1B\x6C\x9B\x6C\x37\xDB\x6A\xEC\x0B\xEC\xDB\xED\x4F\
xD8\xAB\x0B\xCF\x29\xAC\x29\x5C\x50\x18\x14\xFD\x36\xB6\x38\x5A\x1D\x1B\x1C\x35\x45\x6B\x8B\x2E\x2B\xDA\x50\xB4\x74\xF0\xAD\x83\x3F\x1A\x1C\x19\xBC\xC2\xF9\x07\xE7\x89\x61\x27\x86\xC5\xFC\x29\x55\xB8\xAF\x1F\x71\xE3\x08\x47\x39\x56\x3E\xBD\x7C\x76\xF9\xF9\xE5\x9E\xF2\xD6\xF2\x0D\xE5\x0F\x96\xEF\x2E\x77\xB4\x61\x6D\x47\xDA\x8E\xB5\xB9\xDB\xCF\x6C\xDF\xDF\xFE\x5A\x3B\xE8\x30\x8A\x9E\x6E\x8E\x77\x7C\xD7\x51\xD3\xB9\xA0\x53\xB0\xAA\xAC\x91\x02\x71\x5F\x58\xF6\xA3\x58\x0D\x21\x68\x86\x10\x6C\x84\x10\x5C\xA5\xF0\x9F\xE8\xD0\x43\x50\xAC\x87\x60\x22\x0A\xE7\xA2\xF0\x4F\x3D\x04\x2F\xEB\x21\xD8\x6A\x80\xA0\xC3\x00\xC1\x85\xC8\xBF\x50\x4B\x3A\x04\xAB\xD3\x21\xE0\xD3\x21\x78\x40\xE1\xFF\x28\xD7\x08\x81\x1D\xD9\x61\x4F\x1A\x21\xE8\xD2\xD8\x63\xD5\xC8\x2F\xCD\xA5\x19\x10\xAC\xCA\x80\xE0\x8F\x19\x10\xDC\x81\xD6\xA9\xC7\x32\x62\xFE\x7B\x64\xBF\x3F\x1D\x26\x08\x1E\x30\x41\xF0\x7D\x26\x04\x91\xCC\x98\x7F\x26\xD9\x5F\xF0\x40\x0B\x04\x25\x16\x08\xCE\xB7\x40\x30\xD7\x02\xC1\x1E\xE4\xE3\x53\xF6\xFB\x7A\xDC\x0A\xC1\x77\x56\xC9\x8F\xE8\x84\x6C\x08\xF6\x67\x43\xF0\x5A\x36\x04\x6F\x67\x43\xF0\x65\x76\xCC\x1F\x4D\x4D\x0E\x04\x0B\x72\x20\xD8\x9F\x03\xC1\x6B\x39\x31\x3E\xEF\xCA\x85\xE0\xB9\x5C\xC9\x5F\xF2\xB8\x3C\x08\x0E\xE5\x43\xF0\x79\x7E\xCC\xAF\x9D\xEC\x8F\x56\x5E\xEF\xCA\xFE\xB7\xBA\x06\x41\x70\x60\x50\xCC\xFF\x9F\xCC\xE7\x35\x4D\x10\x2C\x68\x82\x60\x51\x13\x04\x78\x13\x04\xAD\x14\x04\x9B\xA9\x98\xDF\x49\xD9\xDF\xA4\xEC\xCF\xB1\xFA\x6D\x08\xCE\x79\x1B\x82\xE3\x87\x21\xF8\xEE\xB0\x64\x0F\xBF\xF8\x41\xCC\xCF\xF3\xF2\x4F\x20\xD8\xF6\x09\x04\xF7\x7D\x02\xC1\x03\x9F\x40\xF0\xA0\xC2\xFF\xE3\xF0\xA3\x10\x9C\x21\xFB\x81\xFC\x0C\x82\xCC\xCF\x62\xFE\x0D\x67\xE1\x75\x38\xA7\xD8\x32\x90\xEF\xC9\xC9\xE3\x39\x0B\xA7\x7C\xA4\xC2\x41\xA0\xF2\x5E\x93\x2C\xBF\x85\x75\xC8\x2C\xE4\x60\x4A\x98\x6F\x89\xE6\x98\x72\x7E\x1D\x31\x1D\x13\xE7\x96\x30\xAF\xEA\xCD\xB4\x66\x4E\x61\xD1\x39\x25\xCC\xA5\xD8\x3C\x7A\x2D\xE9\x3C\x12\xF8\xBF\x27\xBE\x8F\xAE\x0F\x10\xFF\xCA\xFC\x53\xDF\x03\x7F\x9C\x0C\x5F\x8C\x45\x7C\xF1\xA1\
x86\x2F\x16\x24\xE0\x0B\x2D\x1F\x24\x1B\x67\x79\x7C\xA3\xE3\x45\xAC\xF4\xCC\xC5\x49\xCF\x8C\x90\xB4\xF4\x91\xEF\xE1\x0A\x72\xE6\x3F\x05\x6F\x15\xCC\x22\x56\xE2\x2A\x01\x9B\x03\xC1\xA2\x1C\x08\x2E\x42\xF7\xE4\x66\x35\xD1\xA0\x2B\xED\xFB\xB4\x13\x69\x91\xB4\x59\x7E\x00\x04\x99\xB5\x54\x94\x3D\x57\x16\x29\x65\x8D\x20\x4F\x00\xF2\x77\xC7\xC1\xE4\xF2\x40\xE6\xEB\x59\x48\xAC\x2B\x65\xAD\x20\x57\x05\x59\x7A\xDC\xF4\xB9\x49\x90\xA3\x82\x0C\x0D\x9A\x9B\xCD\x65\x96\x91\x16\x41\x5E\x0A\x72\xF2\x60\xC1\xC1\x02\xB5\x1C\xF4\x14\x6D\x48\x20\x07\x93\xC9\x3E\x41\xE6\x29\x9B\x2B\xCB\xB7\x50\x0F\xF2\xED\x97\x96\x5F\xBD\x91\x5B\x84\x35\x26\x9F\x2A\x90\xDC\xF9\x08\xC9\x9D\x85\x48\xDE\xC8\xF2\xA5\x27\x39\x22\xCB\x8B\x93\x95\x13\xB3\x42\x6C\x63\x48\xD1\xBF\x6F\xA3\xF7\x0E\xF2\x3D\xBC\x59\x2D\x24\xDE\x44\x50\x0D\x9E\x9A\x10\xEE\x6D\x0C\x11\x9E\xE9\x01\x9A\x6A\x50\x66\x90\xEE\xE1\xB9\xD1\xBB\x08\x99\xBF\x67\xBD\x0D\xC1\x6C\x21\x34\x34\xE2\x01\xD0\x9A\x7B\x5B\xEE\x6C\xBF\x67\x72\x23\xD9\x44\x02\x50\x53\xB8\xBA\x70\xB6\xDF\xE7\x99\x1E\xA2\x1A\x90\x2B\xEB\xE8\x3F\xAD\x5F\x12\xB9\x3F\x05\xFC\x73\x38\x5C\xE9\x09\x15\xAD\xD7\xA5\xF1\x1E\xAE\x87\xE0\x0C\xBD\x84\x37\x8B\x66\xB8\x46\x8F\xE8\xD1\x5E\x2C\xBB\x15\x8D\x9D\x90\x56\x43\x30\x01\xA1\x8C\x1A\xB4\x0E\x41\x30\x56\xA6\x42\x79\x7F\xF9\x65\x94\x5E\x4B\x37\x48\x87\x85\xE8\x8E\xAE\x04\x0B\xA9\xEA\x58\x9A\x01\xC1\x6A\x54\xC7\xDC\x10\xD3\x24\xD2\x89\xEE\xFE\xCE\x66\xC8\xA8\x07\x42\x68\xD2\x9B\x92\xBD\x95\xA8\xD4\x9C\xC7\x49\xEF\x26\xBC\xA6\x7A\xD3\x4A\xD3\x6C\x16\x6F\x10\x24\x82\xD6\x8F\xCD\x6C\x36\x80\x53\x0A\xFF\x86\xAD\xE8\xBE\xB5\x2C\x07\x6B\x34\xDD\x55\x04\x8A\xC1\x10\x50\x06\x86\x89\xDF\x51\x1E\x07\x26\x88\xFE\xAE\x2F\x12\xBF\x77\xFC\x3B\xF0\x3B\x70\x29\xB8\x14\xDC\x09\xEE\x04\x3C\xE0\xC1\x36\xB0\x0D\x3C\x06\x1E\x07\x7F\x11\xBD\x60\xEF\x06\xAB\xF4\xAB\x44\x5F\xD7\x0B\x53\x3C\x29\x1B\x52\x36\xA6\xEC\x4A\xD9\x95\xD2\x95\xD2\x95\xF2\xD7\x94\xBF\xA6\xEC\x4D\xD9\x9B\xF2\x45\xCA\x97\x29\xC6\xD4\x8C\xD4\xCC\x54\xB3\xE8\x07\xFB\xB4\xD4\x41\xA9\xCB\x52\
x03\xA9\x07\x52\x0F\xA4\x82\x34\x73\xDA\x37\x69\xDF\xA5\xB9\xD3\xAB\xC4\xBD\x6C\x9F\x91\x30\x06\x8D\x41\xE3\x09\xE3\x09\xE3\x8A\x8C\x15\x19\xC7\x33\x8E\x67\xAC\xCC\x5C\x99\xB9\x3E\x73\x7D\xE6\x97\xE6\x2F\xCD\xB3\xB3\x66\x67\x2D\xCC\x5A\x94\x65\xB0\xF4\xB3\xE4\x58\x72\x2C\x15\x96\xF1\x96\xCB\x2C\x97\x59\xF6\x5B\x0E\x58\x5E\xB3\x1C\xB4\x9C\x6B\x9D\x6D\xFD\xC1\xFA\x83\x75\x69\xFF\xC6\xFE\x20\x07\xE4\x50\xB9\x54\xEE\xAD\xB9\xB7\xE5\xCA\xBE\xB5\x0F\xE4\xBD\x92\xF7\x6A\xDE\xC1\x3C\x90\x9F\x91\x3F\xA5\x60\x6A\xC1\x69\xB6\x41\xB6\xAD\xB6\x36\x5B\xBB\xED\x3E\xDB\x42\xFB\x52\xD1\xBB\xFD\xB8\xC2\x09\x85\xAB\x0A\x57\x17\x76\x15\xEE\x2B\xFC\x57\xE1\xBF\x0A\xAD\x0E\xA7\xC3\xED\x98\xEC\x98\xEA\x38\xDF\x51\xEB\x58\xE2\xB8\xD4\xB1\xCA\x71\x8D\xE3\x5A\x47\x7A\xB1\xA9\x18\x2B\x2E\x2B\x6E\x74\x86\x9C\x7B\x9C\x7B\x9D\x47\x9C\xC7\x9C\xEB\xB0\x2D\xD8\xAB\xD8\xEB\xD8\x8F\xD8\x09\xEC\xBB\x61\x3F\x0E\x0B\x0F\x8B\x0C\x0B\x0E\x0F\x0E\x07\x2E\x97\xCB\xED\x9A\xED\x9A\xE3\x9A\xE3\x82\x6E\xBD\x3B\xDD\x9D\xED\xCE\x71\x0F\x72\x0B\xFF\xC6\xBB\xC7\x23\x5F\xDB\x3F\xB8\xC3\xF7\x44\xEE\x31\xB7\x59\xDA\xD8\x36\xB6\x6D\x55\xDB\xAA\xB6\xCF\xDB\xBE\x69\xE3\xDA\x57\xB6\xFF\xA3\xFD\xC5\xF6\x6F\xDB\xBF\x6F\xFF\xB1\xFD\xC7\x76\x57\xC7\x19\x1D\x9E\x0E\x4F\xC7\x53\x1D\x2F\x88\x5E\x0D\x17\x77\x7A\x3A\xFF\xD3\xF9\x56\xE7\xF1\xCE\xCF\x3A\x3F\xDE\xF6\xB1\xC6\xEF\x36\x1E\xF6\x86\xBD\xE1\xC6\x70\x63\x78\x59\xB8\x29\x0C\x23\xFA\x48\x4A\x24\x23\x92\x19\xB1\x44\x6C\x11\x7B\x64\x90\xE8\x8B\x7B\x7C\x64\x42\x64\x42\x64\x51\x64\x51\xE4\xA2\xC8\x45\x11\x4F\x84\x88\xC8\xFE\xB7\xEF\xD0\x4B\x61\x3F\x0A\xB2\xBF\xF8\xF9\xC8\xD7\xE4\x20\xA3\x14\xE4\x73\x72\xD9\xDF\xE0\x6A\x14\x8E\xA3\xF3\x07\xF9\x3B\x04\x37\x9B\x20\xB8\xCD\x04\xC1\x7D\x59\x52\xB8\xC0\x02\xC1\x02\x0B\x04\x67\x23\x7F\xFD\x7F\xB1\x42\xB0\xD3\x0A\xC1\x2E\xE4\x9F\xB1\x3A\x1B\x82\xC9\xD9\x10\x70\xD9\x10\x84\xB2\x21\xD8\x93\x0D\xC1\xB3\xD9\x10\xBC\x80\xC2\xEB\x28\xBC\x99\x0D\xC1\x5B\xD9\x10\x8C\xE9\x07\xC1\x78\xB4\x36\x10\xC2\x79\x39\x10\xCC\xCE\x81\x60\xA1\x42\x1F\x0A\x61\
x2F\x0A\x4F\x0D\x80\xE0\xDF\x03\x20\x98\x98\x0B\xC1\xE4\x5C\x08\xE4\xF5\x4A\x65\x1E\x04\x55\x79\x92\xAF\x62\xD1\x5F\x31\xF2\x67\x7C\x18\x85\x09\x36\x08\xA6\xD9\x20\xB8\xC5\x06\xC1\x6D\x36\x08\x6E\xB7\x41\x70\x97\x0D\x82\x89\xE8\x3B\x05\xC1\x42\x08\x96\x17\x42\xF0\x0D\x3A\xE3\x89\x0C\x92\x82\xEC\x07\x5A\x5E\xEF\x7C\x85\xC2\x59\x4D\x10\x8C\x6B\x82\x60\x0A\x0A\x37\xA1\xFD\xD2\xC3\xB4\x14\xEE\x40\xFE\x8F\xAF\x38\x08\xC1\x55\x07\x63\xDF\xF7\xA8\xC1\x1B\xF1\x95\x8D\x71\xD2\x57\xBC\x87\x3D\xB5\x29\xDE\xDE\xD4\xDE\xCB\xAE\xC1\xFD\x81\x16\x86\xA0\x04\x19\xB2\x14\xE9\xC4\x1A\x3C\xE4\x99\x4C\x52\x9E\xE9\x78\x28\x7A\xFF\xBC\x46\x23\x5F\x05\xB9\x30\xC1\x51\xED\x98\xEB\x98\xEB\x18\x19\x1E\x15\x9E\x19\x9E\x1D\x16\x78\xA8\x06\xD9\x83\xC3\xC1\x70\xB0\x11\xC9\x02\x61\x1E\xA6\xE6\xA7\xE6\x5B\x1D\x03\x1C\x79\xAE\x3C\x97\xCD\x65\x73\x55\xB8\xC6\xB9\xAA\x5D\xD5\xAE\x42\x77\xA1\x38\x07\x04\xFE\x1F\x11\x2E\x0F\x5F\x18\xBE\x50\xE4\x57\x73\xC4\x1C\x91\x79\xA4\x86\x00\x00\x03\x18\xB8\x08\x5C\x24\xCA\xA1\x71\x96\x71\x96\xF1\x96\xF1\x96\xDB\x72\x6F\xCB\x9D\xE6\x98\xE6\x58\xE5\x58\xE5\xB8\xD6\x71\xAD\xC3\x58\x6C\x2C\x36\x15\x9B\x8A\xCB\x8A\xCB\x8A\x49\x27\xE9\x6C\x72\x36\x39\x03\xCE\x80\x93\x76\xD2\xCE\xE5\xCE\xE5\x4E\xD6\xC9\x3A\x43\xCE\x90\x73\xAF\x73\xAF\xF3\x7D\xE7\xFB\xCE\x0F\x9D\x1F\x3A\x3F\x76\x7E\xEC\x3C\xEA\x3C\xEA\x3C\xE6\x3C\xE6\x5C\x8F\xAD\xC7\x2E\xC7\x2E\xC7\xAE\xC4\xAE\xC4\x36\x62\x1B\xB1\xAB\xB1\xAB\xB1\x6B\xB1\x6B\xB1\x4D\xD8\x26\xEC\x7A\xEC\x7A\xEC\x06\xEC\x06\x6C\x33\xB6\x19\xDB\x82\x6D\xC1\x5E\xC3\x5E\xC3\x5E\xC7\x5E\xC7\x4E\x60\x27\xB0\x62\x57\xB1\x6B\x88\x6B\x88\xCB\xE9\x72\xBA\x30\x17\xE6\x32\xBA\x8D\x6E\x93\xDB\xE4\x36\xBB\xCD\x6E\x8B\xDB\xE2\xCE\x76\x67\xBB\x07\xB8\x07\xB8\xF3\xDC\x79\xEE\x02\x77\x81\xDB\xEE\xB6\xBB\x4F\x73\x0F\x72\x57\x9D\xA8\x3A\x91\x1F\xCE\x0F\x8F\x0E\x8F\x0E\x8F\x09\x8F\x09\x57\x84\x2B\xC2\x95\xE1\xCA\x70\x55\xB8\x2A\x5C\x1D\xAE\x0E\x4F\x0A\x4F\x0A\x4F\x09\x4F\x09\x4F\x0F\x4F\x0F\x2F\x0C\x2F\x0C\x2F\x0E\x2F\x0E\x2F\x09\x2F\x09\x1B\x23\xC6\xC8\xF8\xC8\
x78\x71\xAE\x2E\x8D\x2C\x8D\x78\x23\xDE\x48\x4D\xBD\x7A\x7C\x04\xD9\x5C\xE0\x28\x70\x0C\x74\x0C\x74\x54\x38\x2A\x1C\x7A\x97\xDE\x95\xE2\x4A\x71\x65\xB8\x32\x5C\x66\x97\xD9\x35\xD0\x35\xD0\x55\xE4\x2A\x72\xD5\x34\xE2\x0D\xAC\xA7\x06\x07\xA0\xBA\x7D\x65\x7B\x4D\x23\x4D\x50\xA4\x37\xC1\xF7\x46\x00\xB2\x8B\xE4\xF9\x5E\xA3\xF1\x78\x2C\xE8\x84\x7C\x47\xBE\xC3\xEE\x28\x74\x0C\x72\x0C\x72\x8C\x75\x8C\x75\xE8\x5C\x3A\x97\xC1\x65\x70\x19\x5D\x46\x57\xA6\x2B\xD3\x55\xE8\x2A\x74\x39\x5C\x0E\x57\x0D\x2D\xEB\x1B\x27\x28\x13\x75\xCE\x30\xE0\x52\xE8\x9D\xC5\x60\x71\x54\xD7\x08\x7A\x26\x5E\xC7\x6C\x38\x65\xFA\x25\x99\x6E\x11\xF4\x4A\xBC\x4E\x49\xC9\x4F\x3B\x09\xBD\x92\xEB\xC8\x73\x38\x44\xDD\x32\xC6\x31\xCE\x51\xE5\x98\xE8\x98\xEC\x38\x5B\xD4\x31\xB5\x8E\x0B\x1C\x4B\x1C\x6A\xFD\x01\x5D\xA9\xAE\x74\x97\xC9\x65\x72\x65\xB9\x72\x5D\xF9\xAE\x02\x97\xDD\x65\x77\x9D\xE6\x1A\xE4\x2A\x13\x75\xCB\x58\x57\xA5\x6B\x82\x6B\xA2\x6B\xA2\x6B\xB2\x42\xCF\x08\xBA\xE5\xE7\xD2\x2B\xD6\x70\x6E\xB8\x40\xD4\x2B\xEE\xF0\xD4\xF0\xB4\xF0\x39\xE1\x19\xE1\x1A\x51\xC7\xCC\x0F\x7B\xC2\xB8\x46\xC7\xA4\x45\x32\x44\x3D\x93\x19\xC9\x52\xE8\x9A\xCA\x48\x65\x04\x8F\xE0\x11\x9F\xA0\x63\x94\xFF\xFA\x58\xDF\xFC\xA6\x67\x7E\xA2\x9E\x61\xD5\xF3\xBD\x14\x94\x82\x45\x60\x91\x68\x1B\x56\x58\x2A\x2C\x95\x96\x4A\xCB\xAD\xB9\xB7\xE6\xDA\x1C\x36\xC7\x69\x8E\xD3\x1C\x53\x1D\x53\x1D\x97\x3A\x2E\x75\x5C\xE3\xB8\xC6\x91\x5E\x9C\x5E\x9C\x51\x9C\x51\x8C\x15\x63\xC5\x8D\xCE\x46\xE7\x32\xE7\x32\xA7\xDF\xE9\x77\x52\x4E\xCA\x19\x74\x06\x9D\x8C\x93\x71\x72\x4E\xCE\xB9\xC7\xB9\xC7\x79\xC4\x79\xC4\xF9\x81\xF3\x03\xE7\x47\xCE\x8F\x9C\x9F\x38\x3F\x71\x7E\xEA\xFC\xD4\xB9\x0E\x5B\x87\x5D\x86\x5D\x86\x5D\x81\x5D\x81\x6D\xC0\x36\x60\x57\x61\x57\x61\xD7\x60\xD7\x60\xD7\x61\xD7\x61\xBF\xC7\x7E\x8F\xFD\x01\xFB\x03\x76\x23\x76\x23\x76\x13\x76\x13\xF6\x2A\xF6\x2A\x76\x10\x3B\x88\xFD\x88\xFD\x88\x9D\xEE\x3A\xDD\x35\xD8\x35\xD8\x55\xE2\x2A\x71\x95\xBA\x4A\x5D\x93\x5C\x93\x5C\xE9\xEE\x74\x77\x86\x3B\xC3\x9D\xE9\xCE\x74\x67\xB9\xB3\xDC\x56\xB7\xD5\x9D\xE3\
xCE\x71\xE7\xBA\x73\xDD\xF9\xEE\x7C\xB7\xCD\x6D\x73\x0F\x74\x0F\x74\x4F\x38\x31\xE1\x44\x5E\x38\x2F\x7C\x66\xF8\xCC\xF0\x59\xE1\xB3\xC2\x63\xC3\x63\xC3\xE3\xC2\xE3\xC2\xE3\xC3\xE3\xC3\x13\xC2\x13\xC2\x13\xC3\x13\xC3\x93\xC3\x93\xC3\x67\x87\xCF\x0E\x2F\x08\x2F\x08\x2F\x0A\x2F\x0A\x5F\x14\xBE\x28\x9C\x1E\x49\x8F\x8C\x8B\x8C\x13\x6D\x29\x4F\xC4\x13\xA9\x8B\xD4\x45\x6A\x58\xDC\xCF\x11\x8C\xCA\xCB\xB5\xFC\x0E\x50\xE6\x4F\x79\x9D\x38\x87\x58\x86\x0B\x9A\xDC\xDD\x51\x2B\xCE\xC5\x39\x21\x8A\xF4\x82\xAD\xB9\x1F\xE5\x7E\x9A\xFB\x6D\x6E\x6D\xB4\xFF\x07\x83\xC1\x60\x28\x18\x0A\x2A\x41\x15\x58\x02\x96\x80\xA5\x60\x29\xB8\x04\x5C\x02\x56\x81\x55\xE0\x2E\xD0\x06\xDA\x41\x07\xB8\x17\xDC\x0B\xEE\x07\x0F\x82\x47\xC0\x23\x60\x07\xD8\x11\xFD\x5E\xCD\xD3\xBA\x3D\xBA\x97\xC4\x6F\xD6\x7C\xAC\x3B\xAA\x3B\xAA\x3B\xA6\x8B\xE8\x42\xFA\x90\x7E\xBD\xFE\x32\xFD\xD7\xFA\xAF\xF5\x6B\x0D\x6B\x0D\xD7\xA4\x5C\x97\x92\x9A\x9A\x9E\x6A\x4A\x35\xA5\x66\xA5\x5A\x52\xFF\x93\xFA\x9F\xD4\x0F\x53\x3F\x4C\x3D\x91\x1A\x4E\xFD\x3A\xED\xEB\xB4\x70\x5A\x24\xED\x73\xE3\x17\xC6\xEF\x8D\x3F\x18\x3F\xCB\xF8\x2C\x23\x68\x0A\x9A\xBE\x30\x7D\x6F\x5A\x9D\xB9\x3A\x73\xB6\x79\xB6\xB9\xC5\xDC\x62\x9E\x90\x35\x21\x0B\x5A\xF4\x96\xFE\x96\xFE\x96\x01\x96\x3C\x8B\xCD\x32\xC8\x32\xCA\x32\xCA\x32\xDA\x32\xDA\x32\xC6\x32\xC6\xB2\xCB\xB2\xC7\xB2\xCF\xF2\xBC\xE5\x1F\x96\x17\x2D\x2F\x5B\x5E\xB5\xDC\x6E\xBD\xC3\xBA\xA9\xFF\xCD\xFD\x03\xB9\x81\xDC\xD7\xF3\x5E\xCF\xAB\x2E\xA8\x2E\xF8\x4F\x41\xA4\x00\xB7\x2F\xB3\x73\xF6\x4B\xEC\x4F\x0E\x7A\x72\xD0\xEE\x41\x4F\x0D\x7A\x61\xD0\x8B\x83\xDE\x1C\xF4\xD6\xA0\xA3\x83\x8E\x0D\x3A\x31\x28\x3C\x68\x8A\x63\x8A\x63\x8E\x63\x8E\xE3\x62\xC7\x25\x8E\x8D\x8E\xAB\xD1\xB7\x70\x60\x91\xBE\x28\xB5\x28\xBD\xC8\x58\x94\x53\x94\x53\x94\x5B\x94\x5F\x74\x5A\x51\x71\xD1\x10\xF1\x1B\x39\xCE\x22\xAC\x08\x13\xD4\x5F\x51\x45\xD1\xB8\xA2\xEA\xA2\x29\x45\xE7\x8A\xDF\xCB\x99\x5D\xB4\x51\xFC\x5E\x4E\x5A\x71\x66\x71\x69\xF1\xD0\x62\xE9\xBB\x39\xB7\x0D\xFE\x70\x30\x18\xD2\xE0\xBC\xC1\xF9\x8C\x73\x9F\xF3\x3D\xE7\x71\xE7\x5A\
xEC\x66\xEC\xDF\xD8\x1B\xD8\x0F\x58\x58\xF1\x3D\x9D\xF7\x86\x7D\x34\xAC\xC6\x75\xBE\x4B\xE9\x1B\xFF\xF8\x19\x3F\x9C\x61\x70\x1B\xDC\xFD\xDC\xFD\xDD\x0E\xB7\xC3\x3D\xDA\x7D\x96\xBB\x0A\x7D\x5F\x67\xD3\x88\xDF\x8F\xD8\x8C\xBE\xAD\x03\xCA\x4F\x2B\x2F\x2B\x3F\xBB\xBC\xA6\xBC\xA6\x7C\x69\xF9\xEA\xF2\x8D\xE5\xDB\xCB\xBB\xCA\xA5\x6F\xEC\x6C\xBA\x67\xEF\x3D\x80\xCF\xE5\x1D\x7C\x11\xBF\x81\xDF\xC8\x97\xB6\x0F\x6D\x1F\xDD\x3E\xB6\x7D\x65\x67\x4B\xA7\xFC\xAD\x1D\xEC\x07\xEC\x87\x87\x7E\x78\xEA\x87\x1F\x4F\x9C\x38\x51\x17\xAE\x0B\xFB\xC2\xF5\x61\x32\x4C\x86\x0D\x11\x43\xC4\x14\x31\x45\x0A\x23\xA7\x45\xAA\x22\x55\x91\xEA\x48\x75\x64\x71\x64\x71\x64\x49\x64\x49\xF4\x9B\x3B\x3F\x46\x4E\x44\xE4\xEF\xEB\xB4\x40\x08\x36\x40\x08\xAE\x46\xDF\x3B\x90\xBF\xB7\x23\x7F\xF7\x40\xFE\xEE\xCE\xCA\x74\x08\x9A\xD3\x21\x78\x36\x43\x0A\x13\xD0\xF7\xE4\x5E\x1E\x04\xC1\x67\x83\x20\xA8\x6C\x82\x60\x02\xB2\x41\x85\x70\x1B\xFA\x86\x47\xA2\xEF\xF2\xF8\xD0\x77\x79\xFC\xDD\x7C\x97\x07\xBC\x04\xC1\x44\xF4\xCD\x85\xA9\x8A\xEF\xF3\x3C\xF5\x8A\x14\x5E\x47\xE1\x07\x14\x06\xFC\x5B\x0A\x67\xA2\x30\x1B\x05\x1A\x85\x0D\x28\x74\xA0\xF0\x57\x14\xC0\xAB\x10\x44\x5E\x83\xE0\xAC\x83\x10\x54\x1C\x84\x20\x70\x10\x02\xEE\x20\x04\x2B\x0E\x42\xB0\xEE\x20\x04\x97\x1D\x84\xE0\xF2\x83\x68\x0F\xE3\x6D\x08\x22\x87\xA5\x70\xEF\x27\x52\xD8\x8E\x82\xEB\xA8\x14\xE4\xEF\xB8\x9D\xEC\x77\x78\xCC\x8A\xEF\xF0\xFC\x5F\xFA\xFE\x4E\x2D\x1E\xC0\x19\x52\xFC\x92\x5D\xFA\xF0\x74\xC1\x6A\xAB\xC5\x43\x0C\xCE\x36\x72\x0C\x0E\x40\x6B\xFB\x9E\xF6\x7F\xB4\xBF\xD6\x5E\xAB\xDD\xED\x11\xE5\xE1\x5D\xE0\x4F\xE0\xBA\x94\xEB\x52\x04\x99\x25\xCB\xA7\x1F\x90\x7C\xFA\xCE\xF4\x9D\x49\x90\x47\x82\x5C\x11\xE4\xC3\x98\xF6\x31\xED\x3F\x9E\xF8\xF1\x84\x3C\x47\x0E\x45\xDE\x8E\xBC\x17\x39\x12\xB5\x49\xDE\x1C\x04\xC1\xA1\x41\xB1\xF1\x95\xC7\xB5\xB6\x11\x67\x70\x1F\x2E\x9E\x0B\x0B\x76\x40\x6D\x23\xBE\x42\x30\x97\x6B\x0C\x10\xAC\x36\x40\x50\x4B\xFA\x7C\xE2\xCE\x96\xE2\xDE\xE7\x9B\x03\x04\x78\x03\x35\x9F\x21\x39\x52\x5E\xA6\xBD\x2A\xE9\x57\xED\xF9\x5D\x2D\x49\x35\xE2\x7E\
x1C\xAC\x31\xAF\x4B\xE8\x83\x42\x7B\x3F\xFA\x88\xF9\x98\xF9\x73\xF3\x97\x66\x79\xBF\xB0\xB6\x49\x2A\x5E\xD6\x09\xED\xA0\x1D\x3C\x00\x1E\x88\xEA\x80\xEE\x64\x7F\x32\xF9\x2A\xC8\x2A\xA5\xFC\x91\x65\x8E\x20\x6F\x64\x99\xF2\x4E\xE4\x9D\xC8\x0F\x5F\x40\x10\xF9\x02\x82\xDA\x00\x88\xEA\x26\x59\x27\x09\xBA\xE8\x41\xF0\x60\x54\x07\x09\x7A\x46\xD0\x2F\x5A\xD9\x9D\x5F\x94\x2F\xCA\xE3\xF3\x8A\xCE\x2B\xDA\x54\x74\x43\xD1\x2D\x45\xB7\x16\x6D\x2D\xDA\x5A\x74\x77\xD1\xDD\x45\x6D\x45\x6D\x45\xF7\x17\xDD\x5F\xF4\x8F\xA2\x17\x8B\x5E\x2A\x7A\xA9\xE8\x40\xD1\x81\xA2\x2F\x8B\x22\xA7\x3B\x8A\x8B\x8A\x2F\x29\xBE\xA4\xF8\xD6\xE2\x87\x8B\xDF\x28\x7E\xBB\xF8\xF1\x21\x8F\x0F\x79\x6A\xC8\x53\x43\xBE\x1D\x12\x19\x42\x97\xD0\x25\x5D\x51\xB9\x1D\x71\x02\x4C\x29\xBB\x23\x65\xEE\xA1\x53\x86\x9E\x3D\x74\xC6\x50\x41\x7E\xCA\x72\x53\x96\x97\xB2\x9C\x3C\x1C\x39\x1C\xF9\x30\xF2\x89\xCA\x5E\xFD\xB9\xE5\x8E\x2C\x27\x6A\xD1\x7E\x7A\x1B\x68\x03\x1D\xA0\x03\xDC\x0F\xEE\x07\xDB\xC1\x76\x20\xE9\xE4\x6B\x53\x12\xE9\xE2\xEF\x8D\xDF\x1B\x25\xFD\xFB\xAD\xE9\x7B\x8D\x0E\xFE\x6F\xF5\xAF\x56\xF7\xC6\xEB\xD3\xBC\xA4\xFA\xF4\xDC\xA2\x73\x8B\x66\x28\xF4\xE9\x8D\x45\x37\x17\xDD\x56\x74\x47\xD1\x9D\x45\x77\x15\xDD\x53\xF4\xA7\x22\xBE\xE8\xBE\xA2\x07\x8A\x5E\x28\xDA\x5F\xF4\xCF\xA2\x7F\x15\xFD\xAB\xE8\xE5\xA2\x2F\x8A\x40\xB1\xA0\x73\x07\x15\x9F\x2E\xEA\xDD\xDF\x15\x5F\x5A\x7C\x4B\xF1\x23\xC5\xAF\x17\x1F\x56\xE9\xE0\xC7\x86\xEC\x18\xD2\x35\xE4\xE9\x21\xDF\x0C\x01\x25\x54\x49\xB0\x44\xD0\xC9\xBB\x9D\xA0\x34\x52\x0A\x86\x9E\x31\x74\xEA\xD0\x69\x43\x67\x0E\xFD\xEF\xF4\xF1\xC9\xE8\x62\x49\xFF\x9E\xD5\x3E\x16\xE9\xE0\x13\x27\x4E\x9C\x78\x37\xF2\x6E\xE4\x83\xC8\x07\x91\xA3\x7D\xA0\x4F\xDF\x18\x04\xC1\xDB\xA7\x58\xAF\x9E\xAC\x1E\x94\xF5\x9F\xAC\xF7\x4E\x85\xBE\xFB\xFE\x0B\x08\xC0\x97\xBF\x12\xBD\x87\xCE\x12\xDC\x68\x9D\x5A\x4B\x33\xB8\xA7\x96\x0E\x04\x09\x49\x61\xC8\x7E\x3D\xE4\xF3\xCA\x5A\xBA\x85\x0E\xD4\xD1\xA2\xCF\x08\x61\x3D\x58\x1B\xA2\x7C\xD2\x07\x6F\x5A\xED\xBB\xED\x5D\x85\xFB\x0A\x6B\x5B\xFC\x34\x47\x7A\x66\xE1\x0D\xE2\x11\
x6E\xFB\xB0\xF6\xDA\x16\x86\xC4\x65\xCD\x99\x66\x4E\xB3\xA4\x9D\x9B\x36\x33\x6D\x76\xDA\xD2\xF4\x65\xE9\x73\xF1\x06\xDC\x4F\x37\x00\xC9\x47\x50\x4E\xDE\x5C\xBC\xA1\x0E\xA7\x56\xE2\x00\xC8\x5E\x82\x98\x3C\x36\x4F\x3C\x81\x24\x00\x90\xBD\x01\x09\xF1\xB9\xE2\x97\x39\x1D\xD1\xB7\x30\xD2\x59\xED\x56\xDB\x7D\x36\x21\x75\x1E\x49\x70\x00\xB4\x8A\x5E\x70\xDE\xEA\x9C\x8B\x37\xA1\xF3\xE4\x56\xE4\x77\x43\x3E\x47\x9D\x8B\x07\x48\xE5\xE7\x9D\xD6\xF6\xD2\x1F\x94\xF2\x4D\xD2\x7E\xD3\x7E\xF1\xFE\xF7\x11\xD3\xF7\xA6\x2E\x34\x5F\xE4\xF5\xF0\x5C\x9C\x6A\x08\x29\x3E\x79\x73\x08\xF9\xF0\x52\xFA\xA1\x88\x6C\x8C\xF9\xA1\x98\x4B\xF8\x43\x0D\xCA\x03\xB0\x38\x9F\x53\xEA\x77\x4F\x92\xFF\x29\x6F\x66\x7D\x26\x9D\xB9\x32\x73\x75\xE6\xDC\x46\x5C\xFC\x12\x50\x6B\xDA\x83\x69\x73\x1B\x71\x12\x00\x98\x55\x91\x55\x9D\xB5\x28\x6B\x2E\x59\x47\x70\xF1\x1B\x6C\xC8\x47\xD4\x72\x74\xBF\x71\x97\x28\x97\xA5\x7D\xA7\xB9\x64\x3D\x49\xE1\x0D\x8D\x00\x48\x5F\xB5\x08\x0E\x5F\x3D\x7C\xF5\xF0\xB9\x24\xD3\x18\xE2\xE4\x13\x9E\x56\xE4\xF7\x44\x3E\xA7\xBE\xA0\x41\x30\xA3\x62\xA7\x62\xB2\x1F\x1E\x79\x7F\x65\x9E\x40\xD2\x9F\x86\xB6\xCD\xC7\x29\x6F\x23\x0D\x44\x3F\x19\xDF\x1D\x8E\xD9\x3B\xF3\x71\x06\xA7\x1A\x3C\x93\x49\xF4\x69\xB3\xAD\xE8\xFC\x31\x82\xC2\x02\xE2\x62\x52\xF9\x40\x40\xF6\x5F\xB3\x0D\xED\xD9\xCA\x7E\x6C\x16\x08\x38\x5B\xAF\xBE\x47\x90\x9B\x0B\x15\xCD\x75\x00\x07\xD8\x0A\xB6\x82\xD6\xDC\xD6\x5C\xE0\xC8\x70\x94\x3A\x30\xC7\x19\x8E\x33\x1C\x1E\x87\xC7\x01\xDC\xC0\xBD\x10\xA7\xF0\x3A\xFC\x62\x9C\xF1\xD4\x2E\x0F\xE1\x8C\x74\xE8\x8D\xEE\x82\x2D\xF4\x03\x50\xEA\x28\x75\x2C\x0C\x02\x80\x39\x30\xC7\x42\x36\xBE\x3C\x65\x59\x42\x56\xFF\x0C\xF4\x2D\x76\x99\xDE\x19\x00\xE8\x01\x00\x5B\x34\xF0\xAD\x08\xBE\x43\x03\xDF\x8F\xE2\xAF\xA2\xDF\x43\x33\x00\xC8\x05\x00\x54\xCF\x04\xA0\x18\x00\x70\xE1\x2C\x09\x1E\x9C\x05\x80\x41\x59\xCF\x2C\x75\x39\x9B\x66\x01\xA0\x13\xEA\x41\xF0\x4E\x14\x97\xFF\x3D\x84\xE0\x3B\xD0\xEF\x4E\xF4\xFB\x8C\xA6\x9C\xFD\x28\xDF\x21\x04\x3F\xA2\x29\xE7\x53\x19\x7F\x36\x10\xBF\x22\x2C\xFF\x1B\x33\x5B\x5D\x4E\
xCD\x6C\x89\xDE\x20\x82\x5F\x3C\x5B\x6A\xBF\xFC\xEF\x72\x0D\xFE\x26\x94\xDE\x8E\xE0\xDB\x35\xE9\x8F\xA0\xF8\xAE\xD9\x6A\x7A\xF6\x22\xF8\xF3\x1A\xFC\xFD\x28\x7E\x68\x36\x00\x56\x81\x2F\x6A\x00\x30\x02\x00\x1A\x6B\x24\xF8\x25\x35\xEA\x72\xD6\xD7\xA8\xF3\x5F\x8D\xE2\x37\x68\xE0\xDB\x51\xBE\x2E\x04\x7F\x46\x53\xCE\xF3\x08\xBE\xBF\x06\x00\x33\x00\xC0\x7A\x3E\x00\x69\x00\x80\xE9\xE7\x6B\xFA\xE7\x7C\x29\xDF\x52\x04\x0F\x9E\xAF\x19\x5F\x04\xDF\x78\xBE\xBA\xDF\x36\x6B\xCA\xB9\x07\xC5\xEF\x45\xBF\xDB\xD1\x6F\x17\x2A\x7F\x3F\x8A\x1F\x3A\x1F\x80\x45\x42\xBD\xB5\x00\xCC\x11\xE2\x17\x48\xF0\x0F\x2E\x50\x97\xFF\x25\x82\x7F\x7B\x81\xBA\x9E\x1F\x35\x71\xEB\x3C\x89\x5E\xF7\x3C\x35\xBC\x7A\x1E\x00\xA9\x42\x7B\x10\xBC\x75\x1E\x00\xE9\x8A\xF4\xAD\x1A\xFC\x76\x14\xDF\x8E\x7E\xBB\xE6\xA9\xFB\x61\xBF\x06\xFF\x10\x2A\xCF\x31\x5F\x0D\x2F\x45\x71\xB7\x06\x5E\x3D\x1F\x80\x14\x61\xDC\x35\xF0\xD6\xF9\x52\xFF\x6C\xD2\xC0\xB7\xA0\xF8\x56\x0D\xBC\x1D\xC5\xB7\xA3\xDF\xAE\xF9\xD2\x37\xFD\xA3\xF5\x5C\x28\xC1\x6B\x2E\x04\x20\x43\x01\x6F\x46\xF0\xF5\x17\xA2\xFA\x2E\x94\xE8\xD9\x71\xA1\xBA\xFC\x2E\x14\x7F\x56\x03\xDF\x7F\x21\x00\x16\x81\x7F\x16\xA0\xF2\x17\x00\x50\x24\xF4\xC3\x02\x69\xDC\xBE\x45\x70\xEB\x42\xF5\x7C\x9C\xBE\x50\x5D\xCE\xD2\x85\xD2\xB8\x6C\x42\xF0\xAD\x0B\xA5\x79\x11\xED\x57\x04\x3F\xBE\x10\x00\x93\xD0\xBF\x8B\xA4\x7E\x5E\xBA\x08\xF1\xE7\x22\x75\xF9\xED\x8B\x34\xF4\x2F\x42\xFC\x86\xE0\xAF\x6A\xD2\xDF\xD0\xC4\x8F\x2F\x92\xE8\x4F\x5F\x8C\xE8\x5F\x2C\xCD\x4F\xF9\xDF\x28\x04\xAF\x5A\x2C\x95\x5B\xB3\x58\xC2\x6F\x5C\xAC\x19\x47\x94\xBE\x09\xC1\xB7\x2E\x06\x20\x47\x28\x7F\xB1\x44\xBF\xFB\x22\xCD\xBC\xBB\x08\xCD\x3B\x0D\xBC\x15\xC1\x37\x21\xF8\x2D\x17\xA9\xE7\x45\x9B\x06\x7F\x3B\xC2\xEF\x42\xF0\xFD\x17\x49\xF3\x5D\xFE\x67\x58\x82\xDA\xB5\x44\x9A\xFF\xEE\x25\x12\x7E\x35\x82\x9F\xBB\x44\x2D\x37\xE6\x2C\xD1\x8C\xD7\x12\x89\xFE\xAD\x1A\x78\x17\xCA\xB7\x1F\xC1\x0F\x2D\x51\xCF\x97\xE3\x1A\x7C\xE0\x91\xF0\xAD\x1E\x35\xDC\x81\xE0\x6E\x04\xAF\xF6\x00\x70\x09\x04\xC0\x4D\x00\xF0\xB9\x50\x8F\x1F\xB5\xCB\x2F\
xF1\x83\xFC\xEF\x47\x04\xB7\x06\xD4\xFC\xBF\x29\x20\xC1\xB7\x04\xD4\xFD\x76\x0F\x82\x6F\x0F\x48\x7C\x2F\xFF\x7B\x26\xA0\xE1\x73\x94\xEF\x03\x0D\x1C\x50\x52\xFF\x8D\xA1\x10\x3F\xA0\xDF\x29\xE8\x77\x06\xFA\xAD\xA1\x00\xF0\xEA\x00\x38\xB4\x12\x80\xD3\x85\x7E\x68\x46\xE5\xB4\x00\x70\x96\xD0\x4F\x2D\x12\xFF\x5B\x2F\x96\xE0\x8E\x8B\x01\x38\x1D\x02\xB0\x7D\x15\x00\xFB\x84\x7A\xD7\x23\xFE\x5F\xAF\x9E\xBF\xF9\x97\x21\xFC\xCB\x00\x58\xA0\x03\x60\xD3\xB5\x00\x0C\x11\xE6\xEF\x75\x9A\x71\x41\xF1\x67\x35\xF0\xFD\xD7\x21\x7D\x8A\xE0\xC7\xAF\x03\xE0\x2F\x42\xFC\x7A\x00\x04\x03\x3C\x7F\xB3\x1A\x7F\x30\x8A\x0F\xD3\xC0\x47\xA1\x78\x25\xFA\xAD\xD9\x2C\xF5\x8B\xFC\xCF\x8F\xE0\xDC\x66\x35\x5F\x5D\xA2\x29\x67\x13\x4A\xDF\xAA\x81\x6F\xDF\x2C\xF5\xFF\xB3\x1A\xF8\x21\x54\x4F\xFE\x4D\xA8\x1F\x6E\x02\x20\x20\xC0\xB7\x00\x30\x49\x68\xCF\x2D\x08\xFF\x56\xF5\xF8\xE6\xDC\xAA\xE1\x37\x14\x2F\x45\xBF\xA3\x6F\x55\xF3\xC9\x24\x0D\x7E\xCD\xAD\x68\x9E\x6A\xE0\xAD\x28\xDF\x16\x0D\x7C\x3B\x8A\xEF\xD0\xC0\xBB\x10\xFE\xAB\x08\x7E\xE8\x56\x00\x5E\x12\xF8\xFE\x0E\x00\x04\x33\xFC\xD5\xBB\x34\xED\xBD\x4B\xAA\xF7\xB8\x0C\xBF\x5B\xE2\x9B\xFC\xBB\x51\xFB\xEF\x56\xE3\xBB\xEF\x96\xCA\x9F\xAE\x81\xD7\xDC\x2D\xF5\x47\x33\x82\xB7\xDE\xAD\x6E\xEF\xD5\x08\xBE\xE9\x6E\x00\x26\x0B\xE3\x71\x8F\xD4\x9F\xDB\xDB\x34\xF4\xB7\xA1\xF9\xAE\x81\x1F\x6A\x43\xF2\x5F\x86\xF3\x12\x5F\x6E\xE5\x01\xC8\x17\xFA\xAD\x1D\xC9\xED\x76\x8D\x5D\xD1\xAE\xE1\x87\x76\xC4\x0F\x08\x7E\x4F\xBB\x9A\x7F\xEE\x45\xF0\xED\xED\x12\x1F\xEC\x6F\x97\xDA\xF5\xAD\x5C\x4E\x07\x00\x03\x85\x72\x3B\x00\x28\x13\xD2\x3B\x35\x74\x76\xA2\xFE\xD4\xC0\xAD\xDB\x90\xFD\xB0\x0D\xF1\xC3\x36\x75\x7A\x05\x8A\x4F\xD1\xC0\xA7\x6B\xE2\x4B\xB7\x49\xFD\xD0\xAC\x81\x6F\xDA\x86\xDA\xA5\x81\x3F\x82\xE2\x3B\x35\xF0\x67\x51\x7C\xBF\x06\x7E\x08\x95\x73\x5C\x03\xFF\x11\xC5\x0D\xF7\xAA\xE1\xF9\x28\xEE\xD0\xC0\x87\xA1\xF8\x28\xF4\x5B\x7D\xAF\x5A\x9F\x36\x6B\xF0\x5B\xEF\x45\xFA\x48\x03\xDF\x8A\xE0\xDB\x11\xBC\xEB\x5E\x00\xD6\x0B\x74\xDF\x0F\xC0\x99\x42\x39\xDB\x35\xFD\xB0\x1D\xD9\x1B\x1A\xF8\
x33\x28\xFE\xBC\x06\xFE\x2A\x8A\x1F\x42\xBF\xC7\xB7\x4B\xFC\x2F\xFF\x4B\x7F\x10\xC9\xFF\x07\x01\xC8\x06\x00\xCC\x45\xF1\xC5\xE8\x77\xE9\x83\x00\x4C\x57\xB6\xE3\x21\xC4\xE7\x0F\xA9\xE5\xEA\x0E\x04\x7F\xFA\x21\x75\xFD\x7B\x35\xF1\x43\x0F\xA1\xFE\x97\xE1\x0F\xAB\xF5\xD1\xB0\x87\x25\xB8\xFB\x61\x00\xD6\x08\xFD\xFA\x28\x00\xA3\x84\x7C\x8F\x21\xFA\x1F\x03\xE0\x3C\x05\x7E\xE1\x0E\xCD\xFC\xDD\x81\xE6\x2F\x82\xD7\xEC\x50\xF3\xFF\x85\x08\xBE\x44\x03\xAF\x47\xF0\xE0\x0E\x00\x6E\x10\xC6\xE1\xCF\x00\x9C\x2F\x94\xF3\xA4\x04\x9F\xF3\xA4\x1A\x7F\x21\x82\xD7\x6B\xE0\x14\x82\xB7\x3E\x09\x40\x3F\x61\x1C\x9F\x44\x7A\xF0\x49\x8D\xBD\x84\xE2\xDF\x6A\xE0\x60\x97\x14\x4F\xDF\xA5\x99\x5F\xBB\xA4\x7A\x1C\x1A\xB8\x7B\x17\x6A\x2F\x82\xD7\xEC\x02\x20\x53\xA8\x1F\xC5\xAF\x44\xBF\x9B\x76\x49\xF3\xDA\xB0\x5B\x8A\x67\xA2\x5F\xEB\x6E\x89\xCF\x8E\xEF\x96\xC6\x61\x7A\x17\x2A\xA7\x0B\x80\xA0\x90\xFE\x34\x00\xD3\x84\x71\x78\x46\x33\x6F\x51\xBC\x46\x03\x5F\xFA\x8C\x44\x67\x50\x03\x6F\x46\xF1\x56\xF4\xBB\xF1\x19\x75\xBF\x5D\x8F\xE0\x5B\x35\xF9\xEE\x41\xF1\xED\xCF\xA8\xE5\x5E\x17\x82\x3F\xAB\x29\x67\x3F\x82\x1F\x7C\x46\x2D\x9F\x8F\x20\xF8\xA7\x1A\xFC\xAF\xE5\xFA\xF6\x68\xF4\xDD\x1E\x09\x7E\xDA\x1E\x35\x7E\xC9\x1E\x4D\xFF\xA3\xF4\x6A\x04\xAF\xD9\x23\xD9\xE1\x9B\x10\x7C\xAB\x06\x7F\xFB\x1E\xA9\x1D\xFB\x11\xFC\xD0\x1E\x69\xBC\xA2\xE3\xBC\x57\x82\xE7\xEF\x55\xE7\x2B\x44\xF1\xD3\xF7\xAA\xE9\x29\x43\xF0\xEA\xBD\x6A\xFB\x61\x31\x82\xD7\xEF\x55\xF7\x03\xA3\x29\xB7\x15\x95\xB7\x49\x03\xDF\x8A\xE0\xDB\x11\xFC\x11\x4D\xFA\x0E\x4D\xBC\x0B\xE1\xEF\xD7\xC0\x0F\x21\xF8\x71\x0D\x5C\x30\xD2\x04\xBA\xF2\xF7\x49\xF0\xC2\x7D\xEA\x76\x0D\x46\x70\xF7\x3E\xF5\x7A\xB3\x66\x9F\xBA\x1C\x1F\x8A\xFB\xD1\x6F\x50\x93\xCE\xA1\x78\xB3\x06\x7E\x09\x8A\xB7\xEE\x53\xF7\x5B\x97\xF0\xBF\x89\xDB\x1F\xD6\x9B\xBA\xF4\xA2\x3E\x00\xC9\xFF\x89\xF3\xB4\x87\x74\x6B\x0F\xE9\xD5\xDD\xA4\x0B\xFF\x5A\xBB\x49\x9B\x92\x0E\x40\xFA\x1B\x62\x39\xAB\x20\x00\xCD\x10\x00\xBF\xC0\xC3\x83\x80\xB4\x76\x39\x1A\x89\xAC\x1C\x2D\xD5\xF3\x35\x00\xD6\x71\x10\x48\x7D\x6C\
x40\x74\x8B\xFB\x28\x88\x86\x56\x99\xA0\x3E\x44\x10\x12\x05\x9D\xE1\x13\xE4\xA1\x1E\x00\xAB\x13\x80\xAD\x97\x43\x50\x3D\xC9\x02\x5A\x37\x6F\x01\xE0\xED\xAF\x52\xC0\xDF\x6E\x19\x27\x14\x60\x41\x99\x05\x9D\xD4\x0F\x15\x2C\x84\xDB\x46\x20\xFD\xF4\x3A\xD2\x4F\xAF\xCB\xE3\x1A\x11\xBF\x15\x1B\x89\x58\x81\xD0\x66\x61\x8D\x00\x26\x1E\x7F\x4C\x6F\xDA\x09\x7B\xE8\xE2\xFF\xED\x7F\x77\xBE\xA1\x33\x50\x78\x80\x80\x5B\xDE\xD0\xFD\x29\x05\x18\xE9\x20\xEE\xC1\xEB\x68\x86\x83\x99\xC2\x9F\x75\x21\xD2\xCF\x91\x94\x5B\xA7\x8C\x8D\xD0\x2B\x63\xE5\x06\x65\x6C\x64\x8A\x32\x36\x2A\xD5\x2C\x16\xD8\xD0\xE0\xF1\xD2\x21\x8A\x4B\x33\xC9\x51\x36\x14\x48\xB7\xC8\x91\x20\x43\xFB\x42\x5E\xCE\x18\x4D\x0D\xE0\xCD\x19\xB1\x08\x49\x99\x32\xA3\xF9\x68\x86\xCB\x8C\x26\xE1\x7E\xBF\x39\x16\xA1\x5A\xB2\xA4\xEA\x18\x92\x6B\xF4\xE0\x75\xAC\xC5\x12\x8B\x32\x74\x88\xF2\x59\xB3\x62\x00\x2F\x41\xFA\xB3\x15\x08\xF5\x7E\x9A\x66\xFA\x29\x10\x82\xFE\x10\xDB\x5F\x81\x10\x20\xA9\x10\x9B\xD3\x4F\x01\x08\xF9\x39\x32\xE8\x6F\x19\x60\x8D\xC1\x7C\xE4\x0A\xD2\x47\xE4\x2A\xE8\x60\x88\x40\x1E\x42\x60\xF0\x16\x8F\x97\xA6\xBC\x38\x97\x6F\x89\x41\x58\x3F\xE9\x25\x0A\xC4\x66\xD4\x91\x1C\xEB\xA1\x19\x5B\x66\x34\x82\x53\x3E\xBB\x25\x1A\xA3\x88\x06\x9C\x23\x0A\x63\xC9\xCD\x34\x33\xB0\x5F\x34\xC6\x36\x92\xF5\x9C\x9F\xA8\xE7\x4E\xEB\xAF\x86\x31\x64\x43\x23\x37\x28\x5B\x00\x7A\x49\x1F\x23\x10\xC1\xE1\x24\xC5\x3A\x32\x82\x38\xC3\x12\x22\xB0\x28\x5D\xFA\x9B\x0C\x9E\xDE\x3F\x8A\x48\x52\x1C\xC1\xB0\x84\x97\x63\x8B\xC5\x36\x10\x2B\x70\xBF\xC7\xCB\x35\x7B\x28\x62\xE5\xE0\x5C\x15\x84\x25\x38\x0F\x49\x05\x43\xDC\x90\x01\x71\x70\x1F\xCE\xE1\x25\xF6\x38\x30\x41\x71\x4C\x4B\x90\x26\x29\xCE\x99\xA7\x4A\x6C\x20\x38\x0F\x43\xB0\x21\x3F\x57\x6A\xF7\x78\xEA\x69\xC6\x4B\x78\xC8\x40\x90\x66\x38\x8F\x82\xBB\x58\x4C\xEC\x66\x8E\xF6\x50\xA1\x40\x1D\xC1\x94\x49\xAD\xC6\x59\x62\xF4\x28\x0F\xC9\x7A\x56\xE0\x7E\xD2\x37\x34\x1B\x01\x1A\x08\xCA\xE3\x23\xBC\xB4\x8F\x18\x96\xAD\xC0\x93\x40\xC3\x95\x20\x82\x12\x40\x2E\x65\x46\x09\x74\xC6\x00\x05\x56\x88\xF1\xA3\xCC\
x6E\x2D\x58\xC2\x1E\x21\x0E\xCC\x32\x96\xA6\x3C\x21\x2A\x80\x33\x6C\x23\xEE\x2F\xB7\x46\x61\x08\x32\x52\xAC\x99\x09\x51\x1C\x19\x20\x3C\x04\xC3\xD0\xCC\xA8\x3C\xD4\x40\x4F\x03\x83\x07\x05\x0E\xC2\xBD\x8D\x78\x9D\x9F\x38\x33\x3F\x9A\xDB\x4F\x34\x0B\x70\x1F\x6A\xFA\xE8\x04\x29\x2C\xC7\x90\x54\xC3\x59\xD9\x71\x29\x63\x06\x44\x41\xD2\x90\x4B\x98\x63\x73\x34\x60\x8E\x6E\x22\xA8\x8A\x7E\x5A\x64\x82\x1B\xA7\x2D\x80\xAE\x5B\x46\x78\xB9\xCA\x2C\x35\x78\xBC\xC8\xB8\x2B\x70\x7F\x88\x90\x00\x55\x85\x51\x84\x95\x0C\xC9\x11\x8C\x87\x08\x90\x9C\xA7\x8E\xA6\xFD\x04\x4E\x4D\xB0\x25\x4C\xAD\xF7\xD3\x38\x57\x9D\x38\xA7\xC0\x9E\x0D\x04\x33\xD1\x9E\x30\x55\xEA\x9A\x49\x89\x13\xA5\x36\x4F\x3E\x3D\x61\xA2\x34\x31\x09\x3F\x11\x20\x28\x6E\x4A\x62\xBA\xC4\x76\x4D\x1D\x94\x30\xCD\x4B\xFB\xFD\x84\x97\x23\x69\x6A\x9A\x23\x71\xED\x02\xF7\x4B\xC5\x9F\x9D\x1C\xC3\x2F\x44\x71\xFF\xF4\xE2\x84\x18\x52\xA7\xCB\xC5\x9C\x33\x40\x8B\x24\xFE\x9C\x6B\x8E\x82\x7D\xA1\x40\xF0\xBC\xAC\xD8\x90\x08\xF1\x19\x22\x3F\x36\x12\x78\xD0\x13\xE4\x18\x61\xDE\xCD\x54\x43\x58\x82\x9B\x95\x21\x40\x02\xB8\xDF\x4F\x7B\x67\xA7\x0B\x7F\xD7\x33\x04\x51\x23\x0A\x2C\x86\x10\xC1\xE7\x0F\x50\x4C\x4B\x8F\x17\xF7\x36\x12\x42\x59\x73\x12\x80\x59\x82\xAB\x15\xAB\x08\x10\x01\x9A\xBC\x98\xF0\x90\x14\xC9\xCD\x55\x41\x82\x21\xB6\xF1\x02\x8B\x0A\x42\x07\xE7\xF5\x53\x67\x62\x09\x86\x9B\xAF\x42\x6A\x20\xB8\x0B\x2D\x81\xA0\xCF\xE3\x23\xEA\xF1\x90\x9F\x13\x24\xC9\x02\x93\x00\x08\xE0\xCD\x42\x64\xA1\x48\xB2\x84\xE1\x5F\x24\x56\x29\xB1\x88\x20\x42\xEA\xEA\x17\x8B\x90\xBA\xFA\x98\x40\xB9\x28\x5F\x0B\xF1\x50\xB4\xD8\xFA\x25\xD9\xB1\x94\xBA\x7A\x49\xD2\xCA\x20\x31\x22\xC2\x97\x1A\x96\xE3\x75\x2C\x9E\xBE\x1C\xF7\xF9\x3C\x34\x45\xD4\x19\x84\xBF\xBC\xE9\xCB\xD9\x50\x9D\x10\xF7\x19\x96\x07\x42\x7E\xC2\xB0\x1C\xA7\x7C\xF5\xE9\xC2\xFF\x3D\x14\xCD\x35\xE8\x97\xD3\x4C\xA3\x61\x79\x33\xCD\x90\x86\xE5\x14\xD1\xB0\x2C\x3B\x46\x2A\xEB\x61\x70\xAA\x81\x68\xCA\xF6\x78\x82\x38\xD7\x28\x56\x24\x31\x82\x3F\xD3\x83\x66\xA3\x90\x10\xB0\x7A\x44\x29\x8A\x66\x23\xD7\xC8\
x52\x66\x8F\x14\x63\x88\x00\xBD\x82\xA0\xE5\x68\x3D\xE9\xE7\x08\x26\x38\x40\x1E\x26\xC4\x56\x12\x78\x79\x3F\x0D\xB8\x81\xE0\x18\x2D\xAA\x54\x20\x9B\xA3\x01\x87\x28\x92\xA6\xB8\x34\x8F\x27\x40\x30\x0D\x44\xA8\xBF\x9C\xAC\x20\x62\x85\x1A\x28\xD5\xB9\x52\x54\x6F\xA2\xDA\x20\xEB\xEB\x9B\x73\xE4\x58\x54\x17\x91\x34\xD5\x32\x00\x41\x59\x15\xF8\x62\xB3\x8C\x2C\x56\xFE\xBB\xAC\x28\x96\x18\xBF\x44\xE4\x64\x96\x63\xFC\x04\x75\xA9\x09\xFD\x4D\x79\x03\xC1\x55\x62\x84\x64\x7D\x64\x03\xC9\xAD\x46\x11\x36\x88\x7B\x89\x56\x68\x94\x62\x8D\x44\xF3\x1A\x28\x72\x3F\xC9\xD1\xF8\x5A\x28\x16\x85\x73\x34\x39\x7A\xD4\xBA\x68\xA4\x7E\xF4\xA8\xF5\xB0\x1F\x2A\x98\xA4\x1A\x58\xA4\xEE\x2F\x83\x39\x1A\xA0\xA8\x7E\x2F\x57\x83\x09\xCA\xC7\xAE\x24\xB9\xC6\x2B\x60\xAE\x12\x5C\x4F\x33\x01\x5C\x6C\xFE\x95\xB0\xBF\x32\x81\xA4\x7C\x44\x33\x5D\xBF\x41\x0D\x65\x88\xA0\x1F\xF7\x12\x1B\xE1\x80\x04\x50\x0F\x75\x15\xCC\x56\xC2\xD9\xA0\x9F\xE4\xAE\x56\x57\xC8\x72\x38\xC3\x89\x94\x5C\xA3\x2E\x84\x0D\xD5\x49\x7F\x5E\x0B\xAD\x4A\x38\xC7\x90\x81\xEB\xD4\xA8\x02\xC8\x23\x98\x22\x9B\xD4\x65\x8B\x70\xD1\x1C\xF9\x3D\xCC\x8B\x4B\x08\x32\x44\x3D\xD9\x7C\x7D\x82\x14\x36\x54\x5F\x4F\x36\xFF\x21\x41\x61\xE2\x30\xDD\x00\x33\x62\x91\x1B\xD5\x4D\xF4\xD3\x2B\x09\x66\xB3\x1A\x16\x0A\x06\x09\xE6\x26\xA9\xE3\xB8\x96\x20\xC1\x0A\xF6\x82\x34\xC3\xB6\x68\xA0\x52\x96\x9B\xA5\xA1\x8A\x42\x91\xCE\xBA\x45\x1A\xED\x28\x58\x54\x1C\xB7\x4A\xBD\x13\x2B\x81\xE0\x6E\xD3\x14\x2A\x4D\x91\x3F\x4A\x44\x29\x08\xF0\xFB\x6F\x87\x59\x31\x98\x60\x99\xDF\x21\x75\x6C\x88\x22\x05\xA3\xC2\xC3\x86\x18\x86\x16\xAC\xC0\xAD\x52\x2F\xC9\x70\xC9\x14\x11\xA2\xDC\x9D\xB0\x30\x41\x4A\x34\xE3\x5D\x89\x33\x72\xF5\x63\xEE\x56\xA7\x48\x66\x8C\x98\x72\x8F\x9A\x08\x3F\xCE\x72\x22\xFC\x4F\x52\xB7\xC8\x70\xA1\xB1\xC2\x08\xB4\xA9\xC1\x1C\x2D\x0D\x02\x1F\x07\x16\xC7\xA1\x1D\x2A\xB4\x92\xD0\xF0\x0E\x25\xA0\x11\x67\x1B\x3B\xA5\x8E\x92\x00\x5E\x3A\x10\xC4\x19\x62\x9B\x84\x14\x93\x4B\xF7\x42\x93\x3C\xFB\x1B\x08\xEE\x3E\x69\x64\x64\x01\xCF\xB4\x08\x13\xE8\x7E\x68\x8E\x95\xD3\x40\
x70\x0F\xC0\xFC\xB8\x72\x51\x96\xED\x89\x92\xA4\xCA\x1E\x94\x3A\x43\x9D\xC4\x12\xDC\x43\xD0\xA2\xA8\x52\xA0\xFB\x61\x98\x15\xB3\xF3\x05\xC0\x23\x12\x0A\x22\x5A\x80\x3C\xAA\x6C\xAB\xA0\xB7\x1F\x93\xB8\x47\x02\xF8\x09\xAA\x81\x6B\x7C\x5C\x95\xAB\x89\x68\x61\x77\x40\xC5\x92\x02\x0F\x06\x09\xCA\xF7\x84\xB2\x20\x41\x47\xFD\x19\x2A\xEC\x2F\x51\x06\xFF\x45\xEA\x46\x54\x8E\xA4\x43\x77\xC2\x6C\x8F\x47\xD9\x91\x0C\xBD\xF2\x49\xA9\x27\x11\x8F\xEF\x82\x4A\x55\x29\x9A\x63\xBB\xA5\xBA\x10\x88\x21\xEA\xBB\x54\x00\x92\xE2\x9E\x82\x19\xB1\xF9\xF6\x34\x1C\x18\xA3\x44\x30\x7A\xFD\xF4\x4A\x8F\x97\x0E\xB6\xA0\x4A\xFF\x0A\x6D\x49\xD2\x59\x82\x7B\x26\x36\xAC\xB8\xCF\xB7\x07\x66\x49\xE4\x8A\xC3\xCC\xD0\x2B\xF7\x4A\x22\x21\x3E\xEF\x3E\x69\x16\x20\xAE\x62\x70\x8A\x25\x39\x72\x05\xE1\xF1\xFA\x69\x36\xC4\x10\xCF\xC2\xD3\xA4\x82\x92\x21\x3C\x27\x49\x7B\x61\x4E\xFE\x4D\x35\xB0\x2C\x79\x31\xF1\x77\x89\x3B\x62\x1D\xE0\x11\xED\x20\x9C\x23\x7C\xCF\x4B\x53\x3D\xDA\x15\x92\x99\xFA\x82\x52\x3C\x7A\x38\x82\x09\x90\x94\x80\xFD\x0F\xA5\x0A\x88\x15\xF2\xA2\xA4\x76\xC4\xF1\xDD\x0F\xFB\xC5\xC6\x7A\xA5\xB8\x74\xC5\x83\xFF\x94\xFA\x57\xEA\xBF\x97\x60\x1A\xEA\x92\x7F\x21\xC1\x4C\x70\x51\xCC\x03\x4A\x6E\x45\x43\x2A\x20\xBC\xAC\x64\x70\xB9\x69\x92\x9A\x7D\x45\xC9\xB6\xC2\x8A\xFB\xDF\x12\x09\x12\xAE\x60\xCC\x08\x16\xC5\xAB\xCA\x82\x25\x7D\x2E\xC2\x5F\x83\x0D\x14\xED\x23\x2A\x2A\x18\xA2\xBC\x94\xE5\x7C\x15\x15\x1E\xCF\x88\x8A\x8A\x3A\x9C\x25\xBD\xA8\xA1\x95\xDE\x46\x9C\x19\xE6\x88\x25\x0A\x71\x61\x0C\x48\x8E\x15\xD3\xAA\x94\x89\xA8\x57\x68\x46\x4A\x72\x54\x39\xBC\x34\xC5\x72\x25\xD8\x41\xE8\x23\x58\x2F\x1E\x24\xFA\xB6\x9A\xD7\xE1\x21\x5D\x83\x9F\xAE\x93\x98\xC4\x8F\x73\x44\xA9\x80\x21\x25\x97\x0D\x73\x84\x28\x96\x6C\xA0\x08\x9F\xC3\x4F\x53\x0D\xCA\x12\x57\x10\x5E\xA1\xB8\x3E\x21\x2E\x71\x62\xDF\xF4\x43\xAC\x2F\x94\x78\x3F\x63\x0D\x65\xD8\x1B\xB0\xD4\x4F\x34\x13\x4C\x45\x85\xF8\xD3\x4D\x07\x63\x6F\x42\x0B\x42\x5D\x25\xE1\x62\xFF\x81\xFD\x11\x84\x22\x9A\xB9\x52\x71\xDD\x5A\x86\xBD\x15\x85\xD6\x13\x9C\xB7\x51\x10\xAE\
x81\x52\xEC\x10\xCC\x51\x41\x45\x73\xBA\x14\x7B\x1B\x0E\x54\x81\x39\xA1\x20\x92\xE2\x10\x0D\xD8\x61\x68\x96\x98\x7A\x95\xF0\x53\x8A\xBD\x03\xFB\x4B\x71\x49\x8A\x96\x0A\x91\x32\xEC\x5D\x68\x13\x19\x45\xB4\xC3\xA5\x06\x95\x0D\x73\x88\x69\x65\xD8\x7B\xD0\x2E\xC2\x19\x4F\x00\x27\xA9\x52\x96\xC3\x39\xA2\x6C\x98\x43\xC2\xC2\x8E\xC0\x42\x94\x2A\x51\xA4\x49\x7E\x5F\x9A\x92\x62\xE9\x01\x9C\xF3\x36\x7E\x00\xEF\xD3\xAF\xA0\x49\xDF\xAF\x93\xDD\x84\x34\x61\x19\xE8\xA9\xC3\xBD\x4D\x1E\x56\x90\xE9\x82\x68\xE9\xA3\x0A\x25\xDE\xAE\xEA\x5B\x29\xF2\x21\x6C\x4F\x8B\xA1\x79\x44\xE5\x2F\x2A\x7C\x4D\x4F\xA2\x14\x81\x69\xE2\xA1\x31\x1B\xA9\x52\x5A\x4E\x37\x11\x2D\x7D\x34\x27\x85\x31\x17\x18\xAC\xAC\xAA\xAC\xCA\x11\x47\x08\x87\xD7\xF9\x7F\x79\xFA\x94\xF5\x87\x28\x9A\xF1\x11\x0C\x21\x2C\xF7\x83\x22\x35\x04\x93\x98\x88\x5F\x9A\x56\xA1\xF6\x58\x95\x55\xC3\x1C\x1C\x13\x22\xBA\x6D\x0D\xB1\x3C\x84\xFB\xFF\x37\x1B\x23\x92\xE6\xE1\xE8\xEE\x1B\x94\x48\x64\xFC\x12\x94\x4B\xB2\xA5\x9E\xA4\x7C\x0A\x7A\x4B\xA3\x7F\xCA\x73\xF5\x23\xB8\x57\x17\x2B\x27\x88\x93\x49\x38\xE9\x67\xA5\x4D\x5B\x51\x5F\x29\xDB\x92\x61\x8E\x7A\xDC\xCF\x12\x8A\x76\xF7\x59\x55\xD8\xC7\x70\xA7\x51\xD3\x93\xFF\xBF\x0A\xC1\x61\x0E\xC1\x7A\xFE\x4D\x16\xFE\x26\x0B\x7F\x4D\xB2\xD0\xE3\x21\x02\xD2\xD6\x60\x88\x22\x97\x87\xC4\x2A\x3D\x38\xD3\xC0\x26\xA6\xE0\x14\xC9\x42\x47\xBC\x50\xFE\x05\xA8\x28\x29\xC1\x3E\x81\xC7\xE1\xAF\x69\x66\x55\x54\xD0\x41\x49\xDA\x96\x62\xF1\x7A\x4F\xFA\x3D\x0A\x6B\xBB\xE5\xE9\x6E\xCB\x18\xE6\x48\x52\xEA\xA7\xF0\x9D\x94\xDF\x44\xE7\x6F\xA2\xF3\xD7\x23\x3A\x19\x42\xA8\xB9\x54\xBD\x3B\x71\x0C\xBE\xF7\x1B\x9B\xFF\xC6\xE6\xBF\x22\x36\xF7\x78\x12\x32\xFA\x71\x69\x4F\x98\x21\x1A\x88\xE6\xE8\x6D\x9B\xCF\xA4\x9D\x72\x09\x28\xEE\x53\x7D\x0E\x73\xBC\x74\x20\x48\xFA\x55\x9B\xA6\xD8\x17\xD0\xCA\x10\x21\x96\x28\x65\x88\xF2\x8A\x8A\x39\x53\xCB\xCB\xB0\x2F\xE1\xA7\x19\x3F\xE7\xDA\xA0\x4F\x3A\x65\x98\x23\x4A\xF0\x4F\xDC\x1F\x39\x05\xE4\xF5\x38\xE1\xFB\x88\x86\xFF\x99\x66\x8B\xB2\xA3\xCF\xA6\x75\x2F\x05\xD1\xAF\xBD\x93\xA3\x32\xED\
xD4\x75\x74\xEF\x04\x64\x9F\xF7\x82\x62\x37\xA9\x8F\xDA\xDE\xB7\x5B\xD2\x5F\xC1\x48\xA6\x66\x51\xF5\xFF\xA9\x00\xFE\x09\x7B\x33\xBF\xC9\xE1\xDF\xE4\xF0\xAF\xA2\x93\xFF\xAF\xCB\xE1\xE4\x3B\x59\x7D\x3E\xF2\x1A\xD1\xD9\x67\xCD\xEC\x53\x75\x70\xEA\x1B\x54\x52\x82\x7D\x0D\x4F\x8F\x2D\x18\x04\x4D\xEA\xC1\xFD\x7E\xF9\x3A\x0C\x1B\xAA\x13\xD7\x10\xDF\xC0\xDF\xA7\xFF\x26\xD9\xFF\x27\x85\xCE\x6F\x92\xFD\x37\xC9\xDE\xE7\x92\x3D\xE1\xFE\xC3\xB7\xF0\x0F\xBF\x09\x85\xFF\x4D\x7E\xFD\x4D\x28\xFC\x26\x14\x4E\x81\xB9\x97\x50\x2C\x7C\x07\xD3\x48\x16\xF7\x07\x1B\xF1\xEF\x85\xBF\xC4\x0B\xE6\x3F\x08\x7F\x89\x17\xD3\x7F\x84\x29\x7E\xBA\x61\x84\xFB\x04\x34\xB2\x54\x90\x21\x29\xAE\xDE\x13\x86\x19\x9E\x15\x72\x2C\x02\x4D\x1E\x3A\xC4\x79\xEA\x42\xF5\xF5\x04\xD3\xAA\x33\x8A\x31\x2A\xE4\xF7\xAF\xD1\x65\x7A\x28\x8E\xC6\xD1\x7B\x8C\xB5\xBA\x14\x4F\x3D\x47\xE3\xEB\x74\x29\x1E\x82\xA3\xF1\xF5\xBA\xB4\x7A\xA9\x88\xCB\x74\xA9\xF5\xE2\xDB\xAB\xCB\x75\x29\xF5\xC1\x10\xE7\xBD\x42\x97\x22\x3E\xA9\xBD\x52\x97\x2A\xBD\xA2\xDA\xA0\x33\xD4\x33\x04\xB1\x51\x97\xEA\x15\xE3\x57\xE9\xD2\xD0\x43\xAA\xAB\x75\xA9\x2C\xC7\x70\xB4\xFF\x1A\x5D\x6A\x80\x08\x78\x1B\x99\x6B\xA5\x3F\x02\xC1\xEB\xA4\x3F\x82\x2D\x9B\x74\x69\x01\x22\x10\xA0\x57\x10\xBF\x17\x41\x2C\xC1\x5D\x2F\x66\xF3\x13\xD4\x1F\x74\x99\xE2\x03\xA7\x3A\x9C\x25\x70\x9F\xEF\x06\x5D\x96\x32\xCA\xD1\x37\xEA\xCC\x22\x80\x6D\xA4\x19\x0E\xF7\xF9\x36\xA3\xB8\x80\x40\x52\x5E\xE6\x26\x45\x7E\x36\x54\xB7\x45\x67\x55\x46\xEB\x19\x3A\x70\xB3\xB2\x84\x40\xC8\x7F\x8B\x22\x47\x20\xE4\xBF\x55\x99\xEC\x23\x57\xDC\xA6\xB3\x44\x93\x7D\xE4\x8A\x00\xED\xFB\xA3\x02\x22\x3D\x58\xBD\x5D\x0B\x61\xEE\x40\x10\xB9\x12\x8F\x77\xAB\xAE\x9F\xE2\x69\x97\x97\xA6\x38\xA2\x99\xBB\x53\x97\x85\x9E\x77\x21\xC0\x5D\x3A\x73\x3D\xC5\x79\xBC\x34\xB5\x82\xF6\x87\x38\xE2\x6E\x5D\xB6\x10\xC7\x43\x1C\x1D\x85\xDD\xA3\x33\x78\x19\x6E\xE4\x9F\x74\xE9\x42\x92\x8F\xAC\x2F\x6F\xD3\xA5\xB1\x9C\xCF\x53\x4F\x71\xBC\xCE\x24\xFC\x45\x52\x2B\x84\x58\xBB\xCE\x5C\x4F\x87\x18\x0F\xCB\x11\x41\x21\xDE\xA1\xCB\x16\x53\x94\
xB0\x4E\x9D\xF8\xE4\x6C\x39\x4B\xC8\x4F\x0A\xB7\xE9\x32\x04\x08\x47\x7B\x58\x2F\x79\x2F\x1A\x00\x21\x26\xA6\xDE\xA7\xCB\x5A\x49\x33\x0A\xC0\xFD\x3A\xA3\x80\x20\x8E\xFE\x03\x52\x56\x89\x29\xB6\xEB\xC4\xB7\x6B\x88\x31\x1E\xD4\x65\x4A\x1D\x22\x61\x3E\xA4\x4B\x17\xAB\xA5\x88\x95\x0F\xEB\xB2\xC5\x94\x95\xA4\x78\x6F\x95\xF6\xF8\x5A\xA8\x47\x24\xAA\x50\x5E\x01\xF2\xA8\x04\x11\xEB\x16\x5F\x39\xB1\x8F\x49\x95\x05\x58\x01\xF6\xB8\xD4\x93\x24\x4B\x52\xF5\x24\x45\x72\xC4\x0E\x89\x2C\x92\xA5\x70\xEA\x09\x39\x51\x7C\x7E\x4C\xAE\x20\xFE\x2C\x65\x25\xD9\xE5\x14\x4E\xFD\x45\x67\x96\x22\x6C\x90\xF0\x92\xB8\x7F\xA7\x9C\x78\x31\xC1\xD0\x4F\x4A\x74\x0A\x13\x74\x97\x84\x17\x22\x29\xCE\x23\x24\xED\xD6\xA5\xA1\xA7\x78\x5D\x52\x4B\x97\x33\x04\x4B\x5E\x4C\x3C\x25\xE1\xB1\x04\x27\xD1\xF9\x34\x6A\x39\xC1\x79\x84\xEA\xFE\xAA\xB3\xCA\x51\x99\x9C\x67\x62\xA0\x20\x2D\x5D\xE2\xDF\xA3\xCB\x91\x41\xBE\x16\x0A\x0F\x90\x5E\xF1\x0D\xF2\xDE\x68\xD9\x9E\x7A\x3F\xDE\xC0\xEE\x93\xE2\xCB\x45\x0E\x22\xEA\xEB\x9F\x95\xDB\x82\x1E\x96\x3E\x27\xB5\x9C\x25\x38\xD4\xB8\xBF\x45\x89\x11\x5F\xEA\xFE\x5D\x1E\x7D\x71\xF8\x05\xEA\x9F\x47\x25\xD6\x93\x14\xEE\x27\x2F\x26\x5E\x40\xD3\xA3\x9E\x6C\xF6\x50\x38\xF5\x0F\xC4\x0F\xDE\x46\xC2\xDB\xE4\x21\x9A\x83\x2F\xEA\xFA\x4B\x25\x88\x7C\xEF\x21\x29\x71\x05\xBB\x5F\x97\x21\xD4\xC1\x31\x64\xD0\x4F\xFC\x53\xAA\x53\x64\x31\x72\x64\xF9\x4B\xA8\x88\xE5\x0D\x04\x27\xF6\xE6\xBF\x50\x15\xE2\xA3\x69\xCA\x77\x00\xA1\x37\x48\xE8\x2F\x4B\x03\xB9\xDC\x4B\x07\x5B\x5E\x41\x1D\x2D\x4D\xBB\x7F\xEB\xF2\xC4\x7C\x78\x30\xE8\x6F\x91\xDE\xC7\x7B\x88\x66\x2F\xC1\xB2\xAF\x22\xEE\xF2\x06\x82\xAF\xE9\xD2\x25\x82\x03\xC1\x83\x8A\x99\xEE\x0D\x04\x5F\x57\x16\xC6\xBC\x81\xB2\xE0\x94\xEF\x4D\x39\x0B\x1E\xFC\x8F\x34\xC6\xCB\x69\xE6\x2D\x94\x79\x39\xEE\xF3\xB1\xA1\xBA\x43\xCA\xCC\xD4\xDB\x28\x73\x33\xCD\x1C\x96\xCB\xA9\x63\xDF\x91\x18\x69\xB9\xF8\xEE\xFE\x5D\xD4\x8C\xA0\x3F\xC4\xBE\x27\xE3\xF8\x7C\x47\x90\x80\x42\xC5\x7A\x48\xAA\xFE\x7D\x94\xCA\x86\xEA\x3E\x40\x7F\xFA\xC8\x15\x1F\xEA\x8C\x1E\xF9\xEF\x8F\x74\xD9\
xD1\x86\x78\x28\x49\x2A\x7D\x2C\xD3\x27\x45\x3F\x91\xD1\x03\x21\xFF\x51\x5D\x96\xA7\x49\x10\x42\x2B\x69\xA6\x49\x18\xE3\x4F\x05\x00\xCE\xE0\x1C\x1B\xAA\x13\x96\xAE\xDE\x63\x3A\x93\x34\xC8\x94\x20\xAC\x8E\xEB\xFA\xA9\x93\x05\xF8\x67\x88\x94\x40\xC8\xFF\x39\xEA\xF7\xE5\x52\x97\x73\xB4\xF4\x8C\x99\xC1\xFD\x5F\x20\x24\x86\x08\x7C\x29\xCD\x6A\x15\xCE\x57\x92\x0C\x54\xC1\x9A\xBF\x46\xBD\xC4\x31\x21\xCA\xFB\x0D\x8A\x88\xBE\x0C\xBE\x95\x47\x9E\x20\xFD\xDF\x21\x46\x26\x2F\x26\x48\x4A\x68\xF7\xF7\x88\x73\x89\x66\xF1\x55\x7D\x68\xC4\xE8\x1F\x10\x04\xBD\xB3\x0F\x8D\x18\xFD\x23\xE2\xB4\x06\x82\x6B\x22\x18\x8A\xF0\x9F\xD0\xE5\x88\x00\xF1\x51\x8B\x20\x3D\x83\x38\x83\x07\xD8\x30\xEA\x4F\x11\xBC\x72\xA4\x68\x83\x47\x74\x99\x2C\xD9\x1C\x15\x93\xAD\x7A\xAB\x20\x3A\x95\xA0\x35\xFA\x2C\xF1\x3D\x45\x90\x66\x09\x4F\x90\x5E\x59\xBE\x56\x6F\x65\x45\x27\xBB\x22\x58\x04\xAD\xD3\xF7\x63\x57\x8A\x76\xB3\xBF\x9E\xA1\x57\x4A\xC0\xF5\xFA\x21\x4A\xF3\x83\x22\x9A\x39\x4F\x90\x21\x03\x84\xC6\x12\xB8\x4C\x6F\x97\xEF\xA9\x38\x28\x62\xA5\x26\xF5\x72\x7D\x6E\x34\xD5\x47\xF8\x09\x8E\x28\x15\x37\x93\xB1\x2B\xF4\x03\x95\xD9\x16\x5D\xA4\xC9\x78\xA5\x3E\x5F\x93\x71\xD1\x45\x28\xEB\x06\x7D\x3F\x8F\xC7\xDB\x8C\x7B\x82\x21\x86\xF0\xAC\x20\x19\x2E\x84\xFB\x37\xEA\x27\x2B\xC9\x55\xDA\x44\x1E\x2F\x1D\x08\xD0\x54\x25\xBA\x99\xE3\xF1\x70\x8D\x0C\xBD\x12\x3D\xFA\x92\x7C\x01\x94\xA2\xDB\x33\x57\xE9\x9F\x82\x7D\x62\x58\xA9\x0B\xEB\xDB\xF3\x86\xAB\xF5\xBB\xFA\xAA\x11\xF2\x90\x8C\xEF\xDB\x16\x5C\xA3\x3F\xDC\x37\x2D\x28\xE9\x9B\x33\x7C\x01\x8E\xB3\x02\xFB\x7A\x28\xDA\x83\xFB\x49\x9C\xAD\x94\x2F\xFF\x26\x7F\x61\x73\xAD\x9E\xE9\x23\x6A\x24\x15\x5F\xAA\x79\x31\x25\x24\x63\xD7\xF5\x59\xA5\xD2\xFB\xC8\x84\x95\x6E\xD2\x07\xFA\xAE\xA5\x04\xB3\x42\x2B\x93\x7E\xAF\x6F\xE9\xDB\x46\x76\x33\xAA\xD7\xEB\x97\x9D\xB2\xBA\xB1\x3F\xE8\xEB\xFB\xA8\xB2\xE8\xB3\x20\xB1\x3E\xEC\x06\xFD\xFA\xBE\x92\x29\xD1\xA9\x43\x34\x73\x04\x43\xE1\xFE\xEE\x7A\xF7\xC6\xBE\x1B\x59\x91\x88\xEE\xEA\xDE\xDC\x77\x23\x1B\x57\x37\x76\x93\xFE\xF6\xBE\xEA\x70\xF4\x96\
x5A\x3B\x53\x13\x4C\xDC\x24\x1D\x21\x81\xB7\xE8\x27\x29\x35\xAE\xF4\x2C\x4E\xB2\xF6\x7A\xAF\x70\x6F\xD6\xE3\x9A\xE7\x75\x1E\xF1\x29\xAC\x6A\xEB\xC3\x4F\xB0\x6C\x25\x49\x71\xC3\x1C\x24\xC5\x55\x95\x88\x3F\x65\x55\xA5\xC2\xFF\xA5\xBF\xD5\xFB\x45\x1A\x74\xEC\x16\xFD\x25\x51\xFA\x49\x8A\x8B\xAB\xEB\xCC\x93\xAD\x2C\xFE\xFF\xDD\x55\x7F\xAB\x7E\x45\x1D\x4D\xFB\x55\x38\xD2\x73\x46\x92\xA6\x44\x02\x3C\x24\x25\x8C\x89\x60\xE4\xFC\x9C\xED\xBE\x4D\x3F\x4D\x99\x2C\xAC\x77\x3D\x34\xE5\xD5\x0C\xBC\x63\x05\xED\xC7\x39\xD2\x4F\x94\xA0\x73\x7E\xE9\xC7\x51\x5A\x86\x21\x7B\x0B\xFB\xA3\x7E\x88\xB8\x2D\x34\x89\xE4\x6A\x39\x9C\x23\x2A\x2A\x6A\x42\x6C\x63\xA9\x58\x95\x92\x67\x6F\xD7\x97\x69\xF0\xE6\x32\x2D\xB5\x04\xCE\x78\x13\x20\xDF\xA1\x6F\xD2\x20\x23\x4C\x11\x5A\x2B\xB2\x7A\x0D\x49\x78\x89\xE8\xB9\x59\xD2\x04\xA1\x7B\xE5\xFF\x6B\x91\xA4\x9E\xC2\xB6\xEA\xAF\x86\x62\x52\x0D\x43\x37\xC8\x55\xC9\x55\xFF\x17\x55\x2A\xCA\x9A\x48\x79\x1B\x69\x46\x05\x9A\x89\x73\xDE\xC6\xF3\x48\xCA\x97\x94\x9E\x3B\xF5\x03\xC5\x94\xC9\xD2\xA5\x3D\xA6\xA2\x62\xA2\x30\x51\xCF\xA1\x58\xF1\x3D\x2C\x76\x97\xDE\xA6\x49\x5F\x25\xFF\x59\x8A\xDD\xAD\xC7\xA4\x6D\x3A\xA2\x81\x68\x0E\x56\x54\xCC\xC7\xFD\x4D\x04\x53\x29\xC2\xA6\x31\x78\x43\x55\x45\xC5\x2A\x09\x56\x8A\xDD\xD3\x5D\x41\x7F\xD2\x97\x6A\x12\x27\xE3\x5C\x69\xB4\x1C\x44\xBD\xF0\x27\xD6\xA6\x1F\xAC\x41\xAD\xE5\x70\x46\x89\x2B\xF4\x3F\xC6\x23\x56\x89\xA1\x9D\x1F\x22\x58\x2E\x0E\xAF\x5D\x5F\x12\x57\x73\x90\x0B\x31\x84\x12\x53\xE8\x88\x0E\xFD\x2C\x0D\xE2\x05\x94\x78\x25\xD3\x37\x27\x44\x11\x93\x5A\x38\xA2\x56\xF4\x59\x12\x63\x6A\x49\x3C\x6A\xA2\x12\x73\x08\x05\x76\xEA\x3F\x31\xC4\xED\xAB\xFE\x84\xEB\x37\xEA\xC9\x24\xC9\x4E\x61\xFE\xFD\xB4\x6B\x8B\xC9\x4B\xED\xF1\xD8\x23\x51\xD6\x9F\x5A\x95\x78\xD4\x10\x8F\xD8\xFB\x63\x82\xBE\x20\x2A\xBA\x35\xDF\x3B\xC2\x7A\xB7\xAD\x9E\xAC\x62\xC5\x25\xB4\x04\xD5\x95\xC6\xC3\xE4\xB5\xCC\x36\xFD\x47\xA9\xB1\x0A\x7F\xF6\xDB\x5F\x3D\xB3\xDF\x4F\xB8\xB4\xF5\x1B\x17\xFE\x4F\x72\x61\xF2\x2B\x38\xDD\x37\x30\x28\x28\xA0\x95\xA4\x68\xA0\x51\x2C\
xC7\x84\xBC\x9C\x87\x4B\x70\x2D\x85\x0B\x05\xFD\x89\x68\x90\x5F\xF0\xC7\xE3\x56\x39\xBA\x99\x02\x7D\x41\x42\x49\x02\xEC\xAA\x92\x12\xEC\x5E\xFD\x50\xAD\x5E\xF5\xF9\x24\x0D\x31\x87\xF0\x86\x18\x96\x5C\x41\x94\xCA\x36\x12\x76\x5F\x9C\x16\x9A\x46\x52\x3E\x41\xA9\xCC\x11\xBD\x43\x44\x11\xEF\xD7\x3B\x35\x88\x51\xA4\xA9\xC2\xF8\xC7\x30\x1F\xD0\x9F\x11\x4F\x80\xA0\xAA\x44\xE4\x0B\xE6\x4E\x1B\x13\xC5\x45\xAA\x70\xBB\x7E\x44\x37\x39\x66\xE0\x1C\x49\x8D\xD0\xE6\x79\x50\x7F\x5A\x7C\x1E\xCF\x18\xB7\x67\x84\xBB\xBE\xBE\xBE\xBE\x14\x7B\x28\x0E\x61\x32\x1D\x6C\x89\x29\x57\xEC\x61\x7D\xB9\x56\x9F\x37\xD2\x0C\x37\x8F\x64\x49\xA4\xAD\x25\xEB\xA2\x4C\x69\x04\x3C\xA2\x3F\x4B\x93\xA9\x86\x21\xBA\xCD\x22\xD1\x5B\x86\x3D\xAA\x2F\xD2\xE4\x9C\x21\x79\x5D\x2C\x8D\x35\xEA\x31\xFD\x4C\x6D\xE9\x34\xDB\x3D\x45\x89\xFF\x44\x36\xD6\xE3\xFA\x33\xE3\xFA\x40\x7A\x22\xA1\x2E\x4D\xB2\x0D\xA2\x2C\x86\xED\x40\x5C\x71\x0E\x2B\x99\x76\x82\xA1\xA3\xA6\x00\x55\xF0\x04\xB2\x89\x64\xC4\xA9\x94\x2F\x11\xDA\x9F\xF5\xE7\xF5\x64\xAE\x09\xA0\x73\x28\xB4\xFC\xED\xB6\x2B\xB1\xBF\xE8\xF3\x35\xAD\x9A\x42\x73\xA2\x2D\x86\xED\xD4\x17\xC7\xB3\x33\xC9\x36\xAA\x4A\xC4\x9E\x44\x26\x9A\x4C\x0E\xC2\x9E\x4B\x0B\xB6\x6B\x69\xAC\x1B\x76\xE9\x87\x25\xC6\x9B\x43\xAC\x20\x18\x96\xD0\xA0\xEF\x3E\x09\xA3\xB4\x4B\x3F\xBD\x27\xDC\xDE\x32\xD6\x53\xFA\x25\x3D\x16\xF5\x93\xB8\xE8\x69\x34\xA1\xBB\xA9\x40\x33\xB5\xFE\xAA\x7F\x4B\xFF\x9B\xB2\xFD\xB5\x28\xDB\x84\xD7\x1F\x9E\xD1\xBF\xFD\xDB\x10\xFF\x6A\x86\x38\xC9\x1D\x97\x3D\x7A\x36\x56\xA6\x8F\x58\x1E\x22\xA4\x59\x2F\x48\x00\x71\xDF\x40\x21\x04\x12\x93\x94\x14\x5D\xAE\x17\xF7\xF9\x24\xDF\x4B\x5E\x3C\x88\x7B\x49\xAE\xA5\x14\xDB\xAB\xBF\x41\xF5\x14\x5E\x74\x1D\x8A\x2E\xCA\x24\x2F\xB0\xEC\x24\x09\x28\xD3\x6C\xF2\x26\x47\x94\xD7\x31\xFB\xF4\xD7\xFD\x4F\xD1\x55\x52\x82\x3D\x7B\x8A\x48\x2A\xE9\x3D\x49\xCF\xE9\x6F\x3C\xB5\x24\xD5\x33\x34\xC5\xF5\x62\xF8\xFE\xA6\xDF\x74\x6A\x87\xAF\x27\xC2\x4A\x4A\xB0\xBF\xEB\x2B\x44\xC0\x94\x69\x13\xC5\xFF\x95\x46\xB7\xD0\xCA\x92\x6D\xA7\xC5\xEC\x8D\xE7\xF5\xD6\x58\
xE6\x55\x42\x6E\xEC\x05\xB4\x57\x24\x82\xE6\xD3\x4C\xD3\xF2\xB9\xF4\x64\x71\xAF\x48\xB1\xCD\x17\x4B\x94\x2B\x51\x41\x94\x3B\xC5\xD8\x3F\xF4\xAE\x18\x86\xB2\xA4\xE8\x16\xAC\x26\xC3\x8B\xFA\x6F\xE0\xC9\xAE\xEE\x63\x35\xD4\x22\x67\x74\x3D\x6D\x1C\xC5\xE7\xD0\x40\xA6\xE3\x6C\x63\x1C\x50\x5C\xA8\x74\x33\x9E\x8A\x02\x63\x7B\x1D\x71\x29\xA5\x5A\x88\xCC\x60\xFB\xF5\x07\xB4\xCE\x9C\x7E\xBE\x9E\xE8\x61\x0F\xE3\xD4\x74\x48\xF2\x65\x77\x8F\xF5\x97\x45\x9D\xDF\x25\x49\x48\x9A\x03\xFB\xA7\xDE\x2D\xED\x21\x8B\x7E\x0B\x6B\x09\x6E\x6E\xA5\xD0\x2B\x55\x15\x15\xE7\x88\x87\x08\xD1\x65\x43\x6C\x8B\xF3\x25\xFD\xD8\x58\x69\x13\x7D\xBE\xB9\xF4\xF9\x21\x22\x94\x88\xFF\xE3\x39\xF8\x5F\xFA\x79\x31\xB4\x39\x21\x4A\xC4\x9C\x4D\x09\xCB\xDD\x5E\xCD\x9F\xB8\x12\x65\x43\xFD\x00\xB2\xA3\xE5\x82\xC5\x56\xC6\x15\x2C\xF7\x9F\x40\xCA\xCB\x68\x45\x21\xE5\x20\x58\x82\x13\xA7\xA0\x02\x7B\xCE\xFC\x19\xB4\xB7\x89\x60\xCA\xB0\x57\xD0\x62\x45\x2A\x44\xDC\xDD\x9F\x36\x6D\x5A\xA9\x16\x56\x23\xDE\xA4\x29\xC3\xFE\xAD\x9F\x25\x9E\xC9\xC4\xD2\xCF\xA1\xFC\x24\x45\xF8\x24\xB4\x19\x34\x1D\x94\x0E\xF0\x91\x13\xAF\xA8\x2F\xAF\x64\x05\xBE\x9A\xA8\xFE\xB9\x49\xD1\x5F\xD3\xCF\x3C\xC9\xFA\x45\xC3\x27\x69\x79\x07\x13\x54\x3F\x37\x79\xF3\x5F\xEF\x7D\xF5\x42\xBD\x3D\xB6\xFE\x8D\x44\xD5\x27\x6F\xFD\x9B\xFA\x19\x27\x57\x7D\xF7\x8D\xFF\x4F\x7C\xED\x73\xBB\x19\xFB\xB7\x7A\xD1\x78\x45\xAB\x7B\x6C\xFC\xA1\x44\xD5\x27\x6F\xFC\xDB\xBD\x68\xBC\xAA\xFA\xEE\x1B\x7F\x38\x41\xED\xDD\x8C\xFC\x3B\xBD\xAE\xBD\x57\x03\xFF\x6E\xA2\xDA\x93\xB7\xFD\x3D\xFD\x79\x27\x55\x7B\xF7\x4D\x3F\x82\x24\xA3\x24\xE6\x28\xDC\xDF\x72\x31\xA1\x3C\x43\x4C\x90\xE5\x7D\xFD\x1A\xA8\x4D\xFB\x89\x07\x8E\xB1\xFF\x97\xA9\x0E\x38\x65\xE9\xA8\x95\xDB\x65\xD8\x07\x68\x33\x51\x32\x6E\xCE\x26\x38\x95\xE9\xA3\x30\x77\xB0\x0F\xF5\x2B\xE3\x4E\xC7\xA3\xC7\xB8\x9E\x20\x43\x37\xB7\x54\x6A\x77\x41\x7B\x59\x74\x45\xC5\x60\x8F\xBB\x44\x79\x92\x59\x56\x52\x52\xE5\xA8\x42\x67\xBE\x1F\xF5\x6D\xCD\x23\x92\xD7\xFC\x71\xDF\xD6\x5C\x9E\xBC\xE6\x4F\xF4\x83\x14\xD9\xA6\x88\x97\x0E\xA3\x65\x4D\
x99\x36\xB1\x0C\x3B\xAA\xBF\x33\xFE\x14\x39\x8A\x72\xCA\x0E\x90\x11\xB3\x25\x61\xAF\x4F\xD1\xA1\x6E\xBC\xC1\xE0\x65\x08\x9C\x23\xA4\x0F\x5A\x88\xA7\xCC\xC7\xF4\xD7\xC7\x9B\xAB\xA7\xD2\xA4\x4A\xEC\xF2\x46\xB3\x02\xFE\x05\x0C\xBD\x84\x74\x7D\xA6\x2F\x10\xD1\x67\x09\xE8\xB3\x54\xCB\x15\xEC\x73\x95\x95\x36\x91\x61\xF0\x96\xCA\x18\xF2\xDC\x46\x86\xC0\x7D\x65\xB1\x9B\x79\x42\xE7\x7F\x81\x16\x30\x22\xC6\x2A\xA1\x3C\xEC\x4B\xFD\xEA\x18\x48\x34\xDC\xA4\x9C\xCB\x4B\x7B\x51\xB8\x6C\x7C\x89\xFF\x4B\xCA\x77\xAA\x6B\x3D\x71\x85\x60\x5F\xE9\x2F\xE8\x55\x3B\x6A\x89\x04\x86\x67\x82\x02\x65\x2B\xF6\x6B\xFD\xD2\x84\x7B\x29\x0A\xDC\x6E\x06\x48\x81\x95\x7C\xE3\xE4\x1B\xFD\x65\x30\x86\x5D\xCB\x11\xC1\x5E\xF6\xDA\x49\xF4\x6D\x6F\xBA\x15\xFB\x56\x5F\xAF\xA0\xA3\x0F\x2F\xB5\x7C\xA7\xBF\x22\x5E\x1C\xCD\x3A\xE5\xE2\x48\x20\xE5\x7B\xB4\xE4\x50\x76\x23\x49\x71\xC9\x39\x25\xEA\xF4\xBD\x04\xFB\x41\x79\xA7\x2F\xD1\xCE\x84\x72\x28\x7A\xC7\x25\x09\xB7\x91\x12\xF1\xE5\x8F\x7A\xEE\x94\xD7\x5D\x52\x82\x9D\xE8\x8B\x6A\x13\x6C\x53\x69\xAA\x0D\xEB\x2F\xEE\xC3\x6A\x15\x3B\x3E\x89\xBA\x3A\xA2\x0F\xF5\x61\x57\x27\xA9\xBC\xA4\x04\x6B\x35\x8C\xEE\x95\x48\x8B\xD3\x8E\x6B\x0C\xF3\x7B\x2B\x0B\xA7\x36\x93\x2C\x47\x52\x0D\x51\x4E\xEF\x5E\x1A\xAE\x35\xC4\x8B\x26\x69\xB6\xC4\x11\xB1\xCE\x30\x2E\xF9\xC4\x4A\x5C\xAD\x62\x6A\xAD\x37\x5C\x15\x2F\x21\x66\x53\x44\x0D\xCE\xB2\xBF\x80\x94\xB8\xCC\x90\xAB\xC0\x3F\x87\x95\x29\xC1\x2E\x37\x28\x6F\xBB\xA2\x2F\x2C\x68\xAE\x70\x25\x62\x08\x15\x8A\xC8\x0C\xE8\x43\x11\x4A\x4E\x5B\xC9\xE0\x41\x71\xEB\x49\x8D\x8E\xC4\x75\x55\xDC\xCD\xD7\x04\x58\x25\xD8\x15\x06\xB5\xA9\xC4\x31\x04\x52\x61\xD1\xA3\x7A\xF9\xF1\x6C\xF4\xE8\x9E\x60\xD9\x6E\xF8\x38\x8A\x27\xD2\xED\x23\x58\x8E\xA1\x5B\x4A\xB5\x75\x28\x76\xC6\x14\x15\xA1\x7D\x31\xEC\x4A\x83\x74\x77\x40\x3E\x9C\xAC\x11\x4D\xBE\xD8\x1D\x4D\x09\xAE\x3E\xFF\xDD\x60\x98\x9E\x34\xCF\x4C\xBC\xA5\x8E\x98\x2C\x7E\x4C\x4D\x1A\x3D\x05\x23\xAB\xF0\xA7\xF9\xF1\x06\x16\xDB\x68\xC8\x13\x93\x26\xB7\x78\xFD\xC4\x34\xDA\x2F\x5E\x5B\x10\xD9\xF6\x2A\x03\xD6\x2D\
x61\x8A\xD3\x7F\xEC\x6A\xC3\xE9\xDD\xE2\x4E\xA1\xB9\x52\xEC\x1A\x43\x6D\x0F\x2D\x0D\x12\x38\x37\x5B\xD5\xD6\xD9\xC1\x1E\xF4\x2B\x76\xAD\x61\x2C\xC2\x0F\x12\x1C\xC9\x91\x34\x25\x1D\xEF\x26\x3D\x7F\x56\x6C\x68\x5D\x67\x98\x9C\x2C\x6F\x92\x03\xE7\x98\x79\x16\xDD\x3F\xC6\x36\x19\x46\x25\x29\x25\xE9\x7D\x0C\x21\xDB\xEF\x0D\x35\x3D\x76\x87\x54\x5C\x69\x2F\x8C\x42\xB1\x2F\xAE\x37\x4C\x50\x15\x89\x0E\xBA\x25\x61\xD3\xCD\x05\x85\xD8\x05\x8E\x3F\x18\x8A\x93\x97\x10\x3D\xFC\xBF\xC1\x30\x3E\x29\xE9\x53\xE8\x19\x44\x3D\x57\x83\x33\x04\x95\x4C\x3C\x61\x37\x1A\x4A\xBB\xC9\x3F\x8F\x60\x38\xD2\x8B\xFB\x27\xE1\x4C\x29\xB6\xD9\x50\xDE\x0D\xEA\x64\xDA\xEF\xC7\x83\x2C\xA1\x66\x19\xEC\x26\x83\xB3\x9B\x4C\x73\xC8\x86\x46\x44\x20\xB6\x45\xD3\x5E\x35\x26\xBA\x7B\x81\xDD\x6C\x70\xA9\xB0\xE6\x88\xDF\x70\x9A\x41\xE0\x3E\x92\x6A\x48\x34\x41\x6F\x31\x8C\x4C\x9E\x01\x4D\xCB\x78\x8E\xB8\xD5\xA0\xBE\x65\x32\x0D\x17\x84\xE8\x44\xBF\x38\x66\x22\x23\x28\xF3\xA8\x0C\xDA\x04\x33\xFB\x36\xC3\x06\x98\xF8\xBB\x37\x62\x8E\x69\x0C\x1E\x20\xBA\x91\x6D\x62\xBA\x66\xD3\x5C\xF3\x8D\x19\x15\x35\xD2\xDD\x76\x79\x7B\x5C\x03\xC5\xFE\x68\xB8\x5E\xD2\x62\x71\x6D\x3A\x27\x10\xF4\x57\x54\xCC\xA1\x43\x94\xAF\xFC\x64\xDA\x37\x2C\x49\xBB\x6A\x83\x7E\xD2\xDB\x5D\xC3\x24\x04\xF1\x5B\x4A\xB7\xF7\x86\xAA\x91\xA7\x9A\xAA\x3B\x7A\x43\xD5\x88\x53\x4D\xD5\x56\xC3\xD9\x92\xA2\x68\xC4\x99\xC9\x7E\x9C\x65\x27\x85\x48\xBF\x4F\xBE\xEB\x26\xE8\x34\xB1\x2A\xAD\xAC\x4A\xC0\x98\x77\x1A\xA4\x33\xC3\x89\x3E\x9F\xA0\x70\x08\x29\x77\x69\xC2\xD2\xB5\x22\x17\xBB\xCB\x40\x49\xF6\x87\x50\xDE\x05\xD2\xF7\x11\xCF\x66\xE8\x50\x30\x4E\xD8\x94\x75\xD3\x1D\xC9\xEA\x52\x64\x10\x84\x40\x88\x2D\xC3\xEE\x36\x5C\x28\x53\x7B\x81\xA2\xA2\x64\x99\x25\x9C\xE8\xF2\xBC\xDB\x8E\xB8\xC7\x40\x27\xD7\x00\xC2\xDF\x93\x27\x0B\xD5\xE0\x5E\x8E\x60\x12\x35\x4F\x52\x41\xDD\x9B\x7A\xEA\xC6\xFC\xC9\x30\x29\xD6\x7B\x53\xA5\x2F\xC2\x75\x5F\xB0\x2A\xBF\x34\x04\x6D\x86\x8B\x7A\x20\x5B\xEE\x9B\x1E\xC6\xA4\x2C\x71\x8F\xF3\x88\x3F\x92\x96\x5F\x43\x30\x7E\x89\xD9\xE2\xCA\xC7\xDA\xD1\
x70\xA9\x32\xF7\x60\x27\x27\xE5\x10\x35\x5D\x1D\x86\xB2\x24\x7A\x7E\x95\x16\x54\x8A\x75\x76\xA7\x48\xC5\xDB\x66\x42\x57\x6E\xEB\x95\xBA\xBD\xD7\x50\x99\x1C\xAB\x67\x2B\xE7\x3E\xC3\xB4\x6E\xB2\x9F\x84\xA1\x73\xBF\xA1\xA1\x87\xDB\x34\xDA\x9B\x40\x49\xEF\x1B\x28\x6F\xEE\x24\xD8\x08\x7A\xC0\xB0\xB6\xD7\x57\x1D\x84\xA2\x7A\x77\xC9\x41\xC4\xEC\xFE\x76\x8A\x88\x22\x9B\x29\xDB\x0D\x97\xFE\x72\x44\x94\x94\x60\x0F\xF6\x5D\xFD\xDD\xDE\x87\x91\xEB\x7F\xC8\xB0\xAE\xCF\x46\xA1\xFB\xDB\x2F\xAA\x61\x78\xD8\xB0\xAA\xCF\x87\x21\x39\x15\x25\x25\xD8\x23\xC8\xC0\x47\x6B\x5E\x8A\xE5\x84\xFF\x93\xDC\x44\x3F\x57\xAA\x3E\x27\x57\x9D\xC3\x3F\x8A\xD6\xFD\xDA\x6C\xF1\x97\xD3\x13\x9D\xE2\x3F\x66\x38\x23\x61\x6E\xF9\x91\x55\x7C\x8E\xC7\x93\xD4\x37\x35\x10\xE4\x5A\xE6\x93\x3E\x0E\xED\x61\x8A\x71\x61\x61\xA5\xCA\xBD\xC3\x30\x28\x61\x6E\x71\x4B\x40\x14\x56\x4F\x18\x4A\x12\x62\xCC\xA2\x83\xAA\x6E\xC0\xFE\x6C\xB0\x27\x44\x9C\x86\x93\xFE\x52\xEC\x2F\x86\x6C\x45\xAA\x78\x05\x19\xDB\x69\xE8\xA7\x80\xAD\x92\x80\x4F\x1A\x06\x28\x80\xB3\x83\x1C\x19\x20\x2F\x26\x4A\xB1\x5D\xAA\x76\x8A\xAD\x49\xAC\x0D\x12\x6E\xED\xEE\x46\x22\x57\x18\x85\x99\x78\x30\x6A\xC5\xCC\xC4\x99\xA6\xD8\x1B\x80\x2E\x43\x61\x62\x2C\x82\x69\x20\x4A\xB1\xA7\x0C\x45\x09\x93\xE7\x10\x5E\xDA\x4F\x33\x62\x77\x3D\x6D\x70\x27\xC4\x11\xFF\x50\xBF\x75\x43\x2A\x17\xFB\xAB\xAA\xE3\x26\xD3\x81\x60\x88\x23\x50\xFE\x52\xEC\x19\x43\x8E\x22\x75\x9A\x1F\xE7\x38\x61\x09\xB3\xC7\x70\x97\x4E\x01\x17\xDA\x51\x1B\xF2\x7A\x09\x96\xA5\x19\x36\xF1\x26\x55\xA2\x1D\xF3\xB8\xA9\x13\xF7\x91\x4C\x04\x10\xFB\x28\xD1\x0C\x93\x64\x7A\x4F\x37\x3B\x7B\x57\x8A\xF0\x5F\xD2\xF3\xB1\x04\x54\xF6\x50\x5C\x19\xB6\x17\xD9\x5A\xF1\x9B\x70\x7E\x82\x4D\xD2\x4D\x02\xD7\xFA\x88\xE6\x79\xB8\x3F\xA4\xDD\x59\xEB\x19\x13\xDB\x67\xD8\xAD\x4B\xFC\x30\x5A\x3C\x40\x2F\x2D\x2B\xC1\xFA\xA2\xDA\x5E\xA0\xCA\x57\xC0\xBA\xC5\xE9\x4D\x39\x92\x85\xD1\x67\x4D\xC1\x9E\x35\xB4\x69\x59\x7B\x0A\x2D\x7E\x0F\x5A\x9A\x63\xC9\xD9\xF8\xFF\x1A\x7B\x3F\x67\xD8\x0B\x55\x42\x91\xE4\x66\x90\xD2\x43\xE4\x93\xE8\x0C\
x8D\xD4\xEE\x46\x91\xC6\x90\x7E\xEE\x96\xFC\xCD\xE0\x8F\x97\x81\xD3\x49\x8A\x63\x4B\x7F\x46\x7A\xA3\x82\xFE\xEF\x86\xFB\x93\xEC\x92\xFC\x72\x9C\xD1\xCD\x96\x4B\x55\x29\xF6\xBC\xE1\x1A\x78\x2A\x7B\x48\xBC\xEC\xA1\xFA\x92\x59\x34\x49\x9A\xA8\x2F\xA0\x5D\x47\x59\xA1\x13\xF5\x64\xF3\x44\xAF\x97\xF0\x7B\xA6\x09\x56\xD5\x44\xCA\x37\x49\x30\x32\xC5\x5E\x4E\xEC\x8D\xE2\x1F\x86\x4D\xFA\x6E\xFD\x3B\x8C\xFA\x35\xC9\xCC\xFF\x3F\x64\xEF\x8B\x86\xBF\xC7\xAB\x2F\x8D\xD3\x8B\x91\xBF\xA6\x61\xE9\xDB\xEE\xDC\x6F\x38\xA0\x3B\x09\x1F\x22\xBF\xF5\x6B\x2F\xFB\xF5\x9F\x86\x89\xD2\xC6\xC5\xD4\x72\x69\x91\xD1\xD3\x5E\x8F\x80\x27\xAC\x25\x68\x8A\x95\x0B\x79\xC9\xB0\x4E\xAB\x03\x7A\x79\x61\xED\xA7\xD4\x8A\xAE\xEF\xC5\x2E\xAF\xFD\xCB\x50\x1D\x43\x9E\x33\xB5\xFC\xBF\x28\xF3\x80\x81\xF9\x09\x0D\x51\xBE\x0A\x45\xB2\x5D\x7D\xD3\x6F\xCE\xD4\x72\x24\xC1\x95\x74\xBF\x6C\xB0\xC6\xCA\x58\x25\x10\x8E\xBD\x62\x58\x97\x1A\x77\xD4\xAA\x04\xF4\xBD\x17\xE4\xF8\x47\x79\x01\x3C\x88\xEA\x45\xEE\x93\x4E\x85\xE7\xEB\x53\xDF\x50\xD1\x87\x50\x9F\xD4\x75\x12\xAE\xAE\x4F\x4D\xB3\x1D\xBD\x38\x77\xFF\x25\x06\x43\x3E\xDB\xFF\xB7\xE1\xB0\xA1\xB7\xD3\x40\x63\x17\xF6\xE1\x17\x2D\xE3\xE7\x81\xA6\xEE\x5F\x84\x2E\xD9\xF5\xD5\x7F\xC5\x63\x7D\x49\xE4\x4F\x60\xB2\xBE\xED\x3B\x99\xCB\x5E\x35\x70\x31\xF9\x3B\x85\x96\x36\xEC\x7A\xA1\x3B\xE4\xFB\x3F\x2A\x5B\xBC\x4C\x85\xC1\x20\x6F\x1D\x65\xCA\x03\x2D\xA4\x1A\x5E\x33\xAC\x88\x61\xF6\x50\x69\xF7\x7E\xEA\xE2\x49\x4A\x7C\x26\x85\x2A\x3E\x68\xF0\xC6\x72\xD4\xE0\x0C\x47\xE2\x7E\xB1\xFE\x59\x3D\xB7\x3A\x51\x37\x24\x6E\xE4\xEB\x06\xC9\xAD\x9D\x88\xD8\x48\xD3\x4D\xAC\xD0\xB9\xB3\x68\xAE\x51\x18\x3D\x15\x74\xDA\x44\x71\xBF\x58\x7C\xDA\x24\x3E\x72\x42\xC6\x53\xD2\x74\x59\x5D\xBF\x81\xEE\x29\x20\x44\xE9\xE2\xBB\x06\x77\x3A\x4D\x37\x95\x62\x6F\x1A\x2E\x3A\x09\x6A\xA4\xE7\x3B\x38\xE9\x0F\x31\x44\x3C\x2D\xCA\x54\x99\x92\xFF\x18\x86\x24\xA0\x44\x89\x29\xD1\xF1\x16\xDA\x7B\x8D\x5D\x6C\xD0\x5E\x7E\x88\xDE\xD8\x49\x70\xE0\x79\x08\x5D\x1E\x93\x93\x56\xA1\xEC\xD8\xDB\x86\xCD\xF1\x16\x98\x38\xBD\xEA\x70\
x3F\x2E\xD8\x2E\x78\x3D\x47\x30\xC8\x60\xAE\x4C\x38\x09\x45\xBF\x85\x95\xF2\x73\xC3\xC4\x13\x55\x85\x33\x2C\xBE\xB6\x38\x1C\xEC\x30\xDA\x84\x96\x49\x3E\x87\xF2\x32\x44\x7D\x29\xF6\x8E\x61\xFE\x7F\x69\x69\x69\x0A\x8A\x33\x03\xDF\xD5\xD4\x38\x85\x90\x10\xDF\x33\x5C\x99\xA4\x8F\x18\xF1\x76\xC9\xA9\xEC\x95\x23\x9A\x81\x9C\x82\xA4\x23\xF6\x3E\xDA\x18\x97\x13\x90\x1F\x9C\xB9\x74\xEC\x2E\x1A\xF6\x81\x61\x8E\x0A\xA7\x96\xC3\x99\x1A\x7F\x88\x9D\xCD\x28\x1C\xCD\x25\x64\xA6\xE4\x47\xFA\xD8\x87\x86\x31\xEA\x24\x7F\x88\x4D\xE4\x2C\x24\x41\xD6\x8F\x34\x59\x63\x5E\xF1\x7A\xCC\xFA\x71\xF4\xF2\x99\x94\x14\xD7\x80\x6E\xF2\x7E\x62\x58\xAA\x4A\x92\xEE\xEC\xCD\x8E\xDE\xF7\x20\xBA\xEB\x88\x1E\x2F\x7C\x88\xB7\xB9\x8E\xCA\x2B\x0A\x55\x15\x27\x75\x8B\xE8\x53\xCD\x9D\xA4\x28\x75\xB3\x68\xE9\x7A\xCA\x49\x95\x76\x4C\x4B\x90\xD2\x63\x60\x4F\x3D\x26\xC9\xE5\xE3\x86\x29\x1A\x09\x14\x24\xF0\x5E\xF6\xB9\x62\x57\xEB\x33\x43\x95\x0A\x09\xDD\x66\x8C\xB1\x69\x4F\x4D\xF9\x3C\x7A\x77\x50\x4A\x9A\x45\xAC\xD4\xDC\x7B\x88\x46\xBB\x61\x82\x2F\xD0\xA9\x5B\x94\xF7\x56\xE2\x9A\xAB\x64\x5F\x6A\xEE\x9E\x49\xCE\xAA\x92\x36\xB7\x0C\xFB\x0A\xDD\xDB\x9C\x4B\x07\x7B\xC4\xFD\xDA\xB0\x28\xF1\x96\x61\x14\x3D\xF9\xDD\x57\x09\x43\x3E\xC3\x4F\xE0\x39\x1A\xFB\x46\xD5\x38\xE9\x1A\x45\x45\xC5\x5C\xA2\x99\x93\x97\x97\xDF\x1A\x6C\xEA\x4E\x0C\x05\x10\x4B\xB0\xA5\xD8\x77\x06\xF4\xD2\x6F\x32\x4D\xAD\x20\x18\x4E\x10\x24\xEC\x5C\x7A\x52\x0B\x47\xB0\xB1\x77\x08\xF2\x38\xF5\x89\x99\x55\x86\x7D\x6F\xD8\x00\x35\xFC\xB6\x3C\x44\x32\x84\x4F\xDA\xE7\xEC\x1B\x27\xEA\x9A\x07\x62\xF2\xE4\xC2\x7E\x30\xAC\xED\x8E\x98\x69\x34\x23\xEE\xBB\xF6\x2D\x51\xD8\x8F\x88\xC1\xE2\x6F\x82\x69\x21\xA5\xD8\x89\xA4\xB8\xF2\xAD\xB1\xD8\x6E\x72\x18\xD9\x16\x09\x71\x93\xCD\x2D\xF9\xFA\x15\x16\x31\x0C\x4E\x46\x15\x4D\x71\x38\x49\x89\x57\xD3\xB0\xD6\x14\x67\x12\x34\xE9\x8E\xE6\xC4\x3A\x5A\x72\x2F\x87\xAD\x49\x19\x94\x04\x73\x16\xD1\x20\x88\x67\x6C\x6D\xCA\x90\x24\x18\x67\x13\x5C\x8C\x60\x6C\x5D\x8A\xF4\xC4\x56\xC1\xDD\xD1\x9B\x43\x71\xB0\x52\x6C\x7D\x4A\x45\x32\xF4\
x9E\x2F\xFB\x5C\x96\x72\x66\xB2\xCC\xDD\xDE\x46\xBE\x3C\x25\x4F\x2D\x88\xC8\x40\xD0\x4F\xD6\xB7\x94\x62\x57\xA4\x9C\xAF\x4A\x51\x7A\xCF\x92\x85\x40\xCF\xEE\xCF\xA2\x31\x51\x31\x5D\x99\x32\xAC\x17\x65\x46\x2F\x40\x6D\x48\x29\xD4\xE8\x31\xF1\xCC\x43\xA4\x91\x28\xC5\x36\xCA\x63\x4A\xE3\x7E\x82\xF5\x12\x72\x83\x63\x4E\xBD\x64\x79\x77\x55\xCA\xB8\x84\x98\xDD\x3B\xCE\x93\x73\x5F\x9D\xB2\x38\x61\xEE\x6E\xBD\x93\xF5\x22\x86\x86\xE0\x9A\x94\xCA\xC4\xCD\xC0\x29\x19\xD4\x1D\x75\xD7\xA6\x8C\x4F\x98\x7D\x0A\x9D\x30\x77\x9C\x70\xB9\x0E\xF5\xA2\x3C\xF2\xC9\x7B\x71\x13\xEA\x45\x2D\x66\xEF\x7A\xF1\xF7\x29\xD5\x09\x73\x77\xE7\x2E\x4E\xC5\x3D\x65\xD8\xF5\x68\x20\xE2\x8A\xF8\x79\x06\xE2\x0F\x29\x73\x13\xB7\x0F\xC5\x13\x9A\x1C\x3D\xDF\x7B\xBD\x21\x65\x54\xB7\xC5\x6A\x44\x9C\xDC\x5F\x37\xA6\x94\x24\x1C\xD7\x55\x6A\x40\x29\xB6\x39\x65\x56\x2F\x26\xD5\x49\x74\xF3\x4D\xA8\x66\x2D\xC1\xAB\xD4\x80\x52\x6C\xCB\xC9\x4D\xE7\x9B\x53\x96\xF5\x86\xD0\x9F\x67\x30\x6F\x41\x02\xB1\xFB\xCA\xE2\x99\xFC\xD6\x94\x96\x1E\x2E\x3B\xAA\x8A\xE8\xBD\x97\xA5\x98\xE1\x94\xF4\xFA\xE3\x6D\x29\x7F\x3C\x39\x4F\x4F\x72\x99\x27\xE1\xEC\x29\x9A\xA5\x17\xEE\xBA\xA2\xB8\xF2\x76\xC1\x1F\x53\xB6\xFC\x6F\x12\x58\x52\x82\xDD\x7E\x4A\x69\xEB\xD9\xB1\x98\x92\xB6\x3B\x52\x6E\xFF\x25\x68\x4B\xEE\xC8\x2B\x6E\x64\xB7\xA6\xDC\xFC\x4B\x8C\x6C\x2F\x28\x2C\x29\xC1\xEE\x4C\x91\xDE\xCA\x29\x76\xF8\x24\x27\x57\xA5\x1A\x8F\xF1\x8A\x6F\x5C\xDC\x95\x22\x3D\x4D\x13\x30\x38\x9A\x41\xAF\xDB\xD4\x8E\x3E\xB0\xBB\x11\x92\x90\xCC\xD1\x42\x4A\x69\xEC\x5E\x9E\x8C\x74\x0F\x32\x08\xEB\x43\x7E\xBF\x58\x8E\xEA\x21\xBE\x20\x6B\x80\xCF\x18\x64\x68\x5F\xC8\x4B\x30\x2C\xCC\x0C\x32\xB4\x97\x60\x59\xC2\x37\xBC\xAE\x05\x66\x5E\x50\x17\xA2\xB8\x90\xC3\xEB\xC7\xA9\x86\xF1\x23\x46\xB8\xDC\x2E\xF7\xF0\xA1\x43\xCB\xDD\xE5\xEE\x11\xEE\xF2\x11\xEE\xB1\xA3\xC6\x8E\x18\x35\x74\xC4\x59\xA3\xCB\x47\x8D\xAD\xF3\x8D\x3E\x6B\xE4\xF0\x11\xAB\x88\xE6\xE0\x88\x55\x51\x8C\xB3\xCE\x3C\x73\xE4\x48\xD7\x88\x11\x23\xFF\x5F\x00\x00\x00\xFF\xFF\x43\x04\x8E\x93\x3A\x2A\x06\x00")
diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm
new file mode 100644
index 00000000..3cc20eb2
Binary files /dev/null and b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm differ
diff --git a/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
new file mode 100644
index 00000000..4a8d096c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go
@@ -0,0 +1,1287 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package wasm contains an IR->WASM compiler backend.
+package wasm
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/compiler/wasm/opa"
+ "github.com/open-policy-agent/opa/internal/ir"
+ "github.com/open-policy-agent/opa/internal/wasm/encoding"
+ "github.com/open-policy-agent/opa/internal/wasm/instruction"
+ "github.com/open-policy-agent/opa/internal/wasm/module"
+ "github.com/open-policy-agent/opa/internal/wasm/types"
+)
+
// Tags identifying OPA value types at runtime. The explicit iota + 1 start
// keeps 0 unused (presumably reserved/invalid in the wasm runtime — confirm
// against the runtime sources shipped as opa.wasm).
const (
	opaTypeNull int32 = iota + 1
	opaTypeBoolean
	opaTypeNumber
	opaTypeString
	opaTypeArray
	opaTypeObject
)

// Names of helper functions exported by the precompiled OPA wasm runtime.
// The compiler emits calls to these for value construction (opa_null,
// opa_object, ...), comparison/iteration (opa_value_*), error reporting
// (opa_abort, opa_runtime_error), and memoization of function results
// (opa_memoize_*).
const (
	opaFuncPrefix = "opa_"
	opaAbort = "opa_abort"
	opaRuntimeError = "opa_runtime_error"
	opaJSONParse = "opa_json_parse"
	opaNull = "opa_null"
	opaBoolean = "opa_boolean"
	opaNumberInt = "opa_number_int"
	opaNumberFloat = "opa_number_float"
	opaNumberRef = "opa_number_ref"
	opaNumberSize = "opa_number_size"
	opaArrayWithCap = "opa_array_with_cap"
	opaArrayAppend = "opa_array_append"
	opaObject = "opa_object"
	opaObjectInsert = "opa_object_insert"
	opaSet = "opa_set"
	opaSetAdd = "opa_set_add"
	opaStringTerminated = "opa_string_terminated"
	opaValueBooleanSet = "opa_value_boolean_set"
	opaValueNumberSetInt = "opa_value_number_set_int"
	opaValueCompare = "opa_value_compare"
	opaValueGet = "opa_value_get"
	opaValueIter = "opa_value_iter"
	opaValueLength = "opa_value_length"
	opaValueMerge = "opa_value_merge"
	opaValueShallowCopy = "opa_value_shallow_copy"
	opaValueType = "opa_value_type"
	opaMemoizeInit = "opa_memoize_init"
	opaMemoizePush = "opa_memoize_push"
	opaMemoizePop = "opa_memoize_pop"
	opaMemoizeInsert = "opa_memoize_insert"
	opaMemoizeGet = "opa_memoize_get"
)
+
// builtinsFunctions maps Rego built-in names to the native implementations
// exported by the precompiled OPA wasm runtime. Built-ins present here are
// compiled to direct calls; built-ins absent from this map become external
// call-outs to the host SDK (see compileExternalFuncDecls).
var builtinsFunctions = map[string]string{
	ast.Plus.Name: "opa_arith_plus",
	ast.Minus.Name: "opa_arith_minus",
	ast.Multiply.Name: "opa_arith_multiply",
	ast.Divide.Name: "opa_arith_divide",
	ast.Abs.Name: "opa_arith_abs",
	ast.Round.Name: "opa_arith_round",
	ast.Ceil.Name: "opa_arith_ceil",
	ast.Floor.Name: "opa_arith_floor",
	ast.Rem.Name: "opa_arith_rem",
	ast.ArrayConcat.Name: "opa_array_concat",
	ast.ArraySlice.Name: "opa_array_slice",
	ast.SetDiff.Name: "opa_set_diff",
	ast.And.Name: "opa_set_intersection",
	ast.Or.Name: "opa_set_union",
	ast.Intersection.Name: "opa_sets_intersection",
	ast.Union.Name: "opa_sets_union",
	ast.IsNumber.Name: "opa_types_is_number",
	ast.IsString.Name: "opa_types_is_string",
	ast.IsBoolean.Name: "opa_types_is_boolean",
	ast.IsArray.Name: "opa_types_is_array",
	ast.IsSet.Name: "opa_types_is_set",
	ast.IsObject.Name: "opa_types_is_object",
	ast.IsNull.Name: "opa_types_is_null",
	ast.TypeNameBuiltin.Name: "opa_types_name",
	ast.BitsOr.Name: "opa_bits_or",
	ast.BitsAnd.Name: "opa_bits_and",
	ast.BitsNegate.Name: "opa_bits_negate",
	ast.BitsXOr.Name: "opa_bits_xor",
	ast.BitsShiftLeft.Name: "opa_bits_shiftleft",
	ast.BitsShiftRight.Name: "opa_bits_shiftright",
	ast.Count.Name: "opa_agg_count",
	ast.Sum.Name: "opa_agg_sum",
	ast.Product.Name: "opa_agg_product",
	ast.Max.Name: "opa_agg_max",
	ast.Min.Name: "opa_agg_min",
	ast.Sort.Name: "opa_agg_sort",
	ast.All.Name: "opa_agg_all",
	ast.Any.Name: "opa_agg_any",
	ast.Base64IsValid.Name: "opa_base64_is_valid",
	ast.Base64Decode.Name: "opa_base64_decode",
	ast.Base64Encode.Name: "opa_base64_encode",
	ast.Base64UrlEncode.Name: "opa_base64_url_encode",
	ast.Base64UrlDecode.Name: "opa_base64_url_decode",
	ast.NetCIDRContains.Name: "opa_cidr_contains",
	// NOTE: overlap shares the contains implementation (same runtime check).
	ast.NetCIDROverlap.Name: "opa_cidr_contains",
	ast.NetCIDRIntersects.Name: "opa_cidr_intersects",
	ast.GlobMatch.Name: "opa_glob_match",
	ast.JSONMarshal.Name: "opa_json_marshal",
	ast.JSONUnmarshal.Name: "opa_json_unmarshal",
	ast.ObjectFilter.Name: "builtin_object_filter",
	ast.ObjectGet.Name: "builtin_object_get",
	ast.ObjectRemove.Name: "builtin_object_remove",
	ast.ObjectUnion.Name: "builtin_object_union",
	ast.Concat.Name: "opa_strings_concat",
	ast.FormatInt.Name: "opa_strings_format_int",
	ast.IndexOf.Name: "opa_strings_indexof",
	ast.Substring.Name: "opa_strings_substring",
	ast.Lower.Name: "opa_strings_lower",
	ast.Upper.Name: "opa_strings_upper",
	ast.Contains.Name: "opa_strings_contains",
	ast.StartsWith.Name: "opa_strings_startswith",
	ast.EndsWith.Name: "opa_strings_endswith",
	ast.Split.Name: "opa_strings_split",
	ast.Replace.Name: "opa_strings_replace",
	ast.ReplaceN.Name: "opa_strings_replace_n",
	ast.Trim.Name: "opa_strings_trim",
	ast.TrimLeft.Name: "opa_strings_trim_left",
	ast.TrimPrefix.Name: "opa_strings_trim_prefix",
	ast.TrimRight.Name: "opa_strings_trim_right",
	ast.TrimSuffix.Name: "opa_strings_trim_suffix",
	ast.TrimSpace.Name: "opa_strings_trim_space",
	ast.NumbersRange.Name: "opa_numbers_range",
	ast.ToNumber.Name: "opa_to_number",
	ast.WalkBuiltin.Name: "opa_value_transitive_closure",
	ast.ReachableBuiltin.Name: "builtin_graph_reachable",
	ast.RegexIsValid.Name: "opa_regex_is_valid",
	ast.RegexMatch.Name: "opa_regex_match",
	ast.RegexMatchDeprecated.Name: "opa_regex_match",
	ast.RegexFindAllStringSubmatch.Name: "opa_regex_find_all_string_submatch",
	ast.JSONRemove.Name: "builtin_json_remove",
	ast.JSONFilter.Name: "builtin_json_filter",
}

// builtinDispatchers names the runtime trampolines used to invoke external
// (SDK-implemented) built-ins, indexed by the built-in's arity (0 through 4).
var builtinDispatchers = [...]string{
	"opa_builtin0",
	"opa_builtin1",
	"opa_builtin2",
	"opa_builtin3",
	"opa_builtin4",
}
+
// Compiler implements an IR->WASM compiler backend. A Compiler is single-use:
// configure it with WithPolicy and call Compile once.
type Compiler struct {
	stages []func() error // compiler stages to execute
	errors []error // compilation errors encountered

	policy *ir.Policy // input policy to compile
	module *module.Module // output WASM module
	code *module.CodeEntry // output WASM code

	planfuncs map[string]struct{} // names of functions inside the plan
	builtinStringAddrs map[int]uint32 // addresses of built-in string constants
	externalFuncNameAddrs map[string]int32 // addresses of required built-in function names for listing
	externalFuncs map[string]int32 // required built-in function ids
	entrypointNameAddrs map[string]int32 // addresses of available entrypoint names for listing
	entrypoints map[string]int32 // available entrypoint ids
	stringOffset int32 // null-terminated string data base offset
	stringAddrs []uint32 // null-terminated string constant addresses
	fileAddrs []uint32 // null-terminated string constant addresses, used for file names
	funcs map[string]uint32 // maps imported and exported function names to function indices

	nextLocal uint32 // next free wasm local index for the function being compiled
	locals map[ir.Local]uint32 // maps IR locals to wasm local indices (per function)
	lctx uint32 // local pointing to eval context
	lrs uint32 // local pointing to result set
}
+
// Identifiers for the runtime error messages embedded in the module's data
// section; used as keys into Compiler.builtinStringAddrs.
const (
	errVarAssignConflict int = iota
	errObjectInsertConflict
	errIllegalEntrypoint
)

// errorMessages pairs each error id with the message text that compileStrings
// writes (null-terminated) into linear memory for opa_abort to report.
var errorMessages = [...]struct {
	id int
	message string
}{
	{errVarAssignConflict, "var assignment conflict"},
	{errObjectInsertConflict, "object insert conflict"},
	{errIllegalEntrypoint, "internal: illegal entrypoint id"},
}
+
+// New returns a new compiler object.
+func New() *Compiler {
+ c := &Compiler{}
+ c.stages = []func() error{
+ c.initModule,
+ c.compileStrings,
+ c.compileExternalFuncDecls,
+ c.compileEntrypointDecls,
+ c.compileFuncs,
+ c.compilePlans,
+ }
+ return c
+}
+
// WithPolicy sets the policy to compile and returns the compiler to allow
// call chaining.
func (c *Compiler) WithPolicy(p *ir.Policy) *Compiler {
	c.policy = p
	return c
}
+
+// Compile returns a compiled WASM module.
+func (c *Compiler) Compile() (*module.Module, error) {
+
+ for _, stage := range c.stages {
+ if err := stage(); err != nil {
+ return nil, err
+ } else if len(c.errors) > 0 {
+ return nil, c.errors[0] // TODO(tsandall) return all errors.
+ }
+ }
+
+ return c.module, nil
+}
+
// initModule instantiates the module from the pre-compiled OPA binary. The
// module is then updated to include declarations for all of the functions that
// are about to be compiled.
func (c *Compiler) initModule() error {

	bs, err := opa.Bytes()
	if err != nil {
		return err
	}

	c.module, err = encoding.ReadModule(bytes.NewReader(bs))
	if err != nil {
		return err
	}

	// Index the runtime's named functions so later stages can resolve call
	// targets by name (see c.function usage throughout).
	c.funcs = make(map[string]uint32)
	for _, fn := range c.module.Names.Functions {
		c.funcs[fn.Name] = fn.Index
	}

	c.planfuncs = map[string]struct{}{}

	// Declare one wasm function per policy function. Every parameter and the
	// result are typed I32.
	for _, fn := range c.policy.Funcs.Funcs {

		params := make([]types.ValueType, len(fn.Params))
		for i := 0; i < len(params); i++ {
			params[i] = types.I32
		}

		tpe := module.FunctionType{
			Params: params,
			Results: []types.ValueType{types.I32},
		}

		c.emitFunctionDecl(fn.Name, tpe, false)
		c.planfuncs[fn.Name] = struct{}{}
	}

	// Declare the exported entry points the host invokes: eval(ctx) plus the
	// listing functions builtins() and entrypoints() (bodies are emitted by
	// later stages).
	c.emitFunctionDecl("eval", module.FunctionType{
		Params: []types.ValueType{types.I32},
		Results: []types.ValueType{types.I32},
	}, true)

	c.emitFunctionDecl("builtins", module.FunctionType{
		Params: nil,
		Results: []types.ValueType{types.I32},
	}, true)

	c.emitFunctionDecl("entrypoints", module.FunctionType{
		Params: nil,
		Results: []types.ValueType{types.I32},
	}, true)

	return nil
}
+
+// compileStrings compiles string constants into the data section of the module.
+// The strings are indexed for lookups in later stages.
+func (c *Compiler) compileStrings() error {
+
+ var err error
+ c.stringOffset, err = getLowestFreeDataSegmentOffset(c.module)
+ if err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ c.stringAddrs = make([]uint32, len(c.policy.Static.Strings))
+
+ for i, s := range c.policy.Static.Strings {
+ addr := uint32(buf.Len()) + uint32(c.stringOffset)
+ buf.WriteString(s.Value)
+ buf.WriteByte(0)
+ c.stringAddrs[i] = addr
+ }
+
+ // NOTE(sr): All files that have been consulted in planning are recorded,
+ // regardless of their potential in generating runtime errors.
+ c.fileAddrs = make([]uint32, len(c.policy.Static.Files))
+
+ for i, file := range c.policy.Static.Files {
+ addr := uint32(buf.Len()) + uint32(c.stringOffset)
+ buf.WriteString(file.Value)
+ buf.WriteByte(0)
+ c.fileAddrs[i] = addr
+ }
+
+ c.externalFuncNameAddrs = make(map[string]int32)
+
+ for _, decl := range c.policy.Static.BuiltinFuncs {
+ if _, ok := builtinsFunctions[decl.Name]; !ok {
+ addr := int32(buf.Len()) + int32(c.stringOffset)
+ buf.WriteString(decl.Name)
+ buf.WriteByte(0)
+ c.externalFuncNameAddrs[decl.Name] = addr
+ }
+ }
+
+ c.entrypointNameAddrs = make(map[string]int32)
+
+ for _, plan := range c.policy.Plans.Plans {
+ addr := int32(buf.Len()) + int32(c.stringOffset)
+ buf.WriteString(plan.Name)
+ buf.WriteByte(0)
+ c.entrypointNameAddrs[plan.Name] = addr
+ }
+
+ c.builtinStringAddrs = make(map[int]uint32, len(errorMessages))
+
+ for i := range errorMessages {
+ addr := uint32(buf.Len()) + uint32(c.stringOffset)
+ buf.WriteString(errorMessages[i].message)
+ buf.WriteByte(0)
+ c.builtinStringAddrs[errorMessages[i].id] = addr
+ }
+
+ c.module.Data.Segments = append(c.module.Data.Segments, module.DataSegment{
+ Index: 0,
+ Offset: module.Expr{
+ Instrs: []instruction.Instruction{
+ instruction.I32Const{
+ Value: c.stringOffset,
+ },
+ },
+ },
+ Init: buf.Bytes(),
+ })
+
+ return nil
+}
+
// compileExternalFuncDecls generates a function that lists the built-ins required by
// the policy. The host environment should invoke this function to obtain the list
// of built-in function identifiers (represented as integers) that will be used
// when calling out.
func (c *Compiler) compileExternalFuncDecls() error {

	c.code = &module.CodeEntry{}
	c.nextLocal = 0
	c.locals = map[ir.Local]uint32{}

	lobj := c.genLocal()

	// Create an empty object and remember it in a local.
	c.appendInstr(instruction.Call{Index: c.function(opaObject)})
	c.appendInstr(instruction.SetLocal{Index: lobj})
	c.externalFuncs = make(map[string]int32)

	// For each built-in NOT natively implemented by the wasm runtime, insert
	// name -> index into the object. The same index is recorded in
	// c.externalFuncs as the call-out identifier.
	for index, decl := range c.policy.Static.BuiltinFuncs {
		if _, ok := builtinsFunctions[decl.Name]; !ok {
			c.appendInstr(instruction.GetLocal{Index: lobj})
			c.appendInstr(instruction.I32Const{Value: c.externalFuncNameAddrs[decl.Name]})
			c.appendInstr(instruction.Call{Index: c.function(opaStringTerminated)})
			c.appendInstr(instruction.I64Const{Value: int64(index)})
			c.appendInstr(instruction.Call{Index: c.function(opaNumberInt)})
			c.appendInstr(instruction.Call{Index: c.function(opaObjectInsert)})
			c.externalFuncs[decl.Name] = int32(index)
		}
	}

	// Leave the object on the stack as the function's return value.
	c.appendInstr(instruction.GetLocal{Index: lobj})

	c.code.Func.Locals = []module.LocalDeclaration{
		{
			Count: c.nextLocal,
			Type: types.I32,
		},
	}

	return c.emitFunction("builtins", c.code)
}
+
// compileEntrypointDecls generates a function that lists the entrypoints available
// in the policy. The host environment can pick which entrypoint to invoke by setting
// the entrypoint identifier (represented as an integer) on the evaluation context.
func (c *Compiler) compileEntrypointDecls() error {

	c.code = &module.CodeEntry{}
	c.nextLocal = 0
	c.locals = map[ir.Local]uint32{}

	lobj := c.genLocal()

	// Create an empty object and remember it in a local.
	c.appendInstr(instruction.Call{Index: c.function(opaObject)})
	c.appendInstr(instruction.SetLocal{Index: lobj})
	c.entrypoints = make(map[string]int32)

	// Insert plan-name -> plan-index into the object. The same index is used
	// by compilePlans to dispatch inside eval(), so the two must stay in sync.
	for index, plan := range c.policy.Plans.Plans {
		c.appendInstr(instruction.GetLocal{Index: lobj})
		c.appendInstr(instruction.I32Const{Value: c.entrypointNameAddrs[plan.Name]})
		c.appendInstr(instruction.Call{Index: c.function(opaStringTerminated)})
		c.appendInstr(instruction.I64Const{Value: int64(index)})
		c.appendInstr(instruction.Call{Index: c.function(opaNumberInt)})
		c.appendInstr(instruction.Call{Index: c.function(opaObjectInsert)})
		c.entrypoints[plan.Name] = int32(index)
	}

	// Leave the object on the stack as the function's return value.
	c.appendInstr(instruction.GetLocal{Index: lobj})

	c.code.Func.Locals = []module.LocalDeclaration{
		{
			Count: c.nextLocal,
			Type: types.I32,
		},
	}

	return c.emitFunction("entrypoints", c.code)
}
+
+// compileFuncs compiles the policy functions and emits them into the module.
+func (c *Compiler) compileFuncs() error {
+
+ for _, fn := range c.policy.Funcs.Funcs {
+ if err := c.compileFunc(fn); err != nil {
+ return errors.Wrapf(err, "func %v", fn.Name)
+ }
+ }
+
+ return nil
+}
+
// compilePlans compiles the policy plans and emits the resulting function into
// the module as the exported eval() entry point. eval() reads input, data, and
// the requested entrypoint id from the context struct passed as its argument,
// then dispatches to the plan with that id.
func (c *Compiler) compilePlans() error {

	c.code = &module.CodeEntry{}
	c.nextLocal = 0
	c.locals = map[ir.Local]uint32{}
	c.lctx = c.genLocal()
	c.lrs = c.genLocal()

	// Initialize memoization.
	c.appendInstr(instruction.Call{Index: c.function(opaMemoizeInit)})

	// Initialize the input and data locals.
	// NOTE(review): the 0/4/8/12 byte offsets used below presumably mirror
	// the eval-context struct layout in the wasm runtime — confirm against
	// the runtime sources before changing.
	c.appendInstr(instruction.GetLocal{Index: c.lctx})
	c.appendInstr(instruction.I32Load{Offset: 0, Align: 2})
	c.appendInstr(instruction.SetLocal{Index: c.local(ir.Input)})

	c.appendInstr(instruction.GetLocal{Index: c.lctx})
	c.appendInstr(instruction.I32Load{Offset: 4, Align: 2})
	c.appendInstr(instruction.SetLocal{Index: c.local(ir.Data)})

	// Initialize the result set.
	c.appendInstr(instruction.Call{Index: c.function(opaSet)})
	c.appendInstr(instruction.SetLocal{Index: c.lrs})
	c.appendInstr(instruction.GetLocal{Index: c.lctx})
	c.appendInstr(instruction.GetLocal{Index: c.lrs})
	c.appendInstr(instruction.I32Store{Offset: 8, Align: 2})

	// Initialize the entrypoint id local.
	leid := c.genLocal()
	c.appendInstr(instruction.GetLocal{Index: c.lctx})
	c.appendInstr(instruction.I32Load{Offset: 12, Align: 2})
	c.appendInstr(instruction.SetLocal{Index: leid})

	// Add each entrypoint to this block. Each plan i becomes a nested block
	// that is skipped (br 0) unless leid == i, and that jumps out of main
	// (br 1) once its blocks have run. Plan indices must match those listed
	// by compileEntrypointDecls.
	main := instruction.Block{}

	for i, plan := range c.policy.Plans.Plans {

		entrypoint := instruction.Block{
			Instrs: []instruction.Instruction{
				instruction.GetLocal{Index: leid},
				instruction.I32Const{Value: int32(i)},
				instruction.I32Ne{},
				instruction.BrIf{Index: 0},
			},
		}

		for j, block := range plan.Blocks {

			instrs, err := c.compileBlock(block)
			if err != nil {
				return errors.Wrapf(err, "plan %d block %d", i, j)
			}

			entrypoint.Instrs = append(entrypoint.Instrs, instruction.Block{
				Instrs: instrs,
			})
		}

		entrypoint.Instrs = append(entrypoint.Instrs, instruction.Br{Index: 1})
		main.Instrs = append(main.Instrs, entrypoint)
	}

	// If none of the entrypoint blocks execute, call opa_abort() as this likely
	// indicates inconsistency between the generated entrypoint identifiers in the
	// eval() and entrypoint() functions (or the SDK invoked eval() with an invalid
	// entrypoint ID which should not be possible.)
	main.Instrs = append(main.Instrs,
		instruction.I32Const{Value: c.builtinStringAddr(errIllegalEntrypoint)},
		instruction.Call{Index: c.function(opaAbort)},
		instruction.Unreachable{},
	)

	c.appendInstr(main)
	c.appendInstr(instruction.I32Const{Value: int32(0)})

	c.code.Func.Locals = []module.LocalDeclaration{
		{
			Count: c.nextLocal,
			Type: types.I32,
		},
	}

	return c.emitFunction("eval", c.code)
}
+
+func (c *Compiler) compileFunc(fn *ir.Func) error {
+
+ if len(fn.Params) == 0 {
+ return fmt.Errorf("illegal function: zero args")
+ }
+
+ c.nextLocal = 0
+ c.locals = map[ir.Local]uint32{}
+
+ for _, a := range fn.Params {
+ _ = c.local(a)
+ }
+
+ _ = c.local(fn.Return)
+
+ c.code = &module.CodeEntry{}
+
+ for i := range fn.Blocks {
+ instrs, err := c.compileBlock(fn.Blocks[i])
+ if err != nil {
+ return errors.Wrapf(err, "block %d", i)
+ }
+ if i < len(fn.Blocks)-1 {
+ c.appendInstr(instruction.Block{Instrs: instrs})
+ } else {
+ c.appendInstrs(instrs)
+ }
+ }
+
+ c.code.Func.Locals = []module.LocalDeclaration{
+ {
+ Count: c.nextLocal,
+ Type: types.I32,
+ },
+ }
+
+ var params []types.ValueType
+
+ for i := 0; i < len(fn.Params); i++ {
+ params = append(params, types.I32)
+ }
+
+ return c.emitFunction(fn.Name, c.code)
+}
+
+// compileBlock compiles an IR block into a sequence of WASM instructions.
+// Statements that can be undefined emit a conditional branch
+// (BrIf{Index: 0}) that exits the innermost enclosing WASM block, which is
+// how the IR's "statement undefined" semantics are realized here.
+func (c *Compiler) compileBlock(block *ir.Block) ([]instruction.Instruction, error) {
+
+	var instrs []instruction.Instruction
+
+	for _, stmt := range block.Stmts {
+		switch stmt := stmt.(type) {
+		case *ir.ResultSetAdd:
+			// Add the value to the result set held in the reserved local c.lrs.
+			instrs = append(instrs, instruction.GetLocal{Index: c.lrs})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)})
+		case *ir.ReturnLocalStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.Return{})
+		case *ir.BlockStmt:
+			// Each nested IR block becomes its own WASM block so inner br-if
+			// instructions target the right label.
+			for i := range stmt.Blocks {
+				block, err := c.compileBlock(stmt.Blocks[i])
+				if err != nil {
+					return nil, err
+				}
+				instrs = append(instrs, instruction.Block{Instrs: block})
+			}
+		case *ir.BreakStmt:
+			instrs = append(instrs, instruction.Br{Index: stmt.Index})
+		case *ir.CallStmt:
+			if err := c.compileCallStmt(stmt, &instrs); err != nil {
+				return nil, err
+			}
+		case *ir.WithStmt:
+			if err := c.compileWithStmt(stmt, &instrs); err != nil {
+				return instrs, err
+			}
+		case *ir.AssignVarStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.AssignVarOnceStmt:
+			// Assign only if the target is unset (0) or already equal to the
+			// source; otherwise abort with a var-assignment conflict error.
+			instrs = append(instrs, instruction.Block{
+				Instrs: []instruction.Instruction{
+					instruction.Block{
+						Instrs: append([]instruction.Instruction{
+							instruction.GetLocal{Index: c.local(stmt.Target)},
+							instruction.I32Eqz{},
+							instruction.BrIf{Index: 0},
+							instruction.GetLocal{Index: c.local(stmt.Target)},
+							instruction.GetLocal{Index: c.local(stmt.Source)},
+							instruction.Call{Index: c.function(opaValueCompare)},
+							instruction.I32Eqz{},
+							instruction.BrIf{Index: 1},
+						},
+						c.runtimeErrorAbort(stmt.Location, errVarAssignConflict)...),
+					},
+					instruction.GetLocal{Index: c.local(stmt.Source)},
+					instruction.SetLocal{Index: c.local(stmt.Target)},
+				},
+			})
+		case *ir.AssignBooleanStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Target)})
+			if stmt.Value {
+				instrs = append(instrs, instruction.I32Const{Value: 1})
+			} else {
+				instrs = append(instrs, instruction.I32Const{Value: 0})
+			}
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueBooleanSet)})
+		case *ir.AssignIntStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Target)})
+			instrs = append(instrs, instruction.I64Const{Value: stmt.Value})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueNumberSetInt)})
+		case *ir.ScanStmt:
+			if err := c.compileScan(stmt, &instrs); err != nil {
+				return nil, err
+			}
+		case *ir.NopStmt:
+			instrs = append(instrs, instruction.Nop{})
+		case *ir.NotStmt:
+			if err := c.compileNot(stmt, &instrs); err != nil {
+				return nil, err
+			}
+		case *ir.DotStmt:
+			// Lookup source[key]; break out (undefined) if the key is absent.
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Key)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueGet)})
+			instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Target)})
+			instrs = append(instrs, instruction.I32Eqz{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.LenStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueLength)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaNumberSize)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.EqualStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.LessThanStmt:
+			// Comparison statements break out (undefined) when the opposite
+			// relation holds on opa_value_compare's three-way result.
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)})
+			instrs = append(instrs, instruction.I32Const{Value: 0})
+			instrs = append(instrs, instruction.I32GeS{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.LessThanEqualStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)})
+			instrs = append(instrs, instruction.I32Const{Value: 0})
+			instrs = append(instrs, instruction.I32GtS{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.GreaterThanStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)})
+			instrs = append(instrs, instruction.I32Const{Value: 0})
+			instrs = append(instrs, instruction.I32LeS{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.GreaterThanEqualStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)})
+			instrs = append(instrs, instruction.I32Const{Value: 0})
+			instrs = append(instrs, instruction.I32LtS{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.NotEqualStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueCompare)})
+			instrs = append(instrs, instruction.I32Eqz{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.MakeNullStmt:
+			instrs = append(instrs, instruction.Call{Index: c.function(opaNull)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeBooleanStmt:
+			instr := instruction.I32Const{}
+			if stmt.Value {
+				instr.Value = 1
+			} else {
+				instr.Value = 0
+			}
+			instrs = append(instrs, instr)
+			instrs = append(instrs, instruction.Call{Index: c.function(opaBoolean)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeNumberFloatStmt:
+			instrs = append(instrs, instruction.F64Const{Value: stmt.Value})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaNumberFloat)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeNumberIntStmt:
+			instrs = append(instrs, instruction.I64Const{Value: stmt.Value})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaNumberInt)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeNumberRefStmt:
+			// Construct a number from its interned string representation.
+			instrs = append(instrs, instruction.I32Const{Value: c.stringAddr(stmt.Index)})
+			instrs = append(instrs, instruction.I32Const{Value: int32(len(c.policy.Static.Strings[stmt.Index].Value))})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaNumberRef)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeStringStmt:
+			instrs = append(instrs, instruction.I32Const{Value: c.stringAddr(stmt.Index)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaStringTerminated)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeArrayStmt:
+			instrs = append(instrs, instruction.I32Const{Value: stmt.Capacity})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaArrayWithCap)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeObjectStmt:
+			instrs = append(instrs, instruction.Call{Index: c.function(opaObject)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.MakeSetStmt:
+			instrs = append(instrs, instruction.Call{Index: c.function(opaSet)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.IsArrayStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)})
+			instrs = append(instrs, instruction.I32Const{Value: opaTypeArray})
+			instrs = append(instrs, instruction.I32Ne{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.IsObjectStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueType)})
+			instrs = append(instrs, instruction.I32Const{Value: opaTypeObject})
+			instrs = append(instrs, instruction.I32Ne{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.IsUndefinedStmt:
+			// Defined locals are non-zero; break out if the local is defined.
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.I32Const{Value: 0})
+			instrs = append(instrs, instruction.I32Ne{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.ResetLocalStmt:
+			instrs = append(instrs, instruction.I32Const{Value: 0})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.IsDefinedStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Source)})
+			instrs = append(instrs, instruction.I32Eqz{})
+			instrs = append(instrs, instruction.BrIf{Index: 0})
+		case *ir.ArrayAppendStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Array)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaArrayAppend)})
+		case *ir.ObjectInsertStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Object)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Key)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaObjectInsert)})
+		case *ir.ObjectInsertOnceStmt:
+			// Insert only if the key is absent or already maps to an equal
+			// value; otherwise abort with an object-insert conflict error.
+			tmp := c.genLocal()
+			instrs = append(instrs, instruction.Block{
+				Instrs: []instruction.Instruction{
+					instruction.Block{
+						Instrs: append([]instruction.Instruction{
+							instruction.GetLocal{Index: c.local(stmt.Object)},
+							instruction.GetLocal{Index: c.local(stmt.Key)},
+							instruction.Call{Index: c.function(opaValueGet)},
+							instruction.TeeLocal{Index: tmp},
+							instruction.I32Eqz{},
+							instruction.BrIf{Index: 0},
+							instruction.GetLocal{Index: tmp},
+							instruction.GetLocal{Index: c.local(stmt.Value)},
+							instruction.Call{Index: c.function(opaValueCompare)},
+							instruction.I32Eqz{},
+							instruction.BrIf{Index: 1},
+						}, c.runtimeErrorAbort(stmt.Location, errObjectInsertConflict)...),
+					},
+					instruction.GetLocal{Index: c.local(stmt.Object)},
+					instruction.GetLocal{Index: c.local(stmt.Key)},
+					instruction.GetLocal{Index: c.local(stmt.Value)},
+					instruction.Call{Index: c.function(opaObjectInsert)},
+				},
+			})
+		case *ir.ObjectMergeStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.A)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.B)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaValueMerge)})
+			instrs = append(instrs, instruction.SetLocal{Index: c.local(stmt.Target)})
+		case *ir.SetAddStmt:
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Set)})
+			instrs = append(instrs, instruction.GetLocal{Index: c.local(stmt.Value)})
+			instrs = append(instrs, instruction.Call{Index: c.function(opaSetAdd)})
+		default:
+			var buf bytes.Buffer
+			ir.Pretty(&buf, stmt)
+			return instrs, fmt.Errorf("illegal statement: %v", buf.String())
+		}
+	}
+
+	return instrs, nil
+}
+
+// compileScan emits a loop that iterates over the collection in scan.Source,
+// executing the scan body once per element. The key local is zeroed first so
+// the runtime iterator starts from the beginning.
+func (c *Compiler) compileScan(scan *ir.ScanStmt, result *[]instruction.Instruction) error {
+	var instrs = *result
+	instrs = append(instrs, instruction.I32Const{Value: 0})
+	instrs = append(instrs, instruction.SetLocal{Index: c.local(scan.Key)})
+	body, err := c.compileScanBlock(scan)
+	if err != nil {
+		return err
+	}
+	// Wrap the loop in a block so the body can exit (br 1) when the iterator
+	// is exhausted.
+	instrs = append(instrs, instruction.Block{
+		Instrs: []instruction.Instruction{
+			instruction.Loop{Instrs: body},
+		},
+	})
+	*result = instrs
+	return nil
+}
+
+// compileScanBlock generates the body of the scan loop: advance the
+// iterator, exit the enclosing block (br 1) when it is exhausted, bind the
+// current value, run the nested block, then continue the loop (br 0).
+func (c *Compiler) compileScanBlock(scan *ir.ScanStmt) ([]instruction.Instruction, error) {
+	var instrs []instruction.Instruction
+
+	// Execute iterator.
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Source)})
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Key)})
+	instrs = append(instrs, instruction.Call{Index: c.function(opaValueIter)})
+
+	// Check for emptiness: a zero key means the iterator is done.
+	instrs = append(instrs, instruction.TeeLocal{Index: c.local(scan.Key)})
+	instrs = append(instrs, instruction.I32Eqz{})
+	instrs = append(instrs, instruction.BrIf{Index: 1})
+
+	// Load value.
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Source)})
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(scan.Key)})
+	instrs = append(instrs, instruction.Call{Index: c.function(opaValueGet)})
+	instrs = append(instrs, instruction.SetLocal{Index: c.local(scan.Value)})
+
+	// Loop body.
+	nested, err := c.compileBlock(scan.Block)
+	if err != nil {
+		return nil, err
+	}
+
+	// Continue.
+	instrs = append(instrs, nested...)
+	instrs = append(instrs, instruction.Br{Index: 0})
+
+	return instrs, nil
+}
+
+// compileNot compiles negation. The nested block runs inside a WASM block;
+// if it runs to completion (the negated query is defined) a condition flag
+// is cleared and the surrounding block is exited, making the not statement
+// undefined. If the nested block breaks early (undefined), the flag stays
+// set and execution continues past the not statement.
+func (c *Compiler) compileNot(not *ir.NotStmt, result *[]instruction.Instruction) error {
+	var instrs = *result
+
+	// generate and initialize condition variable
+	cond := c.genLocal()
+	instrs = append(instrs, instruction.I32Const{Value: 1})
+	instrs = append(instrs, instruction.SetLocal{Index: cond})
+
+	nested, err := c.compileBlock(not.Block)
+	if err != nil {
+		return err
+	}
+
+	// unset condition variable if end of block is reached
+	nested = append(nested, instruction.I32Const{Value: 0})
+	nested = append(nested, instruction.SetLocal{Index: cond})
+	instrs = append(instrs, instruction.Block{Instrs: nested})
+
+	// break out of block if condition variable was unset
+	instrs = append(instrs, instruction.GetLocal{Index: cond})
+	instrs = append(instrs, instruction.I32Eqz{})
+	instrs = append(instrs, instruction.BrIf{Index: 0})
+
+	*result = instrs
+	return nil
+}
+
+// compileWithStmt compiles a with statement: it saves the current value of
+// the targeted local, installs the replacement value (possibly upserted at a
+// path inside the value), runs the nested block, and then restores the local
+// and the memoization scope whether or not the block was defined.
+func (c *Compiler) compileWithStmt(with *ir.WithStmt, result *[]instruction.Instruction) error {
+
+	var instrs = *result
+	save := c.genLocal()
+	// Push a fresh memoization scope for the duration of the with block;
+	// popped below before undefinedness is propagated.
+	instrs = append(instrs, instruction.Call{Index: c.function(opaMemoizePush)})
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(with.Local)})
+	instrs = append(instrs, instruction.SetLocal{Index: save})
+
+	if len(with.Path) == 0 {
+		// Replace the whole value held by the local.
+		instrs = append(instrs, instruction.GetLocal{Index: c.local(with.Value)})
+		instrs = append(instrs, instruction.SetLocal{Index: c.local(with.Local)})
+	} else {
+		instrs = c.compileUpsert(with.Local, with.Path, with.Value, with.Location, instrs)
+	}
+
+	// Flag tracking whether the nested block ran to completion.
+	undefined := c.genLocal()
+	instrs = append(instrs, instruction.I32Const{Value: 1})
+	instrs = append(instrs, instruction.SetLocal{Index: undefined})
+
+	nested, err := c.compileBlock(with.Block)
+	if err != nil {
+		return err
+	}
+
+	nested = append(nested, instruction.I32Const{Value: 0})
+	nested = append(nested, instruction.SetLocal{Index: undefined})
+	instrs = append(instrs, instruction.Block{Instrs: nested})
+	// Restore the saved local and memoization scope before breaking out.
+	instrs = append(instrs, instruction.GetLocal{Index: save})
+	instrs = append(instrs, instruction.SetLocal{Index: c.local(with.Local)})
+	instrs = append(instrs, instruction.Call{Index: c.function(opaMemoizePop)})
+	instrs = append(instrs, instruction.GetLocal{Index: undefined})
+	instrs = append(instrs, instruction.BrIf{Index: 0})
+
+	*result = instrs
+
+	return nil
+}
+
+// compileUpsert generates instructions that insert value at the given string
+// path inside local, shallow-copying the local and every object traversed
+// along the way so previously shared values are not mutated. Missing or
+// non-object nodes along the path are replaced with fresh empty objects.
+func (c *Compiler) compileUpsert(local ir.Local, path []int, value ir.Local, loc ir.Location, instrs []instruction.Instruction) []instruction.Instruction {
+
+	lcopy := c.genLocal() // holds copy of local
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(local)})
+	instrs = append(instrs, instruction.SetLocal{Index: lcopy})
+
+	// Shallow copy the local if defined otherwise initialize to an empty object.
+	instrs = append(instrs, instruction.Block{
+		Instrs: []instruction.Instruction{
+			instruction.Block{Instrs: []instruction.Instruction{
+				instruction.GetLocal{Index: lcopy},
+				instruction.I32Eqz{},
+				instruction.BrIf{Index: 0},
+				instruction.GetLocal{Index: lcopy},
+				instruction.Call{Index: c.function(opaValueShallowCopy)},
+				instruction.TeeLocal{Index: lcopy},
+				instruction.SetLocal{Index: c.local(local)},
+				instruction.Br{Index: 1},
+			}},
+			instruction.Call{Index: c.function(opaObject)},
+			instruction.TeeLocal{Index: lcopy},
+			instruction.SetLocal{Index: c.local(local)},
+		},
+	})
+
+	// Initialize the locals that specify the path of the upsert operation.
+	lpath := make(map[int]uint32, len(path))
+
+	for i := 0; i < len(path); i++ {
+		lpath[i] = c.genLocal()
+		instrs = append(instrs, instruction.I32Const{Value: c.stringAddr(path[i])})
+		instrs = append(instrs, instruction.Call{Index: c.function(opaStringTerminated)})
+		instrs = append(instrs, instruction.SetLocal{Index: lpath[i]})
+	}
+
+	// Generate a block that traverses the path of the upsert operation,
+	// shallowing copying values at each step as needed. Stop before the final
+	// segment that will only be inserted.
+	var inner []instruction.Instruction
+	ltemp := c.genLocal()
+
+	for i := 0; i < len(path)-1; i++ {
+
+		// Lookup the next part of the path.
+		inner = append(inner, instruction.GetLocal{Index: lcopy})
+		inner = append(inner, instruction.GetLocal{Index: lpath[i]})
+		inner = append(inner, instruction.Call{Index: c.function(opaValueGet)})
+		inner = append(inner, instruction.SetLocal{Index: ltemp})
+
+		// If the next node is missing, break.
+		inner = append(inner, instruction.GetLocal{Index: ltemp})
+		inner = append(inner, instruction.I32Eqz{})
+		inner = append(inner, instruction.BrIf{Index: uint32(i)})
+
+		// If the next node is not an object, break.
+		inner = append(inner, instruction.GetLocal{Index: ltemp})
+		inner = append(inner, instruction.Call{Index: c.function(opaValueType)})
+		inner = append(inner, instruction.I32Const{Value: opaTypeObject})
+		inner = append(inner, instruction.I32Ne{})
+		inner = append(inner, instruction.BrIf{Index: uint32(i)})
+
+		// Otherwise, shallow copy the next node and insert into the copy
+		// before continuing.
+		inner = append(inner, instruction.GetLocal{Index: ltemp})
+		inner = append(inner, instruction.Call{Index: c.function(opaValueShallowCopy)})
+		inner = append(inner, instruction.SetLocal{Index: ltemp})
+		inner = append(inner, instruction.GetLocal{Index: lcopy})
+		inner = append(inner, instruction.GetLocal{Index: lpath[i]})
+		inner = append(inner, instruction.GetLocal{Index: ltemp})
+		inner = append(inner, instruction.Call{Index: c.function(opaObjectInsert)})
+		inner = append(inner, instruction.GetLocal{Index: ltemp})
+		inner = append(inner, instruction.SetLocal{Index: lcopy})
+	}
+
+	inner = append(inner, instruction.Br{Index: uint32(len(path) - 1)})
+
+	// Generate blocks that handle missing nodes during traversal. Each block
+	// wraps the previous one; the break index above selects which handler
+	// runs when traversal stops at that path segment.
+	var block []instruction.Instruction
+	lval := c.genLocal()
+
+	for i := 0; i < len(path)-1; i++ {
+		block = append(block, instruction.Block{Instrs: inner})
+		block = append(block, instruction.Call{Index: c.function(opaObject)})
+		block = append(block, instruction.SetLocal{Index: lval})
+		block = append(block, instruction.GetLocal{Index: lcopy})
+		block = append(block, instruction.GetLocal{Index: lpath[i]})
+		block = append(block, instruction.GetLocal{Index: lval})
+		block = append(block, instruction.Call{Index: c.function(opaObjectInsert)})
+		block = append(block, instruction.GetLocal{Index: lval})
+		block = append(block, instruction.SetLocal{Index: lcopy})
+		inner = block
+		block = nil
+	}
+
+	// Finish by inserting the statement's value into the shallow copied node.
+	instrs = append(instrs, instruction.Block{Instrs: inner})
+	instrs = append(instrs, instruction.GetLocal{Index: lcopy})
+	instrs = append(instrs, instruction.GetLocal{Index: lpath[len(path)-1]})
+	instrs = append(instrs, instruction.GetLocal{Index: c.local(value)})
+	instrs = append(instrs, instruction.Call{Index: c.function(opaObjectInsert)})
+
+	return instrs
+}
+
+// compileCallStmt compiles a call statement, dispatching to a function
+// defined in this module or to an external (host-dispatched) built-in.
+// An unknown function name is recorded in c.errors rather than aborting
+// compilation.
+func (c *Compiler) compileCallStmt(stmt *ir.CallStmt, result *[]instruction.Instruction) error {
+
+	fn := stmt.Func
+
+	// Map the IR function name through the built-in name table, if present.
+	if name, ok := builtinsFunctions[stmt.Func]; ok {
+		fn = name
+	}
+
+	if index, ok := c.funcs[fn]; ok {
+		return c.compileInternalCall(stmt, index, result)
+	}
+
+	if id, ok := c.externalFuncs[fn]; ok {
+		return c.compileExternalCall(stmt, id, result)
+	}
+
+	c.errors = append(c.errors, fmt.Errorf("undefined function: %q", fn))
+
+	return nil
+}
+
+// compileInternalCall emits a call to a function defined in this module.
+// Calls to planned functions taking exactly two arguments (by convention the
+// input and data documents) are memoized: the result is looked up before the
+// call and stored afterwards, keyed by the function index. An undefined
+// result (0) breaks out of the enclosing block (br 1).
+func (c *Compiler) compileInternalCall(stmt *ir.CallStmt, index uint32, result *[]instruction.Instruction) error {
+
+	var memoized bool
+
+	if _, ok := c.planfuncs[stmt.Func]; ok && len(stmt.Args) == 2 {
+		memoized = true
+	}
+
+	block := instruction.Block{}
+
+	// Check if call can be memoized.
+	if memoized {
+		block.Instrs = append(block.Instrs,
+			instruction.I32Const{Value: int32(index)},
+			instruction.Call{Index: c.function(opaMemoizeGet)},
+			instruction.TeeLocal{Index: c.local(stmt.Result)},
+			instruction.BrIf{Index: 0})
+	}
+
+	// Prepare function args and call.
+	for _, arg := range stmt.Args {
+		block.Instrs = append(block.Instrs, instruction.GetLocal{Index: c.local(arg)})
+	}
+
+	block.Instrs = append(block.Instrs,
+		instruction.Call{Index: index},
+		instruction.TeeLocal{Index: c.local(stmt.Result)},
+		instruction.I32Eqz{},
+		instruction.BrIf{Index: 1})
+
+	// Memoize the result.
+	if memoized {
+		block.Instrs = append(block.Instrs,
+			instruction.I32Const{Value: int32(index)},
+			instruction.GetLocal{Index: c.local(stmt.Result)},
+			instruction.Call{Index: c.function(opaMemoizeInsert)})
+	}
+
+	*result = append(*result, block)
+
+	return nil
+}
+
+// compileExternalCall emits a call to a host-provided built-in through the
+// dispatcher matching the argument count. Too many arguments is recorded as
+// a compiler error. An undefined result (0) breaks out of the enclosing
+// block.
+func (c *Compiler) compileExternalCall(stmt *ir.CallStmt, id int32, result *[]instruction.Instruction) error {
+
+	if len(stmt.Args) >= len(builtinDispatchers) {
+		c.errors = append(c.errors, fmt.Errorf("too many built-in call arguments: %q", stmt.Func))
+		return nil
+	}
+
+	instrs := *result
+	instrs = append(instrs, instruction.I32Const{Value: id})
+	instrs = append(instrs, instruction.I32Const{Value: 0}) // unused context parameter
+
+	for _, arg := range stmt.Args {
+		instrs = append(instrs, instruction.GetLocal{Index: c.local(arg)})
+	}
+
+	instrs = append(instrs, instruction.Call{Index: c.function(builtinDispatchers[len(stmt.Args)])})
+	instrs = append(instrs, instruction.TeeLocal{Index: c.local(stmt.Result)})
+	instrs = append(instrs, instruction.I32Eqz{})
+	instrs = append(instrs, instruction.BrIf{Index: 0})
+	*result = instrs
+	return nil
+}
+
+// emitFunctionDecl declares a function of the given type in the module,
+// reserving an empty code segment to be filled in later by emitFunction.
+// If export is true the function is also added to the module's export
+// section under the same name.
+func (c *Compiler) emitFunctionDecl(name string, tpe module.FunctionType, export bool) {
+
+	typeIndex := c.emitFunctionType(tpe)
+	c.module.Function.TypeIndices = append(c.module.Function.TypeIndices, typeIndex)
+	c.module.Code.Segments = append(c.module.Code.Segments, module.RawCodeSegment{})
+	// The function index space starts with imports, followed by
+	// locally-defined functions.
+	idx := uint32((len(c.module.Function.TypeIndices) - 1) + c.functionImportCount())
+	c.funcs[name] = idx
+
+	if export {
+		c.module.Export.Exports = append(c.module.Export.Exports, module.Export{
+			Name: name,
+			Descriptor: module.ExportDescriptor{
+				Type: module.FunctionExportType,
+				Index: idx,
+			},
+		})
+	}
+
+	// add functions 'name' entry, avoiding duplicates in the name section
+	var found bool
+	for _, m := range c.module.Names.Functions {
+		if m.Index == idx {
+			found = true
+		}
+	}
+	if !found {
+		c.module.Names.Functions = append(c.module.Names.Functions, module.NameMap{
+			Index: idx,
+			Name: name,
+		})
+	}
+}
+
+// emitFunctionType returns the index of tpe in the module's type section,
+// appending it first if no equal type is already present.
+func (c *Compiler) emitFunctionType(tpe module.FunctionType) uint32 {
+	for i := range c.module.Type.Functions {
+		if tpe.Equal(c.module.Type.Functions[i]) {
+			return uint32(i)
+		}
+	}
+	c.module.Type.Functions = append(c.module.Type.Functions, tpe)
+	return uint32(len(c.module.Type.Functions) - 1)
+}
+
+// emitFunction encodes entry and stores the bytes into the code segment
+// reserved for the named function by emitFunctionDecl. The index is
+// adjusted by the import count because code segments cover only
+// locally-defined functions.
+func (c *Compiler) emitFunction(name string, entry *module.CodeEntry) error {
+	var buf bytes.Buffer
+	if err := encoding.WriteCodeEntry(&buf, entry); err != nil {
+		return err
+	}
+	index := c.function(name) - uint32(c.functionImportCount())
+	c.module.Code.Segments[index].Code = buf.Bytes()
+	return nil
+}
+
+func (c *Compiler) functionImportCount() int {
+ var count int
+
+ for _, imp := range c.module.Import.Imports {
+ if imp.Descriptor.Kind() == module.FunctionImportType {
+ count++
+ }
+ }
+
+ return count
+}
+
+// stringAddr returns the linear-memory address of the indexed string constant.
+func (c *Compiler) stringAddr(index int) int32 {
+	return int32(c.stringAddrs[index])
+}
+
+// builtinStringAddr returns the linear-memory address of the interned string
+// for the given built-in code (used e.g. as runtime error messages).
+func (c *Compiler) builtinStringAddr(code int) int32 {
+	return int32(c.builtinStringAddrs[code])
+}
+
+// fileAddr returns the linear-memory address of the indexed file name string.
+func (c *Compiler) fileAddr(code int) int32 {
+	return int32(c.fileAddrs[code])
+}
+
+func (c *Compiler) local(l ir.Local) uint32 {
+ var u32 uint32
+ var exist bool
+ if u32, exist = c.locals[l]; !exist {
+ u32 = c.nextLocal
+ c.locals[l] = u32
+ c.nextLocal++
+ }
+ return u32
+}
+
+// genLocal allocates a fresh WASM local index not tied to any IR local.
+func (c *Compiler) genLocal() uint32 {
+	next := c.nextLocal
+	c.nextLocal++
+	return next
+}
+
+// function returns the index of the named function, panicking if the name
+// was never declared (a compiler-internal invariant violation).
+func (c *Compiler) function(name string) uint32 {
+	if fidx, ok := c.funcs[name]; ok {
+		return fidx
+	}
+	panic(fmt.Sprintf("function not found: %s", name))
+}
+
+// appendInstr appends a single instruction to the body of the function
+// currently being compiled.
+func (c *Compiler) appendInstr(instr instruction.Instruction) {
+	c.code.Func.Expr.Instrs = append(c.code.Func.Expr.Instrs, instr)
+}
+
+// appendInstrs appends all instructions to the body of the function
+// currently being compiled.
+func (c *Compiler) appendInstrs(instrs []instruction.Instruction) {
+	c.code.Func.Expr.Instrs = append(c.code.Func.Expr.Instrs, instrs...)
+}
+
+// getLowestFreeDataSegmentOffset returns the first linear-memory address
+// above every existing data segment in m. Each segment's offset expression
+// must consist of a single i32.const instruction; anything else is an error.
+func getLowestFreeDataSegmentOffset(m *module.Module) (int32, error) {
+
+	var offset int32
+
+	for i := range m.Data.Segments {
+
+		if len(m.Data.Segments[i].Offset.Instrs) != 1 {
+			return 0, errors.New("bad data segment offset instructions")
+		}
+
+		instr, ok := m.Data.Segments[i].Offset.Instrs[0].(instruction.I32Const)
+		if !ok {
+			return 0, errors.New("bad data segment offset expr")
+		}
+
+		// NOTE(tsandall): assume memory up to but not including addr is taken.
+		addr := instr.Value + int32(len(m.Data.Segments[i].Init))
+		if addr > offset {
+			offset = addr
+		}
+	}
+
+	return offset, nil
+}
+
+// runtimeErrorAbort uses the passed source location to build the
+// arguments for a call to opa_runtime_error(file, row, col, msg).
+// It returns the instructions that make up the function call with
+// arguments, followed by Unreachable.
+func (c *Compiler) runtimeErrorAbort(loc ir.Location, errType int) []instruction.Instruction {
+	index, row, col := loc.Index, loc.Row, loc.Col
+	return []instruction.Instruction{
+		instruction.I32Const{Value: c.fileAddr(index)},
+		instruction.I32Const{Value: int32(row)},
+		instruction.I32Const{Value: int32(col)},
+		instruction.I32Const{Value: c.builtinStringAddr(errType)},
+		instruction.Call{Index: c.function(opaRuntimeError)},
+		// Trap after reporting: execution must not continue past the error.
+		instruction.Unreachable{},
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go b/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go
new file mode 100644
index 00000000..cf8cc1cf
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/deepcopy/deepcopy.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package deepcopy
+
+// DeepCopy performs a recursive deep copy for nested slices/maps and
+// returns the copied object. Only []interface{} and map[string]interface{}
+// are copied; any other value is returned unchanged.
+func DeepCopy(val interface{}) interface{} {
+	switch v := val.(type) {
+	case []interface{}:
+		out := make([]interface{}, len(v))
+		for i, elem := range v {
+			out[i] = DeepCopy(elem)
+		}
+		return out
+	case map[string]interface{}:
+		out := make(map[string]interface{}, len(v))
+		for k, elem := range v {
+			out[k] = DeepCopy(elem)
+		}
+		return out
+	default:
+		return val
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go b/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go
new file mode 100644
index 00000000..6b8ba48d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/file/archive/tarball.go
@@ -0,0 +1,42 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "strings"
+)
+
+// MustWriteTarGz writes the list of (name, content) pairs into a gzipped
+// tarball and returns the buffer holding it; it panics if any file cannot
+// be written. The deferred Close calls run after the return value is set,
+// flushing the tar and gzip trailers into the returned buffer before the
+// caller can read it.
+func MustWriteTarGz(files [][2]string) *bytes.Buffer {
+	var buf bytes.Buffer
+	gw := gzip.NewWriter(&buf)
+	defer gw.Close()
+	tw := tar.NewWriter(gw)
+	defer tw.Close()
+	for _, file := range files {
+		if err := WriteFile(tw, file[0], []byte(file[1])); err != nil {
+			panic(err)
+		}
+	}
+	return &buf
+}
+
+// WriteFile adds a file header with content to the given tar writer. The
+// path is normalized to exactly one leading slash, and the entry is written
+// as a regular file with mode 0600.
+func WriteFile(tw *tar.Writer, path string, bs []byte) error {
+
+	hdr := &tar.Header{
+		Name: "/" + strings.TrimLeft(path, "/"),
+		Mode: 0600,
+		Typeflag: tar.TypeReg,
+		Size: int64(len(bs)),
+	}
+
+	if err := tw.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	_, err := tw.Write(bs)
+	return err
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/file/url/url.go b/vendor/github.com/open-policy-agent/opa/internal/file/url/url.go
new file mode 100644
index 00000000..aacc5718
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/file/url/url.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package url contains helpers for dealing with file paths and URLs.
+package url
+
+import (
+ "fmt"
+ "net/url"
+ "runtime"
+ "strings"
+)
+
+// goos is runtime.GOOS indirected through a variable — presumably so tests
+// can substitute another OS value; confirm against the package's tests.
+var goos = runtime.GOOS
+
+// Clean returns a cleaned file path that may or may not be a URL. Only the
+// file:// scheme is accepted; any other scheme is an error, and plain paths
+// are returned unchanged.
+func Clean(path string) (string, error) {
+
+	if strings.Contains(path, "://") {
+
+		url, err := url.Parse(path)
+		if err != nil {
+			return "", err
+		}
+
+		if url.Scheme != "file" {
+			return "", fmt.Errorf("unsupported URL scheme: %v", path)
+		}
+
+		path = url.Path
+
+		// Trim leading slash on Windows if present. The url.Path field returned
+		// by url.Parse has leading slash that causes CreateFile() calls to fail
+		// on Windows. See https://github.com/golang/go/issues/6027 for details.
+		if goos == "windows" && len(path) >= 1 && path[0] == '/' {
+			path = path[1:]
+		}
+	}
+
+	return path, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/ir/ir.go b/vendor/github.com/open-policy-agent/opa/internal/ir/ir.go
new file mode 100644
index 00000000..791a9154
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/ir/ir.go
@@ -0,0 +1,520 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package ir defines an intermediate representation (IR) for Rego.
+//
+// The IR specifies an imperative execution model for Rego policies similar to a
+// query plan in traditional databases.
+package ir
+
+import (
+ "fmt"
+)
+
+type (
+ // Policy represents a planned policy query.
+ Policy struct {
+ Static *Static
+ Plans *Plans
+ Funcs *Funcs
+ }
+
+ // Static represents a static data segment that is indexed into by the policy.
+ Static struct {
+ Strings []*StringConst
+ BuiltinFuncs []*BuiltinFunc
+ Files []*StringConst
+ }
+
+ // BuiltinFunc represents a built-in function that may be required by the
+ // policy.
+ BuiltinFunc struct {
+ Name string
+ }
+
+ // Plans represents a collection of named query plans to expose in the policy.
+ Plans struct {
+ Plans []*Plan
+ }
+
+ // Funcs represents a collection of planned functions to include in the
+ // policy.
+ Funcs struct {
+ Funcs []*Func
+ }
+
+ // Func represents a named plan (function) that can be invoked. Functions
+ // accept one or more parameters and return a value. By convention, the
+ // input document and data documents are always passed as the first and
+ // second arguments (respectively).
+ Func struct {
+ Name string
+ Params []Local
+ Return Local
+ Blocks []*Block // TODO(tsandall): should this be a plan?
+ }
+
+ // Plan represents an ordered series of blocks to execute. Plan execution
+ // stops when a return statement is reached. Blocks are executed in-order.
+ Plan struct {
+ Name string
+ Blocks []*Block
+ }
+
+ // Block represents an ordered sequence of statements to execute. Blocks are
+ // executed until a return statement is encountered, a statement is undefined,
+ // or there are no more statements. If all statements are defined but no return
+ // statement is encountered, the block is undefined.
+ Block struct {
+ Stmts []Stmt
+ }
+
+ // Stmt represents an operation (e.g., comparison, loop, dot, etc.) to execute.
+ Stmt interface {
+ locationStmt
+ }
+
+ locationStmt interface {
+ SetLocation(index, row, col int, file, text string)
+ GetLocation() *Location
+ }
+
+ // Local represents a plan-scoped variable.
+ //
+ // TODO(tsandall): should this be int32 for safety?
+ Local int
+
+ // Const represents a constant value from the policy.
+ Const interface {
+ typeMarker()
+ }
+
+ // NullConst represents a null value.
+ NullConst struct{}
+
+ // BooleanConst represents a boolean value.
+ BooleanConst struct {
+ Value bool
+ }
+
+ // StringConst represents a string value.
+ StringConst struct {
+ Value string
+ }
+
+ // IntConst represents an integer constant.
+ IntConst struct {
+ Value int64
+ }
+
+ // FloatConst represents a floating-point constant.
+ FloatConst struct {
+ Value float64
+ }
+)
+
+const (
+ // Input is the local variable that refers to the global input document.
+ Input Local = iota
+
+ // Data is the local variable that refers to the global data document.
+ Data
+
+ // Unused is the free local variable that can be allocated in a plan.
+ Unused
+)
+
+func (a *Policy) String() string {
+ return "Policy"
+}
+
+func (a *Static) String() string {
+ return fmt.Sprintf("Static (%d strings, %d files)", len(a.Strings), len(a.Files))
+}
+
+func (a *Funcs) String() string {
+ return fmt.Sprintf("Funcs (%d funcs)", len(a.Funcs))
+}
+
+func (a *Func) String() string {
+ return fmt.Sprintf("%v (%d params: %v, %d blocks)", a.Name, len(a.Params), a.Params, len(a.Blocks))
+}
+
+func (a *Plan) String() string {
+ return fmt.Sprintf("Plan %v (%d blocks)", a.Name, len(a.Blocks))
+}
+
+func (a *Block) String() string {
+ return fmt.Sprintf("Block (%d statements)", len(a.Stmts))
+}
+
+func (*BooleanConst) typeMarker() {}
+func (*NullConst) typeMarker() {}
+func (*IntConst) typeMarker() {}
+func (*FloatConst) typeMarker() {}
+func (*StringConst) typeMarker() {}
+
+// ReturnLocalStmt represents a return statement that yields a local value.
+type ReturnLocalStmt struct {
+ Source Local
+
+ Location
+}
+
+// CallStmt represents a named function call. The result should be stored in the
+// result local.
+type CallStmt struct {
+ Func string
+ Args []Local
+ Result Local
+
+ Location
+}
+
+// BlockStmt represents a nested block. Nested blocks and break statements can
+// be used to short-circuit execution.
+type BlockStmt struct {
+ Blocks []*Block
+
+ Location
+}
+
+func (a *BlockStmt) String() string {
+ return fmt.Sprintf("BlockStmt (%d blocks) %v", len(a.Blocks), a.GetLocation())
+}
+
+// BreakStmt represents a jump out of the current block. The index specifies how
+// many blocks to jump starting from zero (the current block). Execution will
+// continue from the end of the block that is jumped to.
+type BreakStmt struct {
+ Index uint32
+
+ Location
+}
+
+// DotStmt represents a lookup operation on a value (e.g., array, object, etc.)
+// The source of a DotStmt may be a scalar value in which case the statement
+// will be undefined.
+type DotStmt struct {
+ Source Local
+ Key Local
+ Target Local
+
+ Location
+}
+
+// LenStmt represents a length() operation on a local variable. The
+// result is stored in the target local variable.
+type LenStmt struct {
+ Source Local
+ Target Local
+
+ Location
+}
+
+// ScanStmt represents a linear scan over a composite value. The
+// source may be a scalar in which case the block will never execute.
+type ScanStmt struct {
+ Source Local
+ Key Local
+ Value Local
+ Block *Block
+
+ Location
+}
+
+// NotStmt represents a negated statement.
+type NotStmt struct {
+ Block *Block
+
+ Location
+}
+
+// AssignBooleanStmt represents an assignment of a boolean value to a local variable.
+type AssignBooleanStmt struct {
+ Value bool
+ Target Local
+
+ Location
+}
+
+// AssignIntStmt represents an assignment of an integer value to a
+// local variable.
+type AssignIntStmt struct {
+ Value int64
+ Target Local
+
+ Location
+}
+
+// AssignVarStmt represents an assignment of one local variable to another.
+type AssignVarStmt struct {
+ Source Local
+ Target Local
+
+ Location
+}
+
+// AssignVarOnceStmt represents an assignment of one local variable to another.
+// If the target is defined, execution aborts with a conflict error.
+//
+// TODO(tsandall): is there a better name for this?
+type AssignVarOnceStmt struct {
+ Target Local
+ Source Local
+
+ Location
+}
+
+// ResetLocalStmt resets a local variable to 0.
+type ResetLocalStmt struct {
+ Target Local
+
+ Location
+}
+
+// MakeStringStmt constructs a local variable that refers to a string constant.
+type MakeStringStmt struct {
+ Index int
+ Target Local
+
+ Location
+}
+
+// MakeNullStmt constructs a local variable that refers to a null value.
+type MakeNullStmt struct {
+ Target Local
+
+ Location
+}
+
+// MakeBooleanStmt constructs a local variable that refers to a boolean value.
+type MakeBooleanStmt struct {
+ Value bool
+ Target Local
+
+ Location
+}
+
+// MakeNumberFloatStmt constructs a local variable that refers to a
+// floating-point number value.
+type MakeNumberFloatStmt struct {
+ Value float64
+ Target Local
+
+ Location
+}
+
+// MakeNumberIntStmt constructs a local variable that refers to an integer value.
+type MakeNumberIntStmt struct {
+ Value int64
+ Target Local
+
+ Location
+}
+
+// MakeNumberRefStmt constructs a local variable that refers to a number stored as a string.
+type MakeNumberRefStmt struct {
+ Index int
+ Target Local
+
+ Location
+}
+
+// MakeArrayStmt constructs a local variable that refers to an array value.
+type MakeArrayStmt struct {
+ Capacity int32
+ Target Local
+
+ Location
+}
+
+// MakeObjectStmt constructs a local variable that refers to an object value.
+type MakeObjectStmt struct {
+ Target Local
+
+ Location
+}
+
+// MakeSetStmt constructs a local variable that refers to a set value.
+type MakeSetStmt struct {
+ Target Local
+
+ Location
+}
+
+// EqualStmt represents a value-equality check of two local variables.
+type EqualStmt struct {
+ A Local
+ B Local
+
+ Location
+}
+
+// LessThanStmt represents a < check of two local variables.
+type LessThanStmt struct {
+ A Local
+ B Local
+
+ Location
+}
+
+// LessThanEqualStmt represents a <= check of two local variables.
+type LessThanEqualStmt struct {
+ A Local
+ B Local
+
+ Location
+}
+
+// GreaterThanStmt represents a > check of two local variables.
+type GreaterThanStmt struct {
+ A Local
+ B Local
+
+ Location
+}
+
+// GreaterThanEqualStmt represents a >= check of two local variables.
+type GreaterThanEqualStmt struct {
+ A Local
+ B Local
+
+ Location
+}
+
+// NotEqualStmt represents a != check of two local variables.
+type NotEqualStmt struct {
+ A Local
+ B Local
+
+ Location
+}
+
+// IsArrayStmt represents a dynamic type check on a local variable.
+type IsArrayStmt struct {
+ Source Local
+
+ Location
+}
+
+// IsObjectStmt represents a dynamic type check on a local variable.
+type IsObjectStmt struct {
+ Source Local
+
+ Location
+}
+
+// IsDefinedStmt represents a check of whether a local variable is defined.
+type IsDefinedStmt struct {
+ Source Local
+
+ Location
+}
+
+// IsUndefinedStmt represents a check of whether local variable is undefined.
+type IsUndefinedStmt struct {
+ Source Local
+
+ Location
+}
+
+// ArrayAppendStmt represents a dynamic append operation of a value
+// onto an array.
+type ArrayAppendStmt struct {
+ Value Local
+ Array Local
+
+ Location
+}
+
+// ObjectInsertStmt represents a dynamic insert operation of a
+// key/value pair into an object.
+type ObjectInsertStmt struct {
+ Key Local
+ Value Local
+ Object Local
+
+ Location
+}
+
+// ObjectInsertOnceStmt represents a dynamic insert operation of a key/value
+// pair into an object. If the key already exists and the value differs,
+// execution aborts with a conflict error.
+type ObjectInsertOnceStmt struct {
+ Key Local
+ Value Local
+ Object Local
+
+ Location
+}
+
+// ObjectMergeStmt performs a recursive merge of two object values. If either of
+// the locals refer to non-object values this operation will abort with a
+// conflict error. Overlapping object keys are merged recursively.
+type ObjectMergeStmt struct {
+ A Local
+ B Local
+ Target Local
+
+ Location
+}
+
+// SetAddStmt represents a dynamic add operation of an element into a set.
+type SetAddStmt struct {
+ Value Local
+ Set Local
+
+ Location
+}
+
+// WithStmt replaces the Local or a portion of the document referred to by the
+// Local with the Value and executes the contained block. If the Path is
+// non-empty, the Value is upserted into the Local. If the intermediate nodes in
+// the Local referred to by the Path do not exist, they will be created. When
+// the WithStmt finishes the Local is reset to its original value.
+type WithStmt struct {
+ Local Local
+ Path []int
+ Value Local
+ Block *Block
+
+ Location
+}
+
+// NopStmt adds a nop instruction. Useful during development and debugging only.
+type NopStmt struct {
+ Location
+}
+
+// ResultSetAdd adds a value into the result set returned by the query plan.
+type ResultSetAdd struct {
+ Value Local
+
+ Location
+}
+
+// Location records the file index, and the row and column inside that file
+// that a statement can be connected to.
+type Location struct {
+ Index int // filename string constant index
+ Col, Row int
+ file, text string // only used for debugging
+}
+
+// SetLocation sets the Location for a given Stmt.
+func (l *Location) SetLocation(index, row, col int, file, text string) {
+ *l = Location{
+ Index: index,
+ Row: row,
+ Col: col,
+ file: file,
+ text: text,
+ }
+}
+
+// GetLocation returns a Stmt's Location.
+func (l *Location) GetLocation() *Location {
+ return l
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/ir/pretty.go b/vendor/github.com/open-policy-agent/opa/internal/ir/pretty.go
new file mode 100644
index 00000000..a23bad72
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/ir/pretty.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Pretty writes a human-readable representation of an IR object to w.
+func Pretty(w io.Writer, x interface{}) {
+
+ pp := &prettyPrinter{
+ depth: -1,
+ w: w,
+ }
+ Walk(pp, x)
+}
+
+type prettyPrinter struct {
+ depth int
+ w io.Writer
+}
+
+func (pp *prettyPrinter) Before(x interface{}) {
+ pp.depth++
+}
+
+func (pp *prettyPrinter) After(x interface{}) {
+ pp.depth--
+}
+
+func (pp *prettyPrinter) Visit(x interface{}) (Visitor, error) {
+ pp.writeIndent("%T %+v", x, x)
+ return pp, nil
+}
+
+func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) {
+ pad := strings.Repeat("| ", pp.depth)
+ fmt.Fprintf(pp.w, pad+f+"\n", a...)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/ir/walk.go b/vendor/github.com/open-policy-agent/opa/internal/ir/walk.go
new file mode 100644
index 00000000..08a8f424
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/ir/walk.go
@@ -0,0 +1,93 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package ir
+
+// Visitor defines the interface for visiting IR nodes.
+type Visitor interface {
+ Before(x interface{})
+ Visit(x interface{}) (Visitor, error)
+ After(x interface{})
+}
+
+// Walk invokes the visitor for nodes under x.
+func Walk(vis Visitor, x interface{}) error {
+ impl := walkerImpl{
+ vis: vis,
+ }
+ impl.walk(x)
+ return impl.err
+}
+
+type walkerImpl struct {
+ vis Visitor
+ err error
+}
+
+func (w *walkerImpl) walk(x interface{}) {
+ if w.err != nil { // abort on error
+ return
+ }
+ if x == nil {
+ return
+ }
+
+ prev := w.vis
+ w.vis.Before(x)
+ defer w.vis.After(x)
+ w.vis, w.err = w.vis.Visit(x)
+ if w.err != nil {
+ return
+ } else if w.vis == nil {
+ w.vis = prev
+ return
+ }
+
+ switch x := x.(type) {
+ case *Policy:
+ w.walk(x.Static)
+ w.walk(x.Plans)
+ w.walk(x.Funcs)
+ case *Static:
+ for _, s := range x.Strings {
+ w.walk(s)
+ }
+ for _, f := range x.BuiltinFuncs {
+ w.walk(f)
+ }
+ for _, f := range x.Files {
+ w.walk(f)
+ }
+ case *Plans:
+ for _, pl := range x.Plans {
+ w.walk(pl)
+ }
+ case *Funcs:
+ for _, fn := range x.Funcs {
+ w.walk(fn)
+ }
+ case *Func:
+ for _, b := range x.Blocks {
+ w.walk(b)
+ }
+ case *Plan:
+ for _, b := range x.Blocks {
+ w.walk(b)
+ }
+ case *Block:
+ for _, s := range x.Stmts {
+ w.walk(s)
+ }
+ case *BlockStmt:
+ for _, b := range x.Blocks {
+ w.walk(b)
+ }
+ case *ScanStmt:
+ w.walk(x.Block)
+ case *NotStmt:
+ w.walk(x.Block)
+ case *WithStmt:
+ w.walk(x.Block)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE b/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE
new file mode 100644
index 00000000..6369f4fc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 lestrrat
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go
new file mode 100644
index 00000000..ca4ac419
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/buffer/buffer.go
@@ -0,0 +1,113 @@
+// Package buffer provides a very thin wrapper around []byte buffer called
+// `Buffer`, to provide functionalities that are often used within the jwx
+// related packages
+package buffer
+
+import (
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/pkg/errors"
+)
+
+// Buffer wraps `[]byte` and provides functions that are often used in
+// the jwx related packages. One notable difference is that while
+// encoding/json marshalls `[]byte` using base64.StdEncoding, this
+// module uses base64.RawURLEncoding as mandated by the spec
+type Buffer []byte
+
+// FromUint creates a `Buffer` from an unsigned int
+func FromUint(v uint64) Buffer {
+ data := make([]byte, 8)
+ binary.BigEndian.PutUint64(data, v)
+
+ i := 0
+ for ; i < len(data); i++ {
+ if data[i] != 0x0 {
+ break
+ }
+ }
+ return Buffer(data[i:])
+}
+
+// FromBase64 constructs a new Buffer from a base64 encoded data
+func FromBase64(v []byte) (Buffer, error) {
+ b := Buffer{}
+ if err := b.Base64Decode(v); err != nil {
+ return Buffer(nil), errors.Wrap(err, "failed to decode from base64")
+ }
+
+ return b, nil
+}
+
+// FromNData constructs a new Buffer from a "n:data" format
+// (I made that name up)
+func FromNData(v []byte) (Buffer, error) {
+ size := binary.BigEndian.Uint32(v)
+ buf := make([]byte, int(size))
+ copy(buf, v[4:4+size])
+ return Buffer(buf), nil
+}
+
+// Bytes returns the raw bytes that comprises the Buffer
+func (b Buffer) Bytes() []byte {
+ return []byte(b)
+}
+
+// NData returns Datalen || Data, where Datalen is a 32 bit counter for
+// the length of the following data, and Data is the octets that comprise
+// the buffer data
+func (b Buffer) NData() []byte {
+ buf := make([]byte, 4+b.Len())
+ binary.BigEndian.PutUint32(buf, uint32(b.Len()))
+
+ copy(buf[4:], b.Bytes())
+ return buf
+}
+
+// Len returns the number of bytes that the Buffer holds
+func (b Buffer) Len() int {
+ return len(b)
+}
+
+// Base64Encode encodes the contents of the Buffer using base64.RawURLEncoding
+func (b Buffer) Base64Encode() ([]byte, error) {
+ enc := base64.RawURLEncoding
+ out := make([]byte, enc.EncodedLen(len(b)))
+ enc.Encode(out, b)
+ return out, nil
+}
+
+// Base64Decode decodes the contents of the Buffer using base64.RawURLEncoding
+func (b *Buffer) Base64Decode(v []byte) error {
+ enc := base64.RawURLEncoding
+ out := make([]byte, enc.DecodedLen(len(v)))
+ n, err := enc.Decode(out, v)
+ if err != nil {
+ return errors.Wrap(err, "failed to decode from base64")
+ }
+ out = out[:n]
+ *b = Buffer(out)
+ return nil
+}
+
+// MarshalJSON marshals the buffer into JSON format after encoding the buffer
+// with base64.RawURLEncoding
+func (b Buffer) MarshalJSON() ([]byte, error) {
+ v, err := b.Base64Encode()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to encode to base64")
+ }
+ return json.Marshal(string(v))
+}
+
+// UnmarshalJSON unmarshals from a JSON string into a Buffer, after decoding it
+// with base64.RawURLEncoding
+func (b *Buffer) UnmarshalJSON(data []byte) error {
+ var x string
+ if err := json.Unmarshal(data, &x); err != nil {
+ return errors.Wrap(err, "failed to unmarshal JSON")
+ }
+ return b.Base64Decode([]byte(x))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go
new file mode 100644
index 00000000..b7e35dc7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/elliptic.go
@@ -0,0 +1,11 @@
+package jwa
+
+// EllipticCurveAlgorithm represents the algorithms used for EC keys
+type EllipticCurveAlgorithm string
+
+// Supported values for EllipticCurveAlgorithm
+const (
+ P256 EllipticCurveAlgorithm = "P-256"
+ P384 EllipticCurveAlgorithm = "P-384"
+ P521 EllipticCurveAlgorithm = "P-521"
+)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go
new file mode 100644
index 00000000..076bd39e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/key_type.go
@@ -0,0 +1,67 @@
+package jwa
+
+import (
+ "strconv"
+
+ "github.com/pkg/errors"
+)
+
+// KeyType represents the key type ("kty") that are supported
+type KeyType string
+
+var keyTypeAlg = map[string]struct{}{"EC": {}, "oct": {}, "RSA": {}}
+
+// Supported values for KeyType
+const (
+ EC KeyType = "EC" // Elliptic Curve
+ InvalidKeyType KeyType = "" // Invalid KeyType
+ OctetSeq KeyType = "oct" // Octet sequence (used to represent symmetric keys)
+ RSA KeyType = "RSA" // RSA
+)
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (keyType *KeyType) Accept(value interface{}) error {
+ var tmp KeyType
+ switch x := value.(type) {
+ case string:
+ tmp = KeyType(x)
+ case KeyType:
+ tmp = x
+ default:
+ return errors.Errorf(`invalid type for jwa.KeyType: %T`, value)
+ }
+ _, ok := keyTypeAlg[tmp.String()]
+ if !ok {
+ return errors.Errorf("Unknown Key Type algorithm")
+ }
+
+ *keyType = tmp
+ return nil
+}
+
+// String returns the string representation of a KeyType
+func (keyType KeyType) String() string {
+ return string(keyType)
+}
+
+// UnmarshalJSON unmarshals and checks data as KeyType Algorithm
+func (keyType *KeyType) UnmarshalJSON(data []byte) error {
+ var quote byte = '"'
+ var quoted string
+ if data[0] == quote {
+ var err error
+ quoted, err = strconv.Unquote(string(data))
+ if err != nil {
+ return errors.Wrap(err, "Failed to process signature algorithm")
+ }
+ } else {
+ quoted = string(data)
+ }
+ _, ok := keyTypeAlg[quoted]
+ if !ok {
+ return errors.Errorf("Unknown signature algorithm")
+ }
+ *keyType = KeyType(quoted)
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go
new file mode 100644
index 00000000..2fe72e1d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/parameters.go
@@ -0,0 +1,29 @@
+package jwa
+
+import (
+ "crypto/elliptic"
+
+ "github.com/open-policy-agent/opa/internal/jwx/buffer"
+)
+
+// EllipticCurve provides an indirect type for a standard elliptic curve such that we can
+// use it for unmarshal
+type EllipticCurve struct {
+ elliptic.Curve
+}
+
+// AlgorithmParameters provides a single structure suitable for unmarshaling any JWK
+type AlgorithmParameters struct {
+ N buffer.Buffer `json:"n,omitempty"`
+ E buffer.Buffer `json:"e,omitempty"`
+ D buffer.Buffer `json:"d,omitempty"`
+ P buffer.Buffer `json:"p,omitempty"`
+ Q buffer.Buffer `json:"q,omitempty"`
+ Dp buffer.Buffer `json:"dp,omitempty"`
+ Dq buffer.Buffer `json:"dq,omitempty"`
+ Qi buffer.Buffer `json:"qi,omitempty"`
+ Crv EllipticCurveAlgorithm `json:"crv,omitempty"`
+ X buffer.Buffer `json:"x,omitempty"`
+ Y buffer.Buffer `json:"y,omitempty"`
+ K buffer.Buffer `json:"k,omitempty"`
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go
new file mode 100644
index 00000000..a0988eca
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwa/signature.go
@@ -0,0 +1,76 @@
+package jwa
+
+import (
+ "strconv"
+
+ "github.com/pkg/errors"
+)
+
+// SignatureAlgorithm represents the various signature algorithms as described in https://tools.ietf.org/html/rfc7518#section-3.1
+type SignatureAlgorithm string
+
+var signatureAlg = map[string]struct{}{"ES256": {}, "ES384": {}, "ES512": {}, "HS256": {}, "HS384": {}, "HS512": {}, "PS256": {}, "PS384": {}, "PS512": {}, "RS256": {}, "RS384": {}, "RS512": {}, "none": {}}
+
+// Supported values for SignatureAlgorithm
+const (
+ ES256 SignatureAlgorithm = "ES256" // ECDSA using P-256 and SHA-256
+ ES384 SignatureAlgorithm = "ES384" // ECDSA using P-384 and SHA-384
+ ES512 SignatureAlgorithm = "ES512" // ECDSA using P-521 and SHA-512
+ HS256 SignatureAlgorithm = "HS256" // HMAC using SHA-256
+ HS384 SignatureAlgorithm = "HS384" // HMAC using SHA-384
+ HS512 SignatureAlgorithm = "HS512" // HMAC using SHA-512
+ NoSignature SignatureAlgorithm = "none"
+ PS256 SignatureAlgorithm = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
+ PS384 SignatureAlgorithm = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
+ PS512 SignatureAlgorithm = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
+ RS256 SignatureAlgorithm = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
+ RS384 SignatureAlgorithm = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
+ RS512 SignatureAlgorithm = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
+ NoValue SignatureAlgorithm = "" // No value is different from none
+)
+
+// Accept is used when conversion from values given by
+// outside sources (such as JSON payloads) is required
+func (signature *SignatureAlgorithm) Accept(value interface{}) error {
+ var tmp SignatureAlgorithm
+ switch x := value.(type) {
+ case string:
+ tmp = SignatureAlgorithm(x)
+ case SignatureAlgorithm:
+ tmp = x
+ default:
+ return errors.Errorf(`invalid type for jwa.SignatureAlgorithm: %T`, value)
+ }
+ _, ok := signatureAlg[tmp.String()]
+ if !ok {
+ return errors.Errorf("Unknown signature algorithm")
+ }
+ *signature = tmp
+ return nil
+}
+
+// String returns the string representation of a SignatureAlgorithm
+func (signature SignatureAlgorithm) String() string {
+ return string(signature)
+}
+
+// UnmarshalJSON unmarshals and checks data as Signature Algorithm
+func (signature *SignatureAlgorithm) UnmarshalJSON(data []byte) error {
+ var quote byte = '"'
+ var quoted string
+ if data[0] == quote {
+ var err error
+ quoted, err = strconv.Unquote(string(data))
+ if err != nil {
+ return errors.Wrap(err, "Failed to process signature algorithm")
+ }
+ } else {
+ quoted = string(data)
+ }
+ _, ok := signatureAlg[quoted]
+ if !ok {
+ return errors.Errorf("Unknown signature algorithm")
+ }
+ *signature = SignatureAlgorithm(quoted)
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go
new file mode 100644
index 00000000..30bee46b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/ecdsa.go
@@ -0,0 +1,120 @@
+package jwk
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "math/big"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+func newECDSAPublicKey(key *ecdsa.PublicKey) (*ECDSAPublicKey, error) {
+
+ var hdr StandardHeaders
+ err := hdr.Set(KeyTypeKey, jwa.EC)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to set Key Type")
+ }
+
+ return &ECDSAPublicKey{
+ StandardHeaders: &hdr,
+ key: key,
+ }, nil
+}
+
+func newECDSAPrivateKey(key *ecdsa.PrivateKey) (*ECDSAPrivateKey, error) {
+
+ var hdr StandardHeaders
+ err := hdr.Set(KeyTypeKey, jwa.EC)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to set Key Type")
+ }
+
+ return &ECDSAPrivateKey{
+ StandardHeaders: &hdr,
+ key: key,
+ }, nil
+}
+
+// Materialize returns the EC-DSA public key represented by this JWK
+func (k ECDSAPublicKey) Materialize() (interface{}, error) {
+ return k.key, nil
+}
+
+// Materialize returns the EC-DSA private key represented by this JWK
+func (k ECDSAPrivateKey) Materialize() (interface{}, error) {
+ return k.key, nil
+}
+
+// GenerateKey creates a ECDSAPublicKey from JWK format
+func (k *ECDSAPublicKey) GenerateKey(keyJSON *RawKeyJSON) error {
+
+ var x, y big.Int
+
+ if keyJSON.X == nil || keyJSON.Y == nil || keyJSON.Crv == "" {
+ return errors.Errorf("Missing mandatory key parameters X, Y or Crv")
+ }
+
+ x.SetBytes(keyJSON.X.Bytes())
+ y.SetBytes(keyJSON.Y.Bytes())
+
+ var curve elliptic.Curve
+ switch keyJSON.Crv {
+ case jwa.P256:
+ curve = elliptic.P256()
+ case jwa.P384:
+ curve = elliptic.P384()
+ case jwa.P521:
+ curve = elliptic.P521()
+ default:
+ return errors.Errorf(`invalid curve name %s`, keyJSON.Crv)
+ }
+
+ *k = ECDSAPublicKey{
+ StandardHeaders: &keyJSON.StandardHeaders,
+ key: &ecdsa.PublicKey{
+ Curve: curve,
+ X: &x,
+ Y: &y,
+ },
+ }
+ return nil
+}
+
+// GenerateKey creates a ECDSAPrivateKey from JWK format
+func (k *ECDSAPrivateKey) GenerateKey(keyJSON *RawKeyJSON) error {
+
+ if keyJSON.D == nil {
+ return errors.Errorf("Missing mandatory key parameter D")
+ }
+ eCDSAPublicKey := &ECDSAPublicKey{}
+ err := eCDSAPublicKey.GenerateKey(keyJSON)
+ if err != nil {
+ return errors.Wrap(err, `failed to generate public key`)
+ }
+ dBytes := keyJSON.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := eCDSAPublicKey.key.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ if octetLength-len(dBytes) != 0 {
+ return errors.Errorf("Failed to generate private key. Incorrect D value")
+ }
+ privateKey := &ecdsa.PrivateKey{
+ PublicKey: *eCDSAPublicKey.key,
+ D: (&big.Int{}).SetBytes(keyJSON.D.Bytes()),
+ }
+
+ k.key = privateKey
+ k.StandardHeaders = &keyJSON.StandardHeaders
+
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go
new file mode 100644
index 00000000..cf700ee8
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/headers.go
@@ -0,0 +1,178 @@
+package jwk
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// Convenience constants for common JWK parameters
+const (
+ AlgorithmKey = "alg"
+ KeyIDKey = "kid"
+ KeyOpsKey = "key_ops"
+ KeyTypeKey = "kty"
+ KeyUsageKey = "use"
+ PrivateParamsKey = "privateParams"
+)
+
+// Headers provides a common interface to all future possible headers
+type Headers interface {
+ Get(string) (interface{}, bool)
+ Set(string, interface{}) error
+ Walk(func(string, interface{}) error) error
+ GetAlgorithm() jwa.SignatureAlgorithm
+ GetKeyID() string
+ GetKeyOps() KeyOperationList
+ GetKeyType() jwa.KeyType
+ GetKeyUsage() string
+ GetPrivateParams() map[string]interface{}
+}
+
+// StandardHeaders stores the common JWK parameters
+type StandardHeaders struct {
+ Algorithm *jwa.SignatureAlgorithm `json:"alg,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.4
+ KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ KeyOps KeyOperationList `json:"key_ops,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.3
+ KeyType jwa.KeyType `json:"kty,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.1
+ KeyUsage string `json:"use,omitempty"` // https://tools.ietf.org/html/rfc7517#section-4.2
+ PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
+}
+
+// GetAlgorithm is a convenience function to retrieve the corresponding value stored in the StandardHeaders
+func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm {
+ if v := h.Algorithm; v != nil {
+ return *v
+ }
+ return jwa.NoValue
+}
+
+// GetKeyID is a convenience function to retrieve the corresponding value stored in the StandardHeaders
+func (h *StandardHeaders) GetKeyID() string {
+ return h.KeyID
+}
+
+// GetKeyOps is a convenience function to retrieve the corresponding value stored in the StandardHeaders
+func (h *StandardHeaders) GetKeyOps() KeyOperationList {
+ return h.KeyOps
+}
+
+// GetKeyType is a convenience function to retrieve the corresponding value stored in the StandardHeaders
+func (h *StandardHeaders) GetKeyType() jwa.KeyType {
+ return h.KeyType
+}
+
+// GetKeyUsage is a convenience function to retrieve the corresponding value stored in the StandardHeaders
+func (h *StandardHeaders) GetKeyUsage() string {
+ return h.KeyUsage
+}
+
+// GetPrivateParams is a convenience function to retrieve the corresponding value stored in the StandardHeaders
+func (h *StandardHeaders) GetPrivateParams() map[string]interface{} {
+ return h.PrivateParams
+}
+
+// Get is a general getter function for JWK StandardHeaders structure
+func (h *StandardHeaders) Get(name string) (interface{}, bool) {
+ switch name {
+ case AlgorithmKey:
+ alg := h.GetAlgorithm()
+ if alg != jwa.NoValue {
+ return alg, true
+ }
+ return nil, false
+ case KeyIDKey:
+ v := h.KeyID
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case KeyOpsKey:
+ v := h.KeyOps
+ if v == nil {
+ return nil, false
+ }
+ return v, true
+ case KeyTypeKey:
+ v := h.KeyType
+ if v == jwa.InvalidKeyType {
+ return nil, false
+ }
+ return v, true
+ case KeyUsageKey:
+ v := h.KeyUsage
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case PrivateParamsKey:
+ v := h.PrivateParams
+ if len(v) == 0 {
+ return nil, false
+ }
+ return v, true
+ default:
+ return nil, false
+ }
+}
+
+// Set is a general setter function for JWK StandardHeaders structure
+func (h *StandardHeaders) Set(name string, value interface{}) error {
+ switch name {
+ case AlgorithmKey:
+ var acceptor jwa.SignatureAlgorithm
+ if err := acceptor.Accept(value); err != nil {
+ return errors.Wrapf(err, `invalid value for %s key`, AlgorithmKey)
+ }
+ h.Algorithm = &acceptor
+ return nil
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.KeyID = v
+ return nil
+ }
+ return errors.Errorf("invalid value for %s key: %T", KeyIDKey, value)
+ case KeyOpsKey:
+ if err := h.KeyOps.Accept(value); err != nil {
+ return errors.Wrapf(err, "invalid value for %s key", KeyOpsKey)
+ }
+ return nil
+ case KeyTypeKey:
+ if err := h.KeyType.Accept(value); err != nil {
+ return errors.Wrapf(err, "invalid value for %s key", KeyTypeKey)
+ }
+ return nil
+ case KeyUsageKey:
+ if v, ok := value.(string); ok {
+ h.KeyUsage = v
+ return nil
+ }
+ return errors.Errorf("invalid value for %s key: %T", KeyUsageKey, value)
+ case PrivateParamsKey:
+ if v, ok := value.(map[string]interface{}); ok {
+ h.PrivateParams = v
+ return nil
+ }
+ return errors.Errorf("invalid value for %s key: %T", PrivateParamsKey, value)
+ default:
+ return errors.Errorf(`invalid key: %s`, name)
+ }
+}
+
+// Walk iterates over all JWK standard headers fields while applying a function to its value.
+func (h StandardHeaders) Walk(f func(string, interface{}) error) error {
+ for _, key := range []string{AlgorithmKey, KeyIDKey, KeyOpsKey, KeyTypeKey, KeyUsageKey, PrivateParamsKey} {
+ if v, ok := h.Get(key); ok {
+ if err := f(key, v); err != nil {
+ return errors.Wrapf(err, `walk function returned error for %s`, key)
+ }
+ }
+ }
+
+ for k, v := range h.PrivateParams {
+ if err := f(k, v); err != nil {
+ return errors.Wrapf(err, `walk function returned error for %s`, k)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go
new file mode 100644
index 00000000..98229730
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/interface.go
@@ -0,0 +1,70 @@
+package jwk
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// Set is a convenience struct to allow generating and parsing
+// JWK sets as opposed to single JWKs
+type Set struct {
+ Keys []Key `json:"keys"`
+}
+
+// Key defines the minimal interface for each of the
+// key types. Their use and implementation differ significantly
+// between each key type, so you should use type assertions
+// to perform more specific tasks with each key
+type Key interface {
+ Headers
+
+ // Materialize creates the corresponding key. For example,
+ // RSA types would create *rsa.PublicKey or *rsa.PrivateKey,
+ // EC types would create *ecdsa.PublicKey or *ecdsa.PrivateKey,
+ // and OctetSeq types create a []byte key.
+ Materialize() (interface{}, error)
+ GenerateKey(*RawKeyJSON) error
+}
+
+// RawKeyJSON is a generic type that represents any kind of JWK
+type RawKeyJSON struct {
+ StandardHeaders
+ jwa.AlgorithmParameters
+}
+
+// RawKeySetJSON is a generic type that represents a JWK Set
+type RawKeySetJSON struct {
+ Keys []RawKeyJSON `json:"keys"`
+}
+
+// RSAPublicKey is a type of JWK generated from RSA public keys
+type RSAPublicKey struct {
+ *StandardHeaders
+ key *rsa.PublicKey
+}
+
+// RSAPrivateKey is a type of JWK generated from RSA private keys
+type RSAPrivateKey struct {
+ *StandardHeaders
+ key *rsa.PrivateKey
+}
+
+// SymmetricKey is a type of JWK generated from symmetric keys
+type SymmetricKey struct {
+ *StandardHeaders
+ key []byte
+}
+
+// ECDSAPublicKey is a type of JWK generated from ECDSA public keys
+type ECDSAPublicKey struct {
+ *StandardHeaders
+ key *ecdsa.PublicKey
+}
+
+// ECDSAPrivateKey is a type of JWK generated from ECDH-ES private keys
+type ECDSAPrivateKey struct {
+ *StandardHeaders
+ key *ecdsa.PrivateKey
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go
new file mode 100644
index 00000000..22ccf8df
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/jwk.go
@@ -0,0 +1,150 @@
+// Package jwk implements JWK as described in https://tools.ietf.org/html/rfc7517
+package jwk
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/json"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// GetPublicKey returns the public key based on the private key type.
+// For rsa key types *rsa.PublicKey is returned; for ecdsa key types *ecdsa.PublicKey;
+// for byte slice (raw) keys, the key itself is returned. If the corresponding
+// public key cannot be deduced, an error is returned
+func GetPublicKey(key interface{}) (interface{}, error) {
+ if key == nil {
+		return nil, errors.New(`jwk.GetPublicKey requires a non-nil key`)
+ }
+
+ switch v := key.(type) {
+ // Mental note: although Public() is defined in both types,
+ // you can not coalesce the clauses for rsa.PrivateKey and
+ // ecdsa.PrivateKey, as then `v` becomes interface{}
+ // b/c the compiler cannot deduce the exact type.
+ case *rsa.PrivateKey:
+ return v.Public(), nil
+ case *ecdsa.PrivateKey:
+ return v.Public(), nil
+ case []byte:
+ return v, nil
+ default:
+ return nil, errors.Errorf(`invalid key type %T`, key)
+ }
+}
+
+// GetKeyTypeFromKey returns the jwa.KeyType corresponding to the given key.
+func GetKeyTypeFromKey(key interface{}) jwa.KeyType {
+
+ switch key.(type) {
+ case *rsa.PrivateKey, *rsa.PublicKey:
+ return jwa.RSA
+ case *ecdsa.PrivateKey, *ecdsa.PublicKey:
+ return jwa.EC
+ case []byte:
+ return jwa.OctetSeq
+ default:
+ return jwa.InvalidKeyType
+ }
+}
+
+// New creates a jwk.Key from the given key.
+func New(key interface{}) (Key, error) {
+ if key == nil {
+ return nil, errors.New(`jwk.New requires a non-nil key`)
+ }
+
+ switch v := key.(type) {
+ case *rsa.PrivateKey:
+ return newRSAPrivateKey(v)
+ case *rsa.PublicKey:
+ return newRSAPublicKey(v)
+ case *ecdsa.PrivateKey:
+ return newECDSAPrivateKey(v)
+ case *ecdsa.PublicKey:
+ return newECDSAPublicKey(v)
+ case []byte:
+ return newSymmetricKey(v)
+ default:
+ return nil, errors.Errorf(`invalid key type %T`, key)
+ }
+}
+
+func parse(jwkSrc string) (*Set, error) {
+
+ var jwkKeySet Set
+ var jwkKey Key
+ rawKeySetJSON := &RawKeySetJSON{}
+ err := json.Unmarshal([]byte(jwkSrc), rawKeySetJSON)
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to unmarshal JWK Set")
+ }
+ if len(rawKeySetJSON.Keys) == 0 {
+
+ // It might be a single key
+ rawKeyJSON := &RawKeyJSON{}
+ err := json.Unmarshal([]byte(jwkSrc), rawKeyJSON)
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to unmarshal JWK")
+ }
+ jwkKey, err = rawKeyJSON.GenerateKey()
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to generate key")
+ }
+ // Add to set
+ jwkKeySet.Keys = append(jwkKeySet.Keys, jwkKey)
+ } else {
+ for i := range rawKeySetJSON.Keys {
+ rawKeyJSON := rawKeySetJSON.Keys[i]
+ jwkKey, err = rawKeyJSON.GenerateKey()
+ if err != nil {
+				return nil, errors.Wrap(err, "Failed to generate key")
+ }
+ jwkKeySet.Keys = append(jwkKeySet.Keys, jwkKey)
+ }
+ }
+ return &jwkKeySet, nil
+}
+
+// ParseBytes parses JWK from the incoming byte buffer.
+func ParseBytes(buf []byte) (*Set, error) {
+ return parse(string(buf[:]))
+}
+
+// ParseString parses JWK from the incoming string.
+func ParseString(s string) (*Set, error) {
+ return parse(s)
+}
+
+// GenerateKey creates an internal representation of a key from a raw JWK JSON
+func (r *RawKeyJSON) GenerateKey() (Key, error) {
+
+ var key Key
+
+ switch r.KeyType {
+ case jwa.RSA:
+ if r.D != nil {
+ key = &RSAPrivateKey{}
+ } else {
+ key = &RSAPublicKey{}
+ }
+ case jwa.EC:
+ if r.D != nil {
+ key = &ECDSAPrivateKey{}
+ } else {
+ key = &ECDSAPublicKey{}
+ }
+ case jwa.OctetSeq:
+ key = &SymmetricKey{}
+ default:
+ return nil, errors.Errorf(`Unrecognized key type`)
+ }
+ err := key.GenerateKey(r)
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to generate key from JWK")
+ }
+ return key, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go
new file mode 100644
index 00000000..36d4dd3e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/key_ops.go
@@ -0,0 +1,68 @@
+package jwk
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+// KeyUsageType is used to denote what this key should be used for
+type KeyUsageType string
+
+const (
+ // ForSignature is the value used in the headers to indicate that
+ // this key should be used for signatures
+ ForSignature KeyUsageType = "sig"
+ // ForEncryption is the value used in the headers to indicate that
+	// this key should be used for encryption
+ ForEncryption KeyUsageType = "enc"
+)
+
+// KeyOperation is used to denote the allowed operations for a Key
+type KeyOperation string
+
+// KeyOperationList represents a slice of KeyOperation
+type KeyOperationList []KeyOperation
+
+var keyOps = map[string]struct{}{"sign": {}, "verify": {}, "encrypt": {}, "decrypt": {}, "wrapKey": {}, "unwrapKey": {}, "deriveKey": {}, "deriveBits": {}}
+
+// KeyOperation constants
+const (
+ KeyOpSign KeyOperation = "sign" // (compute digital signature or MAC)
+ KeyOpVerify KeyOperation = "verify" // (verify digital signature or MAC)
+ KeyOpEncrypt KeyOperation = "encrypt" // (encrypt content)
+ KeyOpDecrypt KeyOperation = "decrypt" // (decrypt content and validate decryption, if applicable)
+ KeyOpWrapKey KeyOperation = "wrapKey" // (encrypt key)
+ KeyOpUnwrapKey KeyOperation = "unwrapKey" // (decrypt key and validate decryption, if applicable)
+ KeyOpDeriveKey KeyOperation = "deriveKey" // (derive key)
+ KeyOpDeriveBits KeyOperation = "deriveBits" // (derive bits not to be used as a key)
+)
+
+// Accept determines if Key Operation is valid
+func (keyOperationList *KeyOperationList) Accept(v interface{}) error {
+ switch x := v.(type) {
+ case KeyOperationList:
+ *keyOperationList = x
+ return nil
+ default:
+ return errors.Errorf(`invalid value %T`, v)
+ }
+}
+
+// UnmarshalJSON unmarshals and validates data as a KeyOperationList
+func (keyOperationList *KeyOperationList) UnmarshalJSON(data []byte) error {
+ var tempKeyOperationList []string
+ err := json.Unmarshal(data, &tempKeyOperationList)
+ if err != nil {
+ return fmt.Errorf("invalid key operation")
+ }
+ for _, value := range tempKeyOperationList {
+ _, ok := keyOps[value]
+ if !ok {
+ return fmt.Errorf("unknown key operation")
+ }
+ *keyOperationList = append(*keyOperationList, KeyOperation(value))
+ }
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go
new file mode 100644
index 00000000..c885ffff
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/rsa.go
@@ -0,0 +1,103 @@
+package jwk
+
+import (
+ "crypto/rsa"
+ "math/big"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+func newRSAPublicKey(key *rsa.PublicKey) (*RSAPublicKey, error) {
+
+ var hdr StandardHeaders
+ err := hdr.Set(KeyTypeKey, jwa.RSA)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to set Key Type")
+ }
+ return &RSAPublicKey{
+ StandardHeaders: &hdr,
+ key: key,
+ }, nil
+}
+
+func newRSAPrivateKey(key *rsa.PrivateKey) (*RSAPrivateKey, error) {
+
+ var hdr StandardHeaders
+ err := hdr.Set(KeyTypeKey, jwa.RSA)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to set Key Type")
+ }
+ return &RSAPrivateKey{
+ StandardHeaders: &hdr,
+ key: key,
+ }, nil
+}
+
+// Materialize returns the standard RSA Public Key representation stored in the internal representation
+func (k *RSAPublicKey) Materialize() (interface{}, error) {
+ if k.key == nil {
+ return nil, errors.New(`key has no rsa.PublicKey associated with it`)
+ }
+ return k.key, nil
+}
+
+// Materialize returns the standard RSA Private Key representation stored in the internal representation
+func (k *RSAPrivateKey) Materialize() (interface{}, error) {
+ if k.key == nil {
+ return nil, errors.New(`key has no rsa.PrivateKey associated with it`)
+ }
+ return k.key, nil
+}
+
+// GenerateKey creates a RSAPublicKey from a RawKeyJSON
+func (k *RSAPublicKey) GenerateKey(keyJSON *RawKeyJSON) error {
+
+ if keyJSON.N == nil || keyJSON.E == nil {
+ return errors.Errorf("Missing mandatory key parameters N or E")
+ }
+ rsaPublicKey := &rsa.PublicKey{
+ N: (&big.Int{}).SetBytes(keyJSON.N.Bytes()),
+ E: int((&big.Int{}).SetBytes(keyJSON.E.Bytes()).Int64()),
+ }
+ k.key = rsaPublicKey
+ k.StandardHeaders = &keyJSON.StandardHeaders
+ return nil
+}
+
+// GenerateKey creates a RSAPrivateKey from a RawKeyJSON
+func (k *RSAPrivateKey) GenerateKey(keyJSON *RawKeyJSON) error {
+
+ rsaPublicKey := &RSAPublicKey{}
+ err := rsaPublicKey.GenerateKey(keyJSON)
+ if err != nil {
+ return errors.Wrap(err, "failed to generate public key")
+ }
+
+ if keyJSON.D == nil || keyJSON.P == nil || keyJSON.Q == nil {
+ return errors.Errorf("Missing mandatory key parameters D, P or Q")
+ }
+ privateKey := &rsa.PrivateKey{
+ PublicKey: *rsaPublicKey.key,
+ D: (&big.Int{}).SetBytes(keyJSON.D.Bytes()),
+ Primes: []*big.Int{
+ (&big.Int{}).SetBytes(keyJSON.P.Bytes()),
+ (&big.Int{}).SetBytes(keyJSON.Q.Bytes()),
+ },
+ }
+
+ if keyJSON.Dp.Len() > 0 {
+ privateKey.Precomputed.Dp = (&big.Int{}).SetBytes(keyJSON.Dp.Bytes())
+ }
+ if keyJSON.Dq.Len() > 0 {
+ privateKey.Precomputed.Dq = (&big.Int{}).SetBytes(keyJSON.Dq.Bytes())
+ }
+ if keyJSON.Qi.Len() > 0 {
+ privateKey.Precomputed.Qinv = (&big.Int{}).SetBytes(keyJSON.Qi.Bytes())
+ }
+
+ k.key = privateKey
+ k.StandardHeaders = &keyJSON.StandardHeaders
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go
new file mode 100644
index 00000000..8a073615
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jwk/symmetric.go
@@ -0,0 +1,41 @@
+package jwk
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+func newSymmetricKey(key []byte) (*SymmetricKey, error) {
+ var hdr StandardHeaders
+
+ err := hdr.Set(KeyTypeKey, jwa.OctetSeq)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to set Key Type")
+ }
+ return &SymmetricKey{
+ StandardHeaders: &hdr,
+ key: key,
+ }, nil
+}
+
+// Materialize returns the octets for this symmetric key.
+// Since this is a symmetric key, this just calls Octets
+func (s SymmetricKey) Materialize() (interface{}, error) {
+ return s.Octets(), nil
+}
+
+// Octets returns the octets in the key
+func (s SymmetricKey) Octets() []byte {
+ return s.key
+}
+
+// GenerateKey creates a Symmetric key from a RawKeyJSON
+func (s *SymmetricKey) GenerateKey(keyJSON *RawKeyJSON) error {
+
+ *s = SymmetricKey{
+ StandardHeaders: &keyJSON.StandardHeaders,
+ key: keyJSON.K,
+ }
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go
new file mode 100644
index 00000000..045e38fa
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/headers.go
@@ -0,0 +1,154 @@
+package jws
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// Constants for JWS Common parameters
+const (
+ AlgorithmKey = "alg"
+ ContentTypeKey = "cty"
+ CriticalKey = "crit"
+ JWKKey = "jwk"
+ JWKSetURLKey = "jku"
+ KeyIDKey = "kid"
+ PrivateParamsKey = "privateParams"
+ TypeKey = "typ"
+)
+
+// Headers provides a common interface for common header parameters
+type Headers interface {
+ Get(string) (interface{}, bool)
+ Set(string, interface{}) error
+ GetAlgorithm() jwa.SignatureAlgorithm
+}
+
+// StandardHeaders contains JWS common parameters.
+type StandardHeaders struct {
+ Algorithm jwa.SignatureAlgorithm `json:"alg,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.1
+ ContentType string `json:"cty,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.10
+ Critical []string `json:"crit,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.11
+ JWK string `json:"jwk,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.3
+ JWKSetURL string `json:"jku,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.2
+ KeyID string `json:"kid,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.4
+ PrivateParams map[string]interface{} `json:"privateParams,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
+ Type string `json:"typ,omitempty"` // https://tools.ietf.org/html/rfc7515#section-4.1.9
+}
+
+// GetAlgorithm returns algorithm
+func (h *StandardHeaders) GetAlgorithm() jwa.SignatureAlgorithm {
+ return h.Algorithm
+}
+
+// Get is a general getter function for StandardHeaders structure
+func (h *StandardHeaders) Get(name string) (interface{}, bool) {
+ switch name {
+ case AlgorithmKey:
+ v := h.Algorithm
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case ContentTypeKey:
+ v := h.ContentType
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case CriticalKey:
+ v := h.Critical
+ if len(v) == 0 {
+ return nil, false
+ }
+ return v, true
+ case JWKKey:
+ v := h.JWK
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case JWKSetURLKey:
+ v := h.JWKSetURL
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case KeyIDKey:
+ v := h.KeyID
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ case PrivateParamsKey:
+ v := h.PrivateParams
+ if len(v) == 0 {
+ return nil, false
+ }
+ return v, true
+ case TypeKey:
+ v := h.Type
+ if v == "" {
+ return nil, false
+ }
+ return v, true
+ default:
+ return nil, false
+ }
+}
+
+// Set is a general setter function for StandardHeaders structure
+func (h *StandardHeaders) Set(name string, value interface{}) error {
+ switch name {
+ case AlgorithmKey:
+ if err := h.Algorithm.Accept(value); err != nil {
+ return errors.Wrapf(err, `invalid value for %s key`, AlgorithmKey)
+ }
+ return nil
+ case ContentTypeKey:
+ if v, ok := value.(string); ok {
+ h.ContentType = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, ContentTypeKey, value)
+ case CriticalKey:
+ if v, ok := value.([]string); ok {
+ h.Critical = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, CriticalKey, value)
+ case JWKKey:
+ if v, ok := value.(string); ok {
+ h.JWK = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, JWKKey, value)
+ case JWKSetURLKey:
+ if v, ok := value.(string); ok {
+ h.JWKSetURL = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, JWKSetURLKey, value)
+ case KeyIDKey:
+ if v, ok := value.(string); ok {
+ h.KeyID = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, KeyIDKey, value)
+ case PrivateParamsKey:
+ if v, ok := value.(map[string]interface{}); ok {
+ h.PrivateParams = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, PrivateParamsKey, value)
+ case TypeKey:
+ if v, ok := value.(string); ok {
+ h.Type = v
+ return nil
+ }
+ return errors.Errorf(`invalid value for %s key: %T`, TypeKey, value)
+ default:
+ return errors.Errorf(`invalid key: %s`, name)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go
new file mode 100644
index 00000000..e647c8ac
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/interface.go
@@ -0,0 +1,22 @@
+package jws
+
+// Message represents a full JWS encoded message. Flattened serialization
+// is not supported as a struct, but rather it's represented as a
+// Message struct with only one `Signature` element.
+//
+// Do not expect to use the Message object to verify or construct
+// signed payloads with. You should only use this when you actually
+// want to programmatically view the contents of the full JWS Payload.
+//
+// To sign and verify, use the appropriate `SignWithOption()` and `Verify()` functions
+type Message struct {
+ Payload []byte `json:"payload"`
+ Signatures []*Signature `json:"signatures,omitempty"`
+}
+
+// Signature represents the headers and signature of a JWS message
+type Signature struct {
+ Headers Headers `json:"header,omitempty"` // Unprotected Headers
+ Protected Headers `json:"Protected,omitempty"` // Protected Headers
+ Signature []byte `json:"signature,omitempty"` // GetSignature
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go
new file mode 100644
index 00000000..6fca28d2
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/jws.go
@@ -0,0 +1,210 @@
+// Package jws implements the digital Signature on JSON based data
+// structures as described in https://tools.ietf.org/html/rfc7515
+//
+// If you do not care about the details, the only things that you
+// would need to use are the following functions:
+//
+// jws.SignWithOption(Payload, algorithm, key)
+// jws.Verify(encodedjws, algorithm, key)
+//
+// To sign, simply use `jws.SignWithOption`. `Payload` is a []byte buffer that
+// contains whatever data you want to sign. `alg` is one of the
+// jwa.SignatureAlgorithm constants from package jwa. For RSA and
+// ECDSA family of algorithms, you will need to prepare a private key.
+// For HMAC family, you just need a []byte value. The `jws.SignWithOption`
+// function will return the encoded JWS message on success.
+//
+// To verify, use `jws.Verify`. It will parse the `encodedjws` buffer
+// and verify the result using `algorithm` and `key`. Upon successful
+// verification, the original Payload is returned, so you can work on it.
+package jws
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "strings"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+ "github.com/open-policy-agent/opa/internal/jwx/jwk"
+ "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
+ "github.com/open-policy-agent/opa/internal/jwx/jws/verify"
+
+ "github.com/pkg/errors"
+)
+
+// SignLiteral generates a Signature for the given Payload and Headers, and serializes
+// it in compact serialization format. In this format you may NOT use
+// multiple signers.
+//
+func SignLiteral(payload []byte, alg jwa.SignatureAlgorithm, key interface{}, hdrBuf []byte) ([]byte, error) {
+ encodedHdr := base64.RawURLEncoding.EncodeToString(hdrBuf)
+ encodedPayload := base64.RawURLEncoding.EncodeToString(payload)
+ signingInput := strings.Join(
+ []string{
+ encodedHdr,
+ encodedPayload,
+ }, ".",
+ )
+ signer, err := sign.New(alg)
+ if err != nil {
+ return nil, errors.Wrap(err, `failed to create signer`)
+ }
+ signature, err := signer.Sign([]byte(signingInput), key)
+ if err != nil {
+ return nil, errors.Wrap(err, `failed to sign Payload`)
+ }
+ encodedSignature := base64.RawURLEncoding.EncodeToString(signature)
+ compactSerialization := strings.Join(
+ []string{
+ signingInput,
+ encodedSignature,
+ }, ".",
+ )
+ return []byte(compactSerialization), nil
+}
+
+// SignWithOption generates a Signature for the given Payload, and serializes
+// it in compact serialization format. In this format you may NOT use
+// multiple signers.
+//
+// If you would like to pass custom Headers, use the WithHeaders option.
+func SignWithOption(payload []byte, alg jwa.SignatureAlgorithm, key interface{}) ([]byte, error) {
+ var headers Headers = &StandardHeaders{}
+
+ err := headers.Set(AlgorithmKey, alg)
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to set alg value")
+ }
+
+ hdrBuf, err := json.Marshal(headers)
+ if err != nil {
+ return nil, errors.Wrap(err, `failed to marshal Headers`)
+ }
+ return SignLiteral(payload, alg, key, hdrBuf)
+}
+
+// Verify checks if the given JWS message is verifiable using `alg` and `key`.
+// If the verification is successful, `err` is nil, and the content of the
+// Payload that was signed is returned. If you need more fine-grained
+// control of the verification process, manually call `Parse`, generate a
+// verifier, and call `Verify` on the parsed JWS message object.
+func Verify(buf []byte, alg jwa.SignatureAlgorithm, key interface{}) (ret []byte, err error) {
+
+ verifier, err := verify.New(alg)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create verifier")
+ }
+
+ buf = bytes.TrimSpace(buf)
+ if len(buf) == 0 {
+ return nil, errors.New(`attempt to verify empty buffer`)
+ }
+
+ parts, err := SplitCompact(string(buf[:]))
+ if err != nil {
+ return nil, errors.Wrap(err, `failed extract from compact serialization format`)
+ }
+
+ signingInput := strings.Join(
+ []string{
+ parts[0],
+ parts[1],
+ }, ".",
+ )
+
+ decodedSignature, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to decode signature")
+ }
+ if err := verifier.Verify([]byte(signingInput), decodedSignature, key); err != nil {
+ return nil, errors.Wrap(err, "Failed to verify message")
+ }
+
+	if ret, err = base64.RawURLEncoding.DecodeString(parts[1]); err == nil {
+		return ret, nil
+	}
+	return nil, errors.Wrap(err, "Failed to decode Payload")
+}
+
+// VerifyWithJWK verifies the JWS message using the specified JWK
+func VerifyWithJWK(buf []byte, key jwk.Key) (payload []byte, err error) {
+
+ keyVal, err := key.Materialize()
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to materialize key")
+ }
+ return Verify(buf, key.GetAlgorithm(), keyVal)
+}
+
+// VerifyWithJWKSet verifies the JWS message using a JWK key set.
+// Every key in the set is tried in turn and the payload from the
+// first key that successfully verifies the message is returned;
+// an error is returned only when no key can verify it.
+func VerifyWithJWKSet(buf []byte, keyset *jwk.Set) (payload []byte, err error) {
+
+ for _, key := range keyset.Keys {
+ payload, err := VerifyWithJWK(buf, key)
+ if err == nil {
+ return payload, nil
+ }
+ }
+ return nil, errors.New("failed to verify with any of the keys")
+}
+
+// ParseByte parses a JWS value serialized via compact serialization and provided as []byte.
+func ParseByte(jwsCompact []byte) (m *Message, err error) {
+ return parseCompact(string(jwsCompact[:]))
+}
+
+// ParseString parses a JWS value serialized via compact serialization and provided as string.
+func ParseString(s string) (*Message, error) {
+ return parseCompact(s)
+}
+
+// SplitCompact splits a JWT and returns its three parts
+// separately: Protected Headers, Payload and Signature.
+func SplitCompact(jwsCompact string) ([]string, error) {
+
+ parts := strings.Split(jwsCompact, ".")
+ if len(parts) < 3 {
+ return nil, errors.New("Failed to split compact serialization")
+ }
+ return parts, nil
+}
+
+// parseCompact parses a JWS value serialized via compact serialization.
+func parseCompact(str string) (m *Message, err error) {
+
+ var decodedHeader, decodedPayload, decodedSignature []byte
+ parts, err := SplitCompact(str)
+ if err != nil {
+ return nil, errors.Wrap(err, `invalid compact serialization format`)
+ }
+
+ if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
+ return nil, errors.Wrap(err, `failed to decode Headers`)
+ }
+ var hdr StandardHeaders
+ if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
+ return nil, errors.Wrap(err, `failed to parse JOSE Headers`)
+ }
+
+ if decodedPayload, err = base64.RawURLEncoding.DecodeString(parts[1]); err != nil {
+ return nil, errors.Wrap(err, `failed to decode Payload`)
+ }
+
+ if len(parts) > 2 {
+ if decodedSignature, err = base64.RawURLEncoding.DecodeString(parts[2]); err != nil {
+ return nil, errors.Wrap(err, `failed to decode Signature`)
+ }
+ }
+
+ var msg Message
+ msg.Payload = decodedPayload
+ msg.Signatures = append(msg.Signatures, &Signature{
+ Protected: &hdr,
+ Signature: decodedSignature,
+ })
+ return &msg, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go
new file mode 100644
index 00000000..1366a3d7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/message.go
@@ -0,0 +1,26 @@
+package jws
+
+// PublicHeaders returns the public headers in a JWS
+func (s Signature) PublicHeaders() Headers {
+ return s.Headers
+}
+
+// ProtectedHeaders returns the protected headers in a JWS
+func (s Signature) ProtectedHeaders() Headers {
+ return s.Protected
+}
+
+// GetSignature returns the signature in a JWS
+func (s Signature) GetSignature() []byte {
+ return s.Signature
+}
+
+// GetPayload returns the payload in a JWS
+func (m Message) GetPayload() []byte {
+ return m.Payload
+}
+
+// GetSignatures returns all the signatures in a JWS
+func (m Message) GetSignatures() []*Signature {
+ return m.Signatures
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go
new file mode 100644
index 00000000..70239068
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/ecdsa.go
@@ -0,0 +1,84 @@
+package sign
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+
+ "github.com/pkg/errors"
+)
+
+var ecdsaSignFuncs = map[jwa.SignatureAlgorithm]ecdsaSignFunc{}
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]crypto.Hash{
+ jwa.ES256: crypto.SHA256,
+ jwa.ES384: crypto.SHA384,
+ jwa.ES512: crypto.SHA512,
+ }
+
+ for alg, h := range algs {
+ ecdsaSignFuncs[alg] = makeECDSASignFunc(h)
+ }
+}
+
+func makeECDSASignFunc(hash crypto.Hash) ecdsaSignFunc {
+ return ecdsaSignFunc(func(payload []byte, key *ecdsa.PrivateKey) ([]byte, error) {
+ curveBits := key.Curve.Params().BitSize
+ keyBytes := curveBits / 8
+ // Curve bits do not need to be a multiple of 8.
+ if curveBits%8 > 0 {
+ keyBytes++
+ }
+ h := hash.New()
+ h.Write(payload)
+ r, s, err := ecdsa.Sign(rand.Reader, key, h.Sum(nil))
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to sign payload using ecdsa")
+ }
+
+ rBytes := r.Bytes()
+ rBytesPadded := make([]byte, keyBytes)
+ copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+ sBytes := s.Bytes()
+ sBytesPadded := make([]byte, keyBytes)
+ copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+ out := append(rBytesPadded, sBytesPadded...)
+ return out, nil
+ })
+}
+
+func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSASigner, error) {
+ signfn, ok := ecdsaSignFuncs[alg]
+ if !ok {
+ return nil, errors.Errorf(`unsupported algorithm while trying to create ECDSA signer: %s`, alg)
+ }
+
+ return &ECDSASigner{
+ alg: alg,
+ sign: signfn,
+ }, nil
+}
+
+// Algorithm returns the signer algorithm
+func (s ECDSASigner) Algorithm() jwa.SignatureAlgorithm {
+ return s.alg
+}
+
+// Sign signs payload with a ECDSA private key
+func (s ECDSASigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+ if key == nil {
+ return nil, errors.New(`missing private key while signing payload`)
+ }
+
+ privateKey, ok := key.(*ecdsa.PrivateKey)
+ if !ok {
+ return nil, errors.Errorf(`invalid key type %T. *ecdsa.PrivateKey is required`, key)
+ }
+
+ return s.sign(payload, privateKey)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go
new file mode 100644
index 00000000..cbf7b9f0
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/hmac.go
@@ -0,0 +1,66 @@
+package sign
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "crypto/sha512"
+ "hash"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+
+ "github.com/pkg/errors"
+)
+
+var hmacSignFuncs = map[jwa.SignatureAlgorithm]hmacSignFunc{}
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]func() hash.Hash{
+ jwa.HS256: sha256.New,
+ jwa.HS384: sha512.New384,
+ jwa.HS512: sha512.New,
+ }
+
+ for alg, h := range algs {
+ hmacSignFuncs[alg] = makeHMACSignFunc(h)
+
+ }
+}
+
+func newHMAC(alg jwa.SignatureAlgorithm) (*HMACSigner, error) {
+ signer, ok := hmacSignFuncs[alg]
+ if !ok {
+ return nil, errors.Errorf(`unsupported algorithm while trying to create HMAC signer: %s`, alg)
+ }
+
+ return &HMACSigner{
+ alg: alg,
+ sign: signer,
+ }, nil
+}
+
+func makeHMACSignFunc(hfunc func() hash.Hash) hmacSignFunc {
+ return hmacSignFunc(func(payload []byte, key []byte) ([]byte, error) {
+ h := hmac.New(hfunc, key)
+ h.Write(payload)
+ return h.Sum(nil), nil
+ })
+}
+
+// Algorithm returns the signer algorithm
+func (s HMACSigner) Algorithm() jwa.SignatureAlgorithm {
+ return s.alg
+}
+
+// Sign signs payload with a Symmetric key
+func (s HMACSigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+ hmackey, ok := key.([]byte)
+ if !ok {
+ return nil, errors.Errorf(`invalid key type %T. []byte is required`, key)
+ }
+
+ if len(hmackey) == 0 {
+ return nil, errors.New(`missing key while signing payload`)
+ }
+
+ return s.sign(payload, hmackey)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go
new file mode 100644
index 00000000..42a10c42
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/interface.go
@@ -0,0 +1,45 @@
+package sign
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// Signer provides a common interface for supported alg signing methods
+type Signer interface {
+ // Sign creates a signature for the given `payload`.
+ // `key` is the key used for signing the payload, and is usually
+ // the private key type associated with the signature method. For example,
+ // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
+ // `*"crypto/rsa".PrivateKey` type.
+ // Check the documentation for each signer for details
+ Sign(payload []byte, key interface{}) ([]byte, error)
+
+ Algorithm() jwa.SignatureAlgorithm
+}
+
+type rsaSignFunc func([]byte, *rsa.PrivateKey) ([]byte, error)
+
+// RSASigner uses crypto/rsa to sign the payloads.
+type RSASigner struct {
+ alg jwa.SignatureAlgorithm
+ sign rsaSignFunc
+}
+
+type ecdsaSignFunc func([]byte, *ecdsa.PrivateKey) ([]byte, error)
+
+// ECDSASigner uses crypto/ecdsa to sign the payloads.
+type ECDSASigner struct {
+ alg jwa.SignatureAlgorithm
+ sign ecdsaSignFunc
+}
+
+type hmacSignFunc func([]byte, []byte) ([]byte, error)
+
+// HMACSigner uses crypto/hmac to sign the payloads.
+type HMACSigner struct {
+ alg jwa.SignatureAlgorithm
+ sign hmacSignFunc
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go
new file mode 100644
index 00000000..bc51dbcd
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/rsa.go
@@ -0,0 +1,97 @@
+package sign
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+
+ "github.com/pkg/errors"
+)
+
+var rsaSignFuncs = map[jwa.SignatureAlgorithm]rsaSignFunc{}
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]struct {
+ Hash crypto.Hash
+ SignFunc func(crypto.Hash) rsaSignFunc
+ }{
+ jwa.RS256: {
+ Hash: crypto.SHA256,
+ SignFunc: makeSignPKCS1v15,
+ },
+ jwa.RS384: {
+ Hash: crypto.SHA384,
+ SignFunc: makeSignPKCS1v15,
+ },
+ jwa.RS512: {
+ Hash: crypto.SHA512,
+ SignFunc: makeSignPKCS1v15,
+ },
+ jwa.PS256: {
+ Hash: crypto.SHA256,
+ SignFunc: makeSignPSS,
+ },
+ jwa.PS384: {
+ Hash: crypto.SHA384,
+ SignFunc: makeSignPSS,
+ },
+ jwa.PS512: {
+ Hash: crypto.SHA512,
+ SignFunc: makeSignPSS,
+ },
+ }
+
+ for alg, item := range algs {
+ rsaSignFuncs[alg] = item.SignFunc(item.Hash)
+ }
+}
+
+func makeSignPKCS1v15(hash crypto.Hash) rsaSignFunc {
+ return rsaSignFunc(func(payload []byte, key *rsa.PrivateKey) ([]byte, error) {
+ h := hash.New()
+ h.Write(payload)
+ return rsa.SignPKCS1v15(rand.Reader, key, hash, h.Sum(nil))
+ })
+}
+
+func makeSignPSS(hash crypto.Hash) rsaSignFunc {
+ return rsaSignFunc(func(payload []byte, key *rsa.PrivateKey) ([]byte, error) {
+ h := hash.New()
+ h.Write(payload)
+ return rsa.SignPSS(rand.Reader, key, hash, h.Sum(nil), &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ })
+ })
+}
+
+func newRSA(alg jwa.SignatureAlgorithm) (*RSASigner, error) {
+ signfn, ok := rsaSignFuncs[alg]
+ if !ok {
+ return nil, errors.Errorf(`unsupported algorithm while trying to create RSA signer: %s`, alg)
+ }
+ return &RSASigner{
+ alg: alg,
+ sign: signfn,
+ }, nil
+}
+
+// Algorithm returns the signer algorithm
+func (s RSASigner) Algorithm() jwa.SignatureAlgorithm {
+ return s.alg
+}
+
+// Sign creates a signature using crypto/rsa. key must be a non-nil instance of
+// `*"crypto/rsa".PrivateKey`.
+func (s RSASigner) Sign(payload []byte, key interface{}) ([]byte, error) {
+ if key == nil {
+ return nil, errors.New(`missing private key while signing payload`)
+ }
+ rsakey, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.Errorf(`invalid key type %T. *rsa.PrivateKey is required`, key)
+ }
+
+ return s.sign(payload, rsakey)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go
new file mode 100644
index 00000000..a808b054
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/sign/sign.go
@@ -0,0 +1,59 @@
+package sign
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// New creates a signer that signs payloads using the given signature algorithm.
+func New(alg jwa.SignatureAlgorithm) (Signer, error) {
+ switch alg {
+ case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
+ return newRSA(alg)
+ case jwa.ES256, jwa.ES384, jwa.ES512:
+ return newECDSA(alg)
+ case jwa.HS256, jwa.HS384, jwa.HS512:
+ return newHMAC(alg)
+ default:
+ return nil, errors.Errorf(`unsupported signature algorithm %s`, alg)
+ }
+}
+
+// GetSigningKey returns a *rsa.PrivateKey or *ecdsa.PrivateKey typically encoded in PEM blocks of type "RSA PRIVATE KEY"
+// or "EC PRIVATE KEY" for RSA and ECDSA family of algorithms.
+// For HMAC family, it returns a []byte value
+func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) {
+ switch alg {
+ case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
+ block, _ := pem.Decode([]byte(key))
+ if block == nil {
+ return nil, fmt.Errorf("failed to parse PEM block containing the key")
+ }
+
+ priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return priv, nil
+ case jwa.ES256, jwa.ES384, jwa.ES512:
+ block, _ := pem.Decode([]byte(key))
+ if block == nil {
+ return nil, fmt.Errorf("failed to parse PEM block containing the key")
+ }
+
+ priv, err := x509.ParseECPrivateKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ return priv, nil
+ case jwa.HS256, jwa.HS384, jwa.HS512:
+ return []byte(key), nil
+ default:
+ return nil, errors.Errorf("unsupported signature algorithm: %s", alg)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go
new file mode 100644
index 00000000..e71dc6f8
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/ecdsa.go
@@ -0,0 +1,67 @@
+package verify
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "math/big"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+var ecdsaVerifyFuncs = map[jwa.SignatureAlgorithm]ecdsaVerifyFunc{}
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]crypto.Hash{
+ jwa.ES256: crypto.SHA256,
+ jwa.ES384: crypto.SHA384,
+ jwa.ES512: crypto.SHA512,
+ }
+
+ for alg, h := range algs {
+ ecdsaVerifyFuncs[alg] = makeECDSAVerifyFunc(h)
+ }
+}
+
+func makeECDSAVerifyFunc(hash crypto.Hash) ecdsaVerifyFunc {
+ return ecdsaVerifyFunc(func(payload []byte, signature []byte, key *ecdsa.PublicKey) error {
+
+ r, s := &big.Int{}, &big.Int{}
+ n := len(signature) / 2
+ r.SetBytes(signature[:n])
+ s.SetBytes(signature[n:])
+
+ h := hash.New()
+ h.Write(payload)
+
+ if !ecdsa.Verify(key, h.Sum(nil), r, s) {
+ return errors.New(`failed to verify signature using ecdsa`)
+ }
+ return nil
+ })
+}
+
+func newECDSA(alg jwa.SignatureAlgorithm) (*ECDSAVerifier, error) {
+ verifyfn, ok := ecdsaVerifyFuncs[alg]
+ if !ok {
+ return nil, errors.Errorf(`unsupported algorithm while trying to create ECDSA verifier: %s`, alg)
+ }
+
+ return &ECDSAVerifier{
+ verify: verifyfn,
+ }, nil
+}
+
+// Verify checks whether the signature for a given input and key is correct
+func (v ECDSAVerifier) Verify(payload []byte, signature []byte, key interface{}) error {
+ if key == nil {
+ return errors.New(`missing public key while verifying payload`)
+ }
+ ecdsakey, ok := key.(*ecdsa.PublicKey)
+ if !ok {
+ return errors.Errorf(`invalid key type %T. *ecdsa.PublicKey is required`, key)
+ }
+
+ return v.verify(payload, signature, ecdsakey)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go
new file mode 100644
index 00000000..77e45887
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/hmac.go
@@ -0,0 +1,33 @@
+package verify
+
+import (
+ "crypto/hmac"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+ "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
+)
+
+func newHMAC(alg jwa.SignatureAlgorithm) (*HMACVerifier, error) {
+
+ s, err := sign.New(alg)
+ if err != nil {
+ return nil, errors.Wrap(err, `failed to generate HMAC signer`)
+ }
+ return &HMACVerifier{signer: s}, nil
+}
+
+// Verify checks whether the signature for a given input and key is correct
+func (v HMACVerifier) Verify(signingInput, signature []byte, key interface{}) (err error) {
+
+ expected, err := v.signer.Sign(signingInput, key)
+ if err != nil {
+ return errors.Wrap(err, `failed to generated signature`)
+ }
+
+ if !hmac.Equal(signature, expected) {
+ return errors.New(`failed to match hmac signature`)
+ }
+ return nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go
new file mode 100644
index 00000000..f5beb697
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/interface.go
@@ -0,0 +1,39 @@
+package verify
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jws/sign"
+)
+
+// Verifier provides a common interface for supported alg verification methods
+type Verifier interface {
+ // Verify checks whether the payload and signature are valid for
+ // the given key.
+ // `key` is the key used for verifying the payload, and is usually
+ // the public key associated with the signature method. For example,
+ // for `jwa.RSXXX` and `jwa.PSXXX` types, you need to pass the
+ // `*"crypto/rsa".PublicKey` type.
+ // Check the documentation for each verifier for details
+ Verify(payload []byte, signature []byte, key interface{}) error
+}
+
+type rsaVerifyFunc func([]byte, []byte, *rsa.PublicKey) error
+
+// RSAVerifier implements the Verifier interface
+type RSAVerifier struct {
+ verify rsaVerifyFunc
+}
+
+type ecdsaVerifyFunc func([]byte, []byte, *ecdsa.PublicKey) error
+
+// ECDSAVerifier implements the Verifier interface
+type ECDSAVerifier struct {
+ verify ecdsaVerifyFunc
+}
+
+// HMACVerifier implements the Verifier interface
+type HMACVerifier struct {
+ signer sign.Signer
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go
new file mode 100644
index 00000000..8188ceb1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/rsa.go
@@ -0,0 +1,88 @@
+package verify
+
+import (
+ "crypto"
+ "crypto/rsa"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+
+ "github.com/pkg/errors"
+)
+
+var rsaVerifyFuncs = map[jwa.SignatureAlgorithm]rsaVerifyFunc{}
+
+func init() {
+ algs := map[jwa.SignatureAlgorithm]struct {
+ Hash crypto.Hash
+ VerifyFunc func(crypto.Hash) rsaVerifyFunc
+ }{
+ jwa.RS256: {
+ Hash: crypto.SHA256,
+ VerifyFunc: makeVerifyPKCS1v15,
+ },
+ jwa.RS384: {
+ Hash: crypto.SHA384,
+ VerifyFunc: makeVerifyPKCS1v15,
+ },
+ jwa.RS512: {
+ Hash: crypto.SHA512,
+ VerifyFunc: makeVerifyPKCS1v15,
+ },
+ jwa.PS256: {
+ Hash: crypto.SHA256,
+ VerifyFunc: makeVerifyPSS,
+ },
+ jwa.PS384: {
+ Hash: crypto.SHA384,
+ VerifyFunc: makeVerifyPSS,
+ },
+ jwa.PS512: {
+ Hash: crypto.SHA512,
+ VerifyFunc: makeVerifyPSS,
+ },
+ }
+
+ for alg, item := range algs {
+ rsaVerifyFuncs[alg] = item.VerifyFunc(item.Hash)
+ }
+}
+
+func makeVerifyPKCS1v15(hash crypto.Hash) rsaVerifyFunc {
+ return rsaVerifyFunc(func(payload, signature []byte, key *rsa.PublicKey) error {
+ h := hash.New()
+ h.Write(payload)
+ return rsa.VerifyPKCS1v15(key, hash, h.Sum(nil), signature)
+ })
+}
+
+func makeVerifyPSS(hash crypto.Hash) rsaVerifyFunc {
+ return rsaVerifyFunc(func(payload, signature []byte, key *rsa.PublicKey) error {
+ h := hash.New()
+ h.Write(payload)
+ return rsa.VerifyPSS(key, hash, h.Sum(nil), signature, nil)
+ })
+}
+
+func newRSA(alg jwa.SignatureAlgorithm) (*RSAVerifier, error) {
+ verifyfn, ok := rsaVerifyFuncs[alg]
+ if !ok {
+ return nil, errors.Errorf(`unsupported algorithm while trying to create RSA verifier: %s`, alg)
+ }
+
+ return &RSAVerifier{
+ verify: verifyfn,
+ }, nil
+}
+
+// Verify checks if a JWS is valid.
+func (v RSAVerifier) Verify(payload, signature []byte, key interface{}) error {
+ if key == nil {
+ return errors.New(`missing public key while verifying payload`)
+ }
+ rsaKey, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return errors.Errorf(`invalid key type %T. *rsa.PublicKey is required`, key)
+ }
+
+ return v.verify(payload, signature, rsaKey)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go
new file mode 100644
index 00000000..1bb3bf83
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/jwx/jws/verify/verify.go
@@ -0,0 +1,57 @@
+package verify
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/jwx/jwa"
+)
+
+// New creates a new JWS verifier using the specified algorithm
+// and the public key
+func New(alg jwa.SignatureAlgorithm) (Verifier, error) {
+ switch alg {
+ case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512:
+ return newRSA(alg)
+ case jwa.ES256, jwa.ES384, jwa.ES512:
+ return newECDSA(alg)
+ case jwa.HS256, jwa.HS384, jwa.HS512:
+ return newHMAC(alg)
+ default:
+ return nil, errors.Errorf(`unsupported signature algorithm: %s`, alg)
+ }
+}
+
+// GetSigningKey returns a *rsa.PublicKey or *ecdsa.PublicKey typically encoded in PEM blocks of type "PUBLIC KEY",
+// for RSA and ECDSA family of algorithms.
+// For HMAC family, it returns a []byte value
+func GetSigningKey(key string, alg jwa.SignatureAlgorithm) (interface{}, error) {
+ switch alg {
+ case jwa.RS256, jwa.RS384, jwa.RS512, jwa.PS256, jwa.PS384, jwa.PS512, jwa.ES256, jwa.ES384, jwa.ES512:
+ block, _ := pem.Decode([]byte(key))
+ if block == nil {
+ return nil, fmt.Errorf("failed to parse PEM block containing the key")
+ }
+
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey, *ecdsa.PublicKey:
+ return pub, nil
+ default:
+ return nil, fmt.Errorf("invalid key type %T", pub)
+ }
+ case jwa.HS256, jwa.HS384, jwa.HS512:
+ return []byte(key), nil
+ default:
+ return nil, errors.Errorf("unsupported signature algorithm: %s", alg)
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/lcss/README.md b/vendor/github.com/open-policy-agent/opa/internal/lcss/README.md
new file mode 100644
index 00000000..39b8d82c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/lcss/README.md
@@ -0,0 +1,3 @@
+# Longest Common Substring
+
+Original source https://github.com/vmarkovtsev/go-lcss
diff --git a/vendor/github.com/open-policy-agent/opa/internal/lcss/lcss.go b/vendor/github.com/open-policy-agent/opa/internal/lcss/lcss.go
new file mode 100644
index 00000000..8217a345
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/lcss/lcss.go
@@ -0,0 +1,197 @@
+package lcss
+
+import "bytes"
+
+// LongestCommonSubstring returns the longest substring which is present in all the given strings.
+// https://en.wikipedia.org/wiki/Longest_common_substring_problem
+// Not to be confused with the Longest Common Subsequence.
+// Complexity:
+// * time: sum of `n_i*log(n_i)` where `n_i` is the length of each string.
+// * space: sum of `n_i`.
+// Returns a byte slice which is never a nil.
+//
+// ### Algorithm.
+// We build suffix arrays for each of the passed string and then follow the same procedure
+// as in merge sort: pick the least suffix in the lexicographical order. It is possible
+// because the suffix arrays are already sorted.
+// We record the last encountered suffixes from each of the strings and measure the longest
+// common prefix of those at each "merge sort" step.
+// The string comparisons are optimized by maintaining the char-level prefix tree of the "heads"
+// of the suffix array sequences.
+func LongestCommonSubstring(strs ...[]byte) []byte {
+ strslen := len(strs)
+ if strslen == 0 {
+ return []byte{}
+ }
+ if strslen == 1 {
+ return strs[0]
+ }
+ suffixes := make([][]int, strslen)
+ for i, str := range strs {
+ suffixes[i] = qsufsort(str)
+ }
+ return lcss(strs, suffixes)
+}
+
+func lcss(strs [][]byte, suffixes [][]int) []byte {
+ strslen := len(strs)
+ if strslen == 0 {
+ return []byte{}
+ }
+ if strslen == 1 {
+ return strs[0]
+ }
+ minstrlen := len(strs[0]) // minimum length of the strings
+ for _, str := range strs {
+ if minstrlen > len(str) {
+ minstrlen = len(str)
+ }
+ }
+ heads := make([]int, strslen) // position in each suffix array
+ boilerplate := make([][]byte, strslen) // existing suffixes in the tree
+ boiling := 0 // indicates how many distinct suffix arrays are presented in `boilerplate`
+ var root charNode // the character tree built on the strings from `boilerplate`
+ lcs := []byte{} // our function's return value, `var lcss []byte` does *not* work
+ for {
+ mini := -1
+ var minSuffixStr []byte
+ for i, head := range heads {
+ if head >= len(suffixes[i]) {
+ // this suffix array has been scanned till the end
+ continue
+ }
+ suffix := strs[i][suffixes[i][head]:]
+ if minSuffixStr == nil {
+ // initialize
+ mini = i
+ minSuffixStr = suffix
+ } else if bytes.Compare(minSuffixStr, suffix) > 0 {
+ // the current suffix is the smallest in the lexicographical order
+ mini = i
+ minSuffixStr = suffix
+ }
+ }
+ if mini == -1 {
+ // all heads exhausted
+ break
+ }
+ if boilerplate[mini] != nil {
+ // if we already have a suffix from this string, replace it with the new one
+ root.Remove(boilerplate[mini])
+ } else {
+ // we track the number of distinct strings which have been touched
+ // when `boiling` becomes strslen we can start measuring the longest common prefix
+ boiling++
+ }
+ boilerplate[mini] = minSuffixStr
+ root.Add(minSuffixStr)
+ heads[mini]++
+ if boiling == strslen && root.LongestCommonPrefixLength() > len(lcs) {
+ // all heads > 0, the current common prefix of the suffixes is the longest
+ lcs = root.LongestCommonPrefix()
+ if len(lcs) == minstrlen {
+ // early exit - we will never find a longer substring
+ break
+ }
+ }
+ }
+ return lcs
+}
+
+// charNode builds a tree of individual characters.
+// `used` is the counter for collecting garbage: those nodes which have `used`=0 are removed.
+// The root charNode always remains intact apart from `children`.
+// The tree supports 4 operations:
+// 1. Add() a new string.
+// 2. Remove() an existing string which was previously Add()-ed.
+// 3. LongestCommonPrefixLength().
+// 4. LongestCommonPrefix().
+type charNode struct {
+ char byte
+ children []charNode
+ used int
+}
+
+// Add includes a new string into the tree. We start from the root and
+// increment `used` of all the nodes we visit.
+func (cn *charNode) Add(str []byte) {
+ head := cn
+ for i, char := range str {
+ found := false
+ for j, child := range head.children {
+ if child.char == char {
+ head.children[j].used++
+ head = &head.children[j] // -> child
+ found = true
+ break
+ }
+ }
+ if !found {
+ // add the missing nodes one by one
+ for _, char = range str[i:] {
+ head.children = append(head.children, charNode{char: char, children: nil, used: 1})
+ head = &head.children[len(head.children)-1]
+ }
+ break
+ }
+ }
+}
+
+// Remove excludes a node which was previously Add()-ed.
+// We start from the root and decrement `used` of all the nodes we visit.
+// If there is a node with `used`=0, we erase it from the parent's list of children
+// and stop traversing the tree.
+func (cn *charNode) Remove(str []byte) {
+ stop := false
+ head := cn
+ for _, char := range str {
+ for j, child := range head.children {
+ if child.char != char {
+ continue
+ }
+ head.children[j].used--
+ var parent *charNode
+ head, parent = &head.children[j], head // shift to the child
+ if head.used == 0 {
+ parent.children = append(parent.children[:j], parent.children[j+1:]...)
+ // we can skip deleting the rest of the nodes - they have been already discarded
+ stop = true
+ }
+ break
+ }
+ if stop {
+ break
+ }
+ }
+}
+
+// LongestCommonPrefixLength returns the length of the longest common prefix of the strings
+// which are stored in the tree. We visit the children recursively starting from the root and
+// stop if `used` value decreases or there is more than one child.
+func (cn charNode) LongestCommonPrefixLength() int {
+ var result int
+ for head := cn; len(head.children) == 1 && head.children[0].used >= head.used; head = head.children[0] {
+
+ result++
+ }
+ return result
+}
+
+// LongestCommonPrefix returns the longest common prefix of the strings
+// which are stored in the tree. We compute the length by calling LongestCommonPrefixLength()
+// and then record the characters which we visit along the way from the root to the last node.
+func (cn charNode) LongestCommonPrefix() []byte {
+ result := make([]byte, cn.LongestCommonPrefixLength())
+ if len(result) == 0 {
+ return result
+ }
+ var i int
+ for head := cn.children[0]; ; head = head.children[0] {
+ result[i] = head.char
+ i++
+ if i == len(result) {
+ break
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/lcss/qsufsort.go b/vendor/github.com/open-policy-agent/opa/internal/lcss/qsufsort.go
new file mode 100644
index 00000000..61c51968
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/lcss/qsufsort.go
@@ -0,0 +1,169 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This algorithm is based on "Faster Suffix Sorting"
+// by N. Jesper Larsson and Kunihiko Sadakane
+// paper: http://www.larsson.dogma.net/ssrev-tr.pdf
+// code: http://www.larsson.dogma.net/qsufsort.c
+
+// This algorithm computes the suffix array sa by computing its inverse.
+// Consecutive groups of suffixes in sa are labeled as sorted groups or
+// unsorted groups. For a given pass of the sorter, all suffixes are ordered
+// up to their first h characters, and sa is h-ordered. Suffixes in their
+// final positions and unambiguously sorted in h-order are in a sorted group.
+// Consecutive groups of suffixes with identical first h characters are an
+// unsorted group. In each pass of the algorithm, unsorted groups are sorted
+// according to the group number of their following suffix.
+
+// In the implementation, if sa[i] is negative, it indicates that i is
+// the first element of a sorted group of length -sa[i], and can be skipped.
+// An unsorted group sa[i:k] is given the group number of the index of its
+// last element, k-1. The group numbers are stored in the inverse slice (inv),
+// and when all groups are sorted, this slice is the inverse suffix array.
+
+package lcss
+
+import "sort"
+
+// qsufsort constructs the suffix array for a given string.
+func qsufsort(data []byte) []int {
+ // initial sorting by first byte of suffix
+ sa := sortedByFirstByte(data)
+ if len(sa) < 2 {
+ return sa
+ }
+ // initialize the group lookup table
+ // this becomes the inverse of the suffix array when all groups are sorted
+ inv := initGroups(sa, data)
+
+ // the index starts 1-ordered
+ sufSortable := &suffixSortable{sa: sa, inv: inv, h: 1}
+
+ for sa[0] > -len(sa) { // until all suffixes are one big sorted group
+ // The suffixes are h-ordered, make them 2*h-ordered
+ pi := 0 // pi is first position of first group
+ sl := 0 // sl is negated length of sorted groups
+ for pi < len(sa) {
+ if s := sa[pi]; s < 0 { // if pi starts sorted group
+ pi -= s // skip over sorted group
+ sl += s // add negated length to sl
+ } else { // if pi starts unsorted group
+ if sl != 0 {
+ sa[pi+sl] = sl // combine sorted groups before pi
+ sl = 0
+ }
+ pk := inv[s] + 1 // pk-1 is last position of unsorted group
+ sufSortable.sa = sa[pi:pk]
+ sort.Sort(sufSortable)
+ sufSortable.updateGroups(pi)
+ pi = pk // next group
+ }
+ }
+ if sl != 0 { // if the array ends with a sorted group
+ sa[pi+sl] = sl // combine sorted groups at end of sa
+ }
+
+ sufSortable.h *= 2 // double sorted depth
+ }
+
+ for i := range sa { // reconstruct suffix array from inverse
+ sa[inv[i]] = i
+ }
+ return sa
+}
+
+func sortedByFirstByte(data []byte) []int {
+ // total byte counts
+ var count [256]int
+ for _, b := range data {
+ count[b]++
+ }
+ // make count[b] equal index of first occurrence of b in sorted array
+ sum := 0
+ for b := range count {
+ count[b], sum = sum, count[b]+sum
+ }
+ // iterate through bytes, placing index into the correct spot in sa
+ sa := make([]int, len(data))
+ for i, b := range data {
+ sa[count[b]] = i
+ count[b]++
+ }
+ return sa
+}
+
+func initGroups(sa []int, data []byte) []int {
+ // label contiguous same-letter groups with the same group number
+ inv := make([]int, len(data))
+ prevGroup := len(sa) - 1
+ groupByte := data[sa[prevGroup]]
+ for i := len(sa) - 1; i >= 0; i-- {
+ if b := data[sa[i]]; b < groupByte {
+ if prevGroup == i+1 {
+ sa[i+1] = -1
+ }
+ groupByte = b
+ prevGroup = i
+ }
+ inv[sa[i]] = prevGroup
+ if prevGroup == 0 {
+ sa[0] = -1
+ }
+ }
+ // Separate out the final suffix to the start of its group.
+ // This is necessary to ensure the suffix "a" is before "aba"
+ // when using a potentially unstable sort.
+ lastByte := data[len(data)-1]
+ s := -1
+ for i := range sa {
+ if sa[i] >= 0 {
+ if data[sa[i]] == lastByte && s == -1 {
+ s = i
+ }
+ if sa[i] == len(sa)-1 {
+ sa[i], sa[s] = sa[s], sa[i]
+ inv[sa[s]] = s
+ sa[s] = -1 // mark it as an isolated sorted group
+ break
+ }
+ }
+ }
+ return inv
+}
+
+type suffixSortable struct {
+ sa []int
+ inv []int
+ h int
+ buf []int // common scratch space
+}
+
+func (x *suffixSortable) Len() int { return len(x.sa) }
+func (x *suffixSortable) Less(i, j int) bool { return x.inv[x.sa[i]+x.h] < x.inv[x.sa[j]+x.h] }
+func (x *suffixSortable) Swap(i, j int) { x.sa[i], x.sa[j] = x.sa[j], x.sa[i] }
+
+func (x *suffixSortable) updateGroups(offset int) {
+ bounds := x.buf[0:0]
+ group := x.inv[x.sa[0]+x.h]
+ for i := 1; i < len(x.sa); i++ {
+ if g := x.inv[x.sa[i]+x.h]; g > group {
+ bounds = append(bounds, i)
+ group = g
+ }
+ }
+ bounds = append(bounds, len(x.sa))
+ x.buf = bounds
+
+ // update the group numberings after all new groups are determined
+ prev := 0
+ for _, b := range bounds {
+ for i := prev; i < b; i++ {
+ x.inv[x.sa[i]] = offset + b - 1
+ }
+ if b-prev == 1 {
+ x.sa[prev] = -1
+ }
+ prev = b
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/leb128/leb128.go b/vendor/github.com/open-policy-agent/opa/internal/leb128/leb128.go
new file mode 100644
index 00000000..24ddc909
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/leb128/leb128.go
@@ -0,0 +1,170 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package leb128 implements LEB128 integer encoding.
+package leb128
+
+import (
+ "io"
+)
+
+// MustReadVarInt32 returns an int32 from r or panics.
+func MustReadVarInt32(r io.Reader) int32 {
+ i32, err := ReadVarInt32(r)
+ if err != nil {
+ panic(err)
+ }
+ return i32
+}
+
+// MustReadVarInt64 returns an int64 from r or panics.
+func MustReadVarInt64(r io.Reader) int64 {
+ i64, err := ReadVarInt64(r)
+ if err != nil {
+ panic(err)
+ }
+ return i64
+}
+
+// MustReadVarUint32 returns an uint32 from r or panics.
+func MustReadVarUint32(r io.Reader) uint32 {
+ u32, err := ReadVarUint32(r)
+ if err != nil {
+ panic(err)
+ }
+ return u32
+}
+
+// MustReadVarUint64 returns an uint64 from r or panics.
+func MustReadVarUint64(r io.Reader) uint64 {
+ u64, err := ReadVarUint64(r)
+ if err != nil {
+ panic(err)
+ }
+ return u64
+}
+
+// Copied from http://dwarfstd.org/doc/Dwarf3.pdf.
+
+// ReadVarUint32 tries to read a uint32 from r.
+func ReadVarUint32(r io.Reader) (uint32, error) {
+ u64, err := ReadVarUint64(r)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(u64), nil
+}
+
+// ReadVarUint64 tries to read a uint64 from r.
+func ReadVarUint64(r io.Reader) (uint64, error) {
+ var result uint64
+ var shift uint64
+ buf := make([]byte, 1)
+ for {
+ if _, err := r.Read(buf); err != nil {
+ return 0, err
+ }
+ v := uint64(buf[0])
+ result |= (v & 0x7F) << shift
+ if v&0x80 == 0 {
+ return result, nil
+ }
+ shift += 7
+ }
+
+}
+
+// ReadVarInt32 tries to read a int32 from r.
+func ReadVarInt32(r io.Reader) (int32, error) {
+ i64, err := ReadVarInt64(r)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i64), nil
+}
+
+// ReadVarInt64 tries to read a int64 from r.
+func ReadVarInt64(r io.Reader) (int64, error) {
+ var result int64
+ var shift uint64
+ size := uint64(32)
+ buf := make([]byte, 1)
+ for {
+ if _, err := r.Read(buf); err != nil {
+ return 0, err
+ }
+ v := int64(buf[0])
+ result |= (v & 0x7F) << shift
+ shift += 7
+ if v&0x80 == 0 {
+ if (shift < size) && (v&0x40 != 0) {
+ result |= (^0 << shift)
+ }
+ return result, nil
+ }
+ }
+}
+
+// WriteVarUint32 writes u to w.
+func WriteVarUint32(w io.Writer, u uint32) error {
+ var b []byte
+ _, err := w.Write(appendUleb128(b, uint64(u)))
+ return err
+}
+
+// WriteVarUint64 writes u to w.
+func WriteVarUint64(w io.Writer, u uint64) error {
+ var b []byte
+ _, err := w.Write(appendUleb128(b, u))
+ return err
+}
+
+// WriteVarInt32 writes u to w.
+func WriteVarInt32(w io.Writer, i int32) error {
+ var b []byte
+ _, err := w.Write(appendSleb128(b, int64(i)))
+ return err
+}
+
+// WriteVarInt64 writes u to w.
+func WriteVarInt64(w io.Writer, i int64) error {
+ var b []byte
+ _, err := w.Write(appendSleb128(b, i))
+ return err
+}
+
+// Copied from https://github.com/golang/go/blob/master/src/cmd/internal/dwarf/dwarf.go.
+
+// appendUleb128 appends v to b using DWARF's unsigned LEB128 encoding.
+func appendUleb128(b []byte, v uint64) []byte {
+ for {
+ c := uint8(v & 0x7f)
+ v >>= 7
+ if v != 0 {
+ c |= 0x80
+ }
+ b = append(b, c)
+ if c&0x80 == 0 {
+ break
+ }
+ }
+ return b
+}
+
+// appendSleb128 appends v to b using DWARF's signed LEB128 encoding.
+func appendSleb128(b []byte, v int64) []byte {
+ for {
+ c := uint8(v & 0x7f)
+ s := uint8(v & 0x40)
+ v >>= 7
+ if (v != -1 || s == 0) && (v != 0 || s != 0) {
+ c |= 0x80
+ }
+ b = append(b, c)
+ if c&0x80 == 0 {
+ break
+ }
+ }
+ return b
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go b/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go
new file mode 100644
index 00000000..fa53236d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/merge/merge.go
@@ -0,0 +1,40 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package merge contains helpers to merge data structures
+// frequently encountered in OPA.
+package merge
+
+// InterfaceMaps returns the result of merging a and b. If a and b cannot be
+// merged because of conflicting key-value pairs, ok is false.
+func InterfaceMaps(a map[string]interface{}, b map[string]interface{}) (c map[string]interface{}, ok bool) {
+
+ c = map[string]interface{}{}
+ for k := range a {
+ c[k] = a[k]
+ }
+
+ for k := range b {
+
+ add := b[k]
+ exist, ok := c[k]
+ if !ok {
+ c[k] = add
+ continue
+ }
+
+ existObj, existOk := exist.(map[string]interface{})
+ addObj, addOk := add.(map[string]interface{})
+ if !existOk || !addOk {
+ return nil, false
+ }
+
+ c[k], ok = InterfaceMaps(existObj, addObj)
+ if !ok {
+ return nil, false
+ }
+ }
+
+ return c, true
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
new file mode 100644
index 00000000..ca9426bc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go
@@ -0,0 +1,1943 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package planner contains a query planner for Rego queries.
+package planner
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/ast/location"
+ "github.com/open-policy-agent/opa/internal/ir"
+)
+
+// QuerySet represents the input to the planner.
+type QuerySet struct {
+ Name string
+ Queries []ast.Body
+ RewrittenVars map[ast.Var]ast.Var
+}
+
+type planiter func() error
+type binaryiter func(ir.Local, ir.Local) error
+
+type wasmBuiltin struct {
+ *ast.Builtin
+ WasmFunction string
+}
+
+// Planner implements a query planner for Rego queries.
+type Planner struct {
+ policy *ir.Policy // result of planning
+ queries []QuerySet // input queries to plan
+ modules []*ast.Module // input modules to support queries
+ strings map[string]int // global string constant indices
+ files map[string]int // global file constant indices
+ externs map[string]struct{} // built-in functions that are required in execution environment
+ decls map[string]*ast.Builtin // built-in functions that may be provided in execution environment
+ rules *ruletrie // rules that may be planned
+ funcs *funcstack // functions that have been planned
+ plan *ir.Plan // in-progress query plan
+ curr *ir.Block // in-progress query block
+ vars *varstack // in-scope variables
+ ltarget ir.Local // target variable of last planned statement
+ lnext ir.Local // next variable to use
+ loc *location.Location // location currently "being planned"
+}
+
+// New returns a new Planner object.
+func New() *Planner {
+ return &Planner{
+ policy: &ir.Policy{
+ Static: &ir.Static{},
+ Plans: &ir.Plans{},
+ Funcs: &ir.Funcs{},
+ },
+ strings: map[string]int{},
+ files: map[string]int{},
+ externs: map[string]struct{}{},
+ lnext: ir.Unused,
+ vars: newVarstack(map[ast.Var]ir.Local{
+ ast.InputRootDocument.Value.(ast.Var): ir.Input,
+ ast.DefaultRootDocument.Value.(ast.Var): ir.Data,
+ }),
+ rules: newRuletrie(),
+ funcs: newFuncstack(),
+ }
+}
+
+// WithBuiltinDecls tells the planner which built-in functions may be available
+// inside the execution environment.
+func (p *Planner) WithBuiltinDecls(decls map[string]*ast.Builtin) *Planner {
+ p.decls = decls
+ return p
+}
+
+// WithQueries sets the query sets to generate a plan for. The rewritten collection provides
+// a mapping of rewritten query vars for each query set. The planner uses rewritten variables
+// but the result set key will be the original variable name.
+func (p *Planner) WithQueries(queries []QuerySet) *Planner {
+ p.queries = queries
+ return p
+}
+
+// WithModules sets the module set that contains query dependencies.
+func (p *Planner) WithModules(modules []*ast.Module) *Planner {
+ p.modules = modules
+ return p
+}
+
+// Plan returns a IR plan for the policy query.
+func (p *Planner) Plan() (*ir.Policy, error) {
+
+ if err := p.buildFunctrie(); err != nil {
+ return nil, err
+ }
+
+ if err := p.planQueries(); err != nil {
+ return nil, err
+ }
+
+ if err := p.planExterns(); err != nil {
+ return nil, err
+ }
+
+ return p.policy, nil
+}
+
+func (p *Planner) buildFunctrie() error {
+
+ for _, module := range p.modules {
+
+ // Create functrie node for empty packages so that extent queries return
+ // empty objects. For example:
+ //
+ // package x.y
+ //
+ // Query: data.x
+ //
+ // Expected result: {"y": {}}
+ if len(module.Rules) == 0 {
+ _ = p.rules.LookupOrInsert(module.Package.Path)
+ continue
+ }
+
+ for _, rule := range module.Rules {
+ val := p.rules.LookupOrInsert(rule.Path())
+ val.rules = append(val.rules, rule)
+ }
+ }
+
+ return nil
+}
+
+func (p *Planner) planRules(rules []*ast.Rule) (string, error) {
+
+ path := rules[0].Path().String()
+
+ if funcName, ok := p.funcs.Get(path); ok {
+ return funcName, nil
+ }
+
+ // Save current state of planner.
+ //
+ // TODO(tsandall): perhaps we would be better off using stacks here or
+ // splitting block planner into separate struct that could be instantiated
+ // for rule and comprehension bodies.
+ pvars := p.vars
+ pcurr := p.curr
+ pltarget := p.ltarget
+ plnext := p.lnext
+ ploc := p.loc
+
+ // Reset the variable counter for the function plan.
+ p.lnext = ir.Input
+
+ // Set the location to the rule head.
+ p.loc = rules[0].Head.Loc()
+
+ // Create function definition for rules.
+ fn := &ir.Func{
+ Name: fmt.Sprintf("g%d.%s", p.funcs.gen, path),
+ Params: []ir.Local{
+ p.newLocal(), // input document
+ p.newLocal(), // data document
+ },
+ Return: p.newLocal(),
+ }
+
+ // Initialize parameters for functions.
+ for i := 0; i < len(rules[0].Head.Args); i++ {
+ fn.Params = append(fn.Params, p.newLocal())
+ }
+
+ params := fn.Params[2:]
+
+ // Initialize return value for partial set/object rules. Complete document
+ // rules assign directly to `fn.Return`.
+ switch rules[0].Head.DocKind() {
+ case ast.PartialObjectDoc:
+ fn.Blocks = append(fn.Blocks, p.blockWithStmt(&ir.MakeObjectStmt{Target: fn.Return}))
+ case ast.PartialSetDoc:
+ fn.Blocks = append(fn.Blocks, p.blockWithStmt(&ir.MakeSetStmt{Target: fn.Return}))
+ }
+
+ // For complete document rules, allocate one local variable for output
+ // of the rule body + else branches.
+ // It is used to let ordered rules (else blocks) check if the previous
+ // rule body returned a value.
+ lresult := p.newLocal()
+
+ // At this point the locals for the params and return value have been
+ // allocated. This will be the first local that can be used in each block.
+ lnext := p.lnext
+
+ var defaultRule *ast.Rule
+ var ruleLoc *location.Location
+
+ // Generate function blocks for rules.
+ for i := range rules {
+
+ // Save location of first encountered rule for the ReturnLocalStmt below
+ if i == 0 {
+ ruleLoc = p.loc
+ }
+
+ // Save default rule for the end.
+ if rules[i].Default {
+ defaultRule = rules[i]
+ continue
+ }
+
+ // Ordered rules are nested inside an additional block so that execution
+ // can short-circuit. For unordered rules, blocks can be added directly
+ // to the function.
+ var blocks *[]*ir.Block
+
+ if rules[i].Else == nil {
+ blocks = &fn.Blocks
+ } else {
+ stmt := &ir.BlockStmt{}
+ block := &ir.Block{Stmts: []ir.Stmt{stmt}}
+ fn.Blocks = append(fn.Blocks, block)
+ blocks = &stmt.Blocks
+ }
+
+ var prev *ast.Rule
+
+ // Unordered rules are treated as a special case of ordered rules.
+ for rule := rules[i]; rule != nil; prev, rule = rule, rule.Else {
+
+ // Update the location for each ordered rule.
+ p.loc = rule.Head.Loc()
+
+ // Setup planner for block.
+ p.lnext = lnext
+ p.vars = newVarstack(map[ast.Var]ir.Local{
+ ast.InputRootDocument.Value.(ast.Var): fn.Params[0],
+ ast.DefaultRootDocument.Value.(ast.Var): fn.Params[1],
+ })
+
+ curr := &ir.Block{}
+ *blocks = append(*blocks, curr)
+ p.curr = curr
+
+ if prev != nil {
+ // Ordered rules are handled by short circuiting execution. The
+ // plan will jump out to the extra block that was planned above.
+ p.appendStmt(&ir.IsUndefinedStmt{Source: lresult})
+ } else {
+ // The first rule body resets the local, so it can be reused.
+ p.appendStmt(&ir.ResetLocalStmt{Target: lresult})
+ }
+
+ // Complete and partial rules are treated as special cases of
+ // functions. If there are no args, the first step is a no-op.
+ err := p.planFuncParams(params, rule.Head.Args, 0, func() error {
+
+ // Run planner on the rule body.
+ err := p.planQuery(rule.Body, 0, func() error {
+
+ // Run planner on the result.
+ switch rule.Head.DocKind() {
+ case ast.CompleteDoc:
+ return p.planTerm(rule.Head.Value, func() error {
+ p.appendStmt(&ir.AssignVarOnceStmt{
+ Target: lresult,
+ Source: p.ltarget,
+ })
+ return nil
+ })
+ case ast.PartialSetDoc:
+ return p.planTerm(rule.Head.Key, func() error {
+ p.appendStmt(&ir.SetAddStmt{
+ Set: fn.Return,
+ Value: p.ltarget,
+ })
+ return nil
+ })
+ case ast.PartialObjectDoc:
+ return p.planTerm(rule.Head.Key, func() error {
+ key := p.ltarget
+ return p.planTerm(rule.Head.Value, func() error {
+ value := p.ltarget
+ p.appendStmt(&ir.ObjectInsertOnceStmt{
+ Object: fn.Return,
+ Key: key,
+ Value: value,
+ })
+ return nil
+ })
+ })
+ default:
+ return fmt.Errorf("illegal rule kind")
+ }
+ })
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // rule[i] and its else-rule(s), if present, are done
+ if rules[i].Head.DocKind() == ast.CompleteDoc {
+ end := &ir.Block{}
+ p.appendStmtToBlock(&ir.IsDefinedStmt{Source: lresult}, end)
+ p.appendStmtToBlock(
+ &ir.AssignVarOnceStmt{
+ Target: fn.Return,
+ Source: lresult,
+ },
+ end)
+ *blocks = append(*blocks, end)
+ }
+ }
+
+ // Default rules execute if the return is undefined.
+ if defaultRule != nil {
+
+ // Set the location for the default rule head.
+ p.loc = defaultRule.Head.Loc()
+ // NOTE(sr) for `default p = 1`,
+ // defaultRule.Loc() is `default`,
+ // defaultRule.Head.Loc() is `p = 1`.
+
+ fn.Blocks = append(fn.Blocks, p.blockWithStmt(&ir.IsUndefinedStmt{Source: fn.Return}))
+
+ p.curr = fn.Blocks[len(fn.Blocks)-1]
+
+ err := p.planQuery(defaultRule.Body, 0, func() error {
+ p.loc = defaultRule.Head.Loc()
+ return p.planTerm(defaultRule.Head.Value, func() error {
+ p.appendStmt(&ir.AssignVarOnceStmt{
+ Target: fn.Return,
+ Source: p.ltarget,
+ })
+ return nil
+ })
+ })
+
+ if err != nil {
+ return "", err
+ }
+ }
+
+ p.loc = ruleLoc
+
+ // All rules return a value.
+ fn.Blocks = append(fn.Blocks, p.blockWithStmt(&ir.ReturnLocalStmt{Source: fn.Return}))
+
+ p.appendFunc(fn)
+ p.funcs.Add(path, fn.Name)
+
+ // Restore the state of the planner.
+ p.lnext = plnext
+ p.ltarget = pltarget
+ p.vars = pvars
+ p.curr = pcurr
+ p.loc = ploc
+
+ return fn.Name, nil
+}
+
+func (p *Planner) planFuncParams(params []ir.Local, args ast.Args, idx int, iter planiter) error {
+ if idx >= len(args) {
+ return iter()
+ }
+ return p.planUnifyLocal(params[idx], args[idx], func() error {
+ return p.planFuncParams(params, args, idx+1, iter)
+ })
+}
+
+func (p *Planner) planQueries() error {
+
+ for _, qs := range p.queries {
+
+ // Initialize the plan with a block that prepares the query result.
+ p.plan = &ir.Plan{Name: qs.Name}
+ p.policy.Plans.Plans = append(p.policy.Plans.Plans, p.plan)
+ p.curr = &ir.Block{}
+
+ // Build a set of variables appearing in the query and allocate strings for
+ // each one. The strings will be used in the result set objects.
+ qvs := ast.NewVarSet()
+
+ for _, q := range qs.Queries {
+ vs := q.Vars(ast.VarVisitorParams{SkipRefCallHead: true, SkipClosures: true}).Diff(ast.ReservedVars)
+ qvs.Update(vs)
+ }
+
+ lvarnames := make(map[ast.Var]ir.Local, len(qvs))
+
+ for _, qv := range qvs.Sorted() {
+ qv = rewrittenVar(qs.RewrittenVars, qv)
+ if !qv.IsGenerated() && !qv.IsWildcard() {
+ // NOTE(sr): We have no location for these: they could appear in multiple
+ // queries, and we've lost track when building the ast.VarSet.
+ stmt := &ir.MakeStringStmt{
+ Index: p.getStringConst(string(qv)),
+ Target: p.newLocal(),
+ }
+ p.appendStmt(stmt)
+ lvarnames[qv] = stmt.Target
+ }
+ }
+
+ if len(p.curr.Stmts) > 0 {
+ p.appendBlock(p.curr)
+ }
+
+ lnext := p.lnext
+
+ for _, q := range qs.Queries {
+ p.loc = q.Loc()
+ p.lnext = lnext
+ p.vars.Push(map[ast.Var]ir.Local{})
+ p.curr = &ir.Block{}
+ defined := false
+ qvs := q.Vars(ast.VarVisitorParams{SkipRefCallHead: true, SkipClosures: true}).Diff(ast.ReservedVars).Sorted()
+
+ if err := p.planQuery(q, 0, func() error {
+
+ // Add an object containing variable bindings into the result set.
+ lr := p.newLocal()
+
+ p.appendStmt(&ir.MakeObjectStmt{
+ Target: lr,
+ })
+
+ for _, qv := range qvs {
+ rw := rewrittenVar(qs.RewrittenVars, qv)
+ if !rw.IsGenerated() && !rw.IsWildcard() {
+ p.appendStmt(&ir.ObjectInsertStmt{
+ Object: lr,
+ Key: lvarnames[rw],
+ Value: p.vars.GetOrEmpty(qv),
+ })
+ }
+ }
+
+ p.appendStmt(&ir.ResultSetAdd{
+ Value: lr,
+ })
+
+ defined = true
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ p.vars.Pop()
+
+ if defined {
+ p.appendBlock(p.curr)
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (p *Planner) planQuery(q ast.Body, index int, iter planiter) error {
+
+ if index >= len(q) {
+ return iter()
+ }
+
+ old := p.loc
+ p.loc = q[index].Loc()
+
+ err := p.planExpr(q[index], func() error {
+ return p.planQuery(q, index+1, func() error {
+ curr := p.loc
+ p.loc = old
+ err := iter()
+ p.loc = curr
+ return err
+ })
+ })
+
+ p.loc = old
+ return err
+}
+
+// TODO(tsandall): improve errors to include location information.
+func (p *Planner) planExpr(e *ast.Expr, iter planiter) error {
+
+ if e.Negated {
+ return p.planNot(e, iter)
+ }
+
+ if len(e.With) > 0 {
+ return p.planWith(e, iter)
+ }
+
+ if e.IsCall() {
+ return p.planExprCall(e, iter)
+ }
+
+ return p.planExprTerm(e, iter)
+}
+
+func (p *Planner) planNot(e *ast.Expr, iter planiter) error {
+
+ not := &ir.NotStmt{
+ Block: &ir.Block{},
+ }
+
+ prev := p.curr
+ p.curr = not.Block
+
+ if err := p.planExpr(e.Complement(), func() error {
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ p.curr = prev
+ p.appendStmt(not)
+
+ return iter()
+}
+
+func (p *Planner) planWith(e *ast.Expr, iter planiter) error {
+
+ // Plan the values that will be applied by the with modifiers. All values
+ // must be defined for the overall expression to evaluate.
+ values := make([]*ast.Term, len(e.With))
+
+ for i := range e.With {
+ values[i] = e.With[i].Value
+ }
+
+ return p.planTermSlice(values, func(locals []ir.Local) error {
+
+ paths := make([][]int, len(e.With))
+ saveVars := ast.NewVarSet()
+ dataRefs := []ast.Ref{}
+
+ for i := range e.With {
+
+ target := e.With[i].Target.Value.(ast.Ref)
+ paths[i] = make([]int, len(target)-1)
+
+ for j := 1; j < len(target); j++ {
+ if s, ok := target[j].Value.(ast.String); ok {
+ paths[i][j-1] = p.getStringConst(string(s))
+ } else {
+ return errors.New("invalid with target")
+ }
+ }
+
+ head := target[0].Value.(ast.Var)
+ saveVars.Add(head)
+
+ if head.Equal(ast.DefaultRootDocument.Value) {
+ dataRefs = append(dataRefs, target)
+ }
+ }
+
+ restore := make([][2]ir.Local, len(saveVars))
+
+ for i, v := range saveVars.Sorted() {
+ lorig := p.vars.GetOrEmpty(v)
+ lsave := p.newLocal()
+ p.appendStmt(&ir.AssignVarStmt{Source: lorig, Target: lsave})
+ restore[i] = [2]ir.Local{lorig, lsave}
+ }
+
+ // If any of the with statements targeted the data document we shadow
+ // the existing planned functions during expression planning. This
+ // causes the planner to re-plan any rules that may be required during
+ // planning of this expression (transitively).
+ if len(dataRefs) > 0 {
+ p.funcs.Push(map[string]string{})
+ for _, ref := range dataRefs {
+ p.rules.Push(ref)
+ }
+ }
+
+ err := p.planWithRec(e, paths, locals, 0, func() error {
+ if len(dataRefs) > 0 {
+ p.funcs.Pop()
+ for i := len(dataRefs) - 1; i >= 0; i-- {
+ p.rules.Pop(dataRefs[i])
+ }
+ }
+
+ err := p.planWithUndoRec(restore, 0, func() error {
+
+ err := iter()
+
+ if len(dataRefs) > 0 {
+ p.funcs.Push(map[string]string{})
+ for _, ref := range dataRefs {
+ p.rules.Push(ref)
+ }
+ }
+ return err
+ })
+
+ return err
+ })
+
+ if len(dataRefs) > 0 {
+ p.funcs.Pop()
+ for i := len(dataRefs) - 1; i >= 0; i-- {
+ p.rules.Pop(dataRefs[i])
+ }
+ }
+ return err
+
+ })
+}
+
+func (p *Planner) planWithRec(e *ast.Expr, targets [][]int, values []ir.Local, index int, iter planiter) error {
+ if index >= len(e.With) {
+ return p.planExpr(e.NoWith(), iter)
+ }
+
+ prev := p.curr
+ p.curr = &ir.Block{}
+
+ err := p.planWithRec(e, targets, values, index+1, iter)
+ if err != nil {
+ return err
+ }
+
+ block := p.curr
+ p.curr = prev
+ target := e.With[index].Target.Value.(ast.Ref)
+ head := target[0].Value.(ast.Var)
+
+ p.appendStmt(&ir.WithStmt{
+ Local: p.vars.GetOrEmpty(head),
+ Path: targets[index],
+ Value: values[index],
+ Block: block,
+ })
+
+ return nil
+}
+
+func (p *Planner) planWithUndoRec(restore [][2]ir.Local, index int, iter planiter) error {
+
+ if index >= len(restore) {
+ return iter()
+ }
+
+ prev := p.curr
+ p.curr = &ir.Block{}
+
+ if err := p.planWithUndoRec(restore, index+1, iter); err != nil {
+ return err
+ }
+
+ block := p.curr
+ p.curr = prev
+ lorig := restore[index][0]
+ lsave := restore[index][1]
+
+ p.appendStmt(&ir.WithStmt{
+ Local: lorig,
+ Value: lsave,
+ Block: block,
+ })
+
+ return nil
+}
+
+func (p *Planner) planExprTerm(e *ast.Expr, iter planiter) error {
+ return p.planTerm(e.Terms.(*ast.Term), func() error {
+ falsy := p.newLocal()
+ p.appendStmt(&ir.MakeBooleanStmt{
+ Value: false,
+ Target: falsy,
+ })
+ p.appendStmt(&ir.NotEqualStmt{
+ A: p.ltarget,
+ B: falsy,
+ })
+ return iter()
+ })
+}
+
+func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error {
+ operator := e.Operator().String()
+ switch operator {
+ case ast.Equality.Name:
+ return p.planUnify(e.Operand(0), e.Operand(1), iter)
+ case ast.Equal.Name:
+ return p.planBinaryExpr(e, func(a, b ir.Local) error {
+ p.appendStmt(&ir.EqualStmt{
+ A: a,
+ B: b,
+ })
+ return iter()
+ })
+ case ast.LessThan.Name:
+ return p.planBinaryExpr(e, func(a, b ir.Local) error {
+ p.appendStmt(&ir.LessThanStmt{
+ A: a,
+ B: b,
+ })
+ return iter()
+ })
+ case ast.LessThanEq.Name:
+ return p.planBinaryExpr(e, func(a, b ir.Local) error {
+ p.appendStmt(&ir.LessThanEqualStmt{
+ A: a,
+ B: b,
+ })
+ return iter()
+ })
+ case ast.GreaterThan.Name:
+ return p.planBinaryExpr(e, func(a, b ir.Local) error {
+ p.appendStmt(&ir.GreaterThanStmt{
+ A: a,
+ B: b,
+ })
+ return iter()
+ })
+ case ast.GreaterThanEq.Name:
+ return p.planBinaryExpr(e, func(a, b ir.Local) error {
+ p.appendStmt(&ir.GreaterThanEqualStmt{
+ A: a,
+ B: b,
+ })
+ return iter()
+ })
+ case ast.NotEqual.Name:
+ return p.planBinaryExpr(e, func(a, b ir.Local) error {
+ p.appendStmt(&ir.NotEqualStmt{
+ A: a,
+ B: b,
+ })
+ return iter()
+ })
+ default:
+
+ var relation bool
+ var name string
+ var arity int
+ var args []ir.Local
+
+ node := p.rules.Lookup(e.Operator())
+
+ if node != nil {
+ var err error
+ name, err = p.planRules(node.Rules())
+ if err != nil {
+ return err
+ }
+ arity = node.Arity()
+ args = []ir.Local{
+ p.vars.GetOrEmpty(ast.InputRootDocument.Value.(ast.Var)),
+ p.vars.GetOrEmpty(ast.DefaultRootDocument.Value.(ast.Var)),
+ }
+ } else if decl, ok := p.decls[operator]; ok {
+ relation = decl.Relation
+ arity = len(decl.Decl.Args())
+ name = operator
+ p.externs[operator] = struct{}{}
+ } else {
+ return fmt.Errorf("illegal call: unknown operator %q", operator)
+ }
+
+ operands := e.Operands()
+
+ if len(operands) < arity || len(operands) > arity+1 {
+ return fmt.Errorf("illegal call: wrong number of operands: got %v, want %v)", len(operands), arity)
+ }
+
+ if relation {
+ return p.planExprCallRelation(name, arity, operands, args, iter)
+ }
+
+ return p.planExprCallFunc(name, arity, operands, args, iter)
+ }
+}
+
+func (p *Planner) planExprCallRelation(name string, arity int, operands []*ast.Term, args []ir.Local, iter planiter) error {
+
+ if len(operands) == arity {
+ return p.planCallArgs(operands, 0, args, func(args []ir.Local) error {
+ p.ltarget = p.newLocal()
+ p.appendStmt(&ir.CallStmt{
+ Func: name,
+ Args: args,
+ Result: p.ltarget,
+ })
+
+ lsize := p.newLocal()
+
+ p.appendStmt(&ir.LenStmt{
+ Source: p.ltarget,
+ Target: lsize,
+ })
+
+ lzero := p.newLocal()
+
+ p.appendStmt(&ir.MakeNumberIntStmt{
+ Value: 0,
+ Target: lzero,
+ })
+
+ p.appendStmt(&ir.NotEqualStmt{
+ A: lsize,
+ B: lzero,
+ })
+
+ return iter()
+ })
+ }
+
+ return p.planCallArgs(operands[:len(operands)-1], 0, args, func(args []ir.Local) error {
+
+ p.ltarget = p.newLocal()
+
+ p.appendStmt(&ir.CallStmt{
+ Func: name,
+ Args: args,
+ Result: p.ltarget,
+ })
+
+ return p.planScanValues(operands[len(operands)-1], func(ir.Local) error {
+ return iter()
+ })
+ })
+}
+
+func (p *Planner) planExprCallFunc(name string, arity int, operands []*ast.Term, args []ir.Local, iter planiter) error {
+
+ if len(operands) == arity {
+ // definition: f(x) = y { ... }
+ // call: f(x) # result not captured
+ return p.planCallArgs(operands, 0, args, func(args []ir.Local) error {
+ p.ltarget = p.newLocal()
+ p.appendStmt(&ir.CallStmt{
+ Func: name,
+ Args: args,
+ Result: p.ltarget,
+ })
+
+ falsy := p.newLocal()
+
+ p.appendStmt(&ir.MakeBooleanStmt{
+ Value: false,
+ Target: falsy,
+ })
+
+ p.appendStmt(&ir.NotEqualStmt{
+ A: p.ltarget,
+ B: falsy,
+ })
+
+ return iter()
+ })
+ }
+
+ // definition: f(x) = y { ... }
+ // call: f(x, 1) # caller captures result
+ return p.planCallArgs(operands[:len(operands)-1], 0, args, func(args []ir.Local) error {
+ result := p.newLocal()
+ p.appendStmt(&ir.CallStmt{
+ Func: name,
+ Args: args,
+ Result: result,
+ })
+ return p.planUnifyLocal(result, operands[len(operands)-1], iter)
+ })
+}
+
+func (p *Planner) planCallArgs(terms []*ast.Term, idx int, args []ir.Local, iter func([]ir.Local) error) error {
+ if idx >= len(terms) {
+ return iter(args)
+ }
+ return p.planTerm(terms[idx], func() error {
+ args = append(args, p.ltarget)
+ return p.planCallArgs(terms, idx+1, args, iter)
+ })
+}
+
+func (p *Planner) planUnify(a, b *ast.Term, iter planiter) error {
+
+ switch va := a.Value.(type) {
+ case ast.Null, ast.Boolean, ast.Number, ast.String, ast.Ref, ast.Set, *ast.SetComprehension, *ast.ArrayComprehension, *ast.ObjectComprehension:
+ return p.planTerm(a, func() error {
+ return p.planUnifyLocal(p.ltarget, b, iter)
+ })
+ case ast.Var:
+ return p.planUnifyVar(va, b, iter)
+ case *ast.Array:
+ switch vb := b.Value.(type) {
+ case ast.Var:
+ return p.planUnifyVar(vb, a, iter)
+ case ast.Ref:
+ return p.planTerm(b, func() error {
+ return p.planUnifyLocalArray(p.ltarget, va, iter)
+ })
+ case *ast.Array:
+ if va.Len() == vb.Len() {
+ return p.planUnifyArraysRec(va, vb, 0, iter)
+ }
+ return nil
+ }
+ case ast.Object:
+ switch vb := b.Value.(type) {
+ case ast.Var:
+ return p.planUnifyVar(vb, a, iter)
+ case ast.Ref:
+ return p.planTerm(b, func() error {
+ return p.planUnifyLocalObject(p.ltarget, va, iter)
+ })
+ case ast.Object:
+ if va.Len() == vb.Len() {
+ return p.planUnifyObjectsRec(va, vb, va.Keys(), 0, iter)
+ }
+ return nil
+ }
+ }
+
+ return fmt.Errorf("not implemented: unify(%v, %v)", a, b)
+}
+
+func (p *Planner) planUnifyVar(a ast.Var, b *ast.Term, iter planiter) error {
+
+ if la, ok := p.vars.Get(a); ok {
+ return p.planUnifyLocal(la, b, iter)
+ }
+
+ return p.planTerm(b, func() error {
+ target := p.newLocal()
+ p.vars.Put(a, target)
+ p.appendStmt(&ir.AssignVarStmt{
+ Source: p.ltarget,
+ Target: target,
+ })
+ return iter()
+ })
+}
+
+func (p *Planner) planUnifyLocal(a ir.Local, b *ast.Term, iter planiter) error {
+ switch vb := b.Value.(type) {
+ case ast.Null, ast.Boolean, ast.Number, ast.String, ast.Ref, ast.Set, *ast.SetComprehension, *ast.ArrayComprehension, *ast.ObjectComprehension:
+ return p.planTerm(b, func() error {
+ p.appendStmt(&ir.EqualStmt{
+ A: a,
+ B: p.ltarget,
+ })
+ return iter()
+ })
+ case ast.Var:
+ if lv, ok := p.vars.Get(vb); ok {
+ p.appendStmt(&ir.EqualStmt{
+ A: a,
+ B: lv,
+ })
+ return iter()
+ }
+ lv := p.newLocal()
+ p.vars.Put(vb, lv)
+ p.appendStmt(&ir.AssignVarStmt{
+ Source: a,
+ Target: lv,
+ })
+ return iter()
+ case *ast.Array:
+ return p.planUnifyLocalArray(a, vb, iter)
+ case ast.Object:
+ return p.planUnifyLocalObject(a, vb, iter)
+ }
+
+ return fmt.Errorf("not implemented: unifyLocal(%v, %v)", a, b)
+}
+
+func (p *Planner) planUnifyLocalArray(a ir.Local, b *ast.Array, iter planiter) error {
+ p.appendStmt(&ir.IsArrayStmt{
+ Source: a,
+ })
+
+ blen := p.newLocal()
+ alen := p.newLocal()
+
+ p.appendStmt(&ir.LenStmt{
+ Source: a,
+ Target: alen,
+ })
+
+ p.appendStmt(&ir.MakeNumberIntStmt{
+ Value: int64(b.Len()),
+ Target: blen,
+ })
+
+ p.appendStmt(&ir.EqualStmt{
+ A: alen,
+ B: blen,
+ })
+
+ lkey := p.newLocal()
+
+ p.appendStmt(&ir.MakeNumberIntStmt{
+ Target: lkey,
+ })
+
+ lval := p.newLocal()
+
+ return p.planUnifyLocalArrayRec(a, 0, b, lkey, lval, iter)
+}
+
+func (p *Planner) planUnifyLocalArrayRec(a ir.Local, index int, b *ast.Array, lkey, lval ir.Local, iter planiter) error {
+ if b.Len() == index {
+ return iter()
+ }
+
+ p.appendStmt(&ir.AssignIntStmt{
+ Value: int64(index),
+ Target: lkey,
+ })
+
+ p.appendStmt(&ir.DotStmt{
+ Source: a,
+ Key: lkey,
+ Target: lval,
+ })
+
+ return p.planUnifyLocal(lval, b.Elem(index), func() error {
+ return p.planUnifyLocalArrayRec(a, index+1, b, lkey, lval, iter)
+ })
+}
+
+func (p *Planner) planUnifyLocalObject(a ir.Local, b ast.Object, iter planiter) error {
+ p.appendStmt(&ir.IsObjectStmt{
+ Source: a,
+ })
+
+ blen := p.newLocal()
+ alen := p.newLocal()
+
+ p.appendStmt(&ir.LenStmt{
+ Source: a,
+ Target: alen,
+ })
+
+ p.appendStmt(&ir.MakeNumberIntStmt{
+ Value: int64(b.Len()),
+ Target: blen,
+ })
+
+ p.appendStmt(&ir.EqualStmt{
+ A: alen,
+ B: blen,
+ })
+
+ lkey := p.newLocal()
+ lval := p.newLocal()
+ bkeys := b.Keys()
+
+ return p.planUnifyLocalObjectRec(a, 0, bkeys, b, lkey, lval, iter)
+}
+
+func (p *Planner) planUnifyLocalObjectRec(a ir.Local, index int, keys []*ast.Term, b ast.Object, lkey, lval ir.Local, iter planiter) error {
+
+ if index == len(keys) {
+ return iter()
+ }
+
+ return p.planTerm(keys[index], func() error {
+ p.appendStmt(&ir.AssignVarStmt{
+ Source: p.ltarget,
+ Target: lkey,
+ })
+ p.appendStmt(&ir.DotStmt{
+ Source: a,
+ Key: lkey,
+ Target: lval,
+ })
+ return p.planUnifyLocal(lval, b.Get(keys[index]), func() error {
+ return p.planUnifyLocalObjectRec(a, index+1, keys, b, lkey, lval, iter)
+ })
+ })
+}
+
+func (p *Planner) planUnifyArraysRec(a, b *ast.Array, index int, iter planiter) error {
+ if index == a.Len() {
+ return iter()
+ }
+ return p.planUnify(a.Elem(index), b.Elem(index), func() error {
+ return p.planUnifyArraysRec(a, b, index+1, iter)
+ })
+}
+
+func (p *Planner) planUnifyObjectsRec(a, b ast.Object, keys []*ast.Term, index int, iter planiter) error {
+ if index == len(keys) {
+ return iter()
+ }
+
+ aval := a.Get(keys[index])
+ bval := b.Get(keys[index])
+ if aval == nil || bval == nil {
+ return nil
+ }
+
+ return p.planUnify(aval, bval, func() error {
+ return p.planUnifyObjectsRec(a, b, keys, index+1, iter)
+ })
+}
+
+func (p *Planner) planBinaryExpr(e *ast.Expr, iter binaryiter) error {
+ return p.planTerm(e.Operand(0), func() error {
+ a := p.ltarget
+ return p.planTerm(e.Operand(1), func() error {
+ b := p.ltarget
+ return iter(a, b)
+ })
+ })
+}
+
// planTerm plans evaluation of the term t, dispatching on its value type.
// After the planned statements execute, p.ltarget refers to the local holding
// the term's value; iter is then invoked to continue planning. Comprehension
// terms additionally record the term's source location so the emitted
// statements are annotated with it.
func (p *Planner) planTerm(t *ast.Term, iter planiter) error {

	switch v := t.Value.(type) {
	case ast.Null:
		return p.planNull(v, iter)
	case ast.Boolean:
		return p.planBoolean(v, iter)
	case ast.Number:
		return p.planNumber(v, iter)
	case ast.String:
		return p.planString(v, iter)
	case ast.Var:
		return p.planVar(v, iter)
	case ast.Ref:
		return p.planRef(v, iter)
	case *ast.Array:
		return p.planArray(v, iter)
	case ast.Object:
		return p.planObject(v, iter)
	case ast.Set:
		return p.planSet(v, iter)
	case *ast.SetComprehension:
		p.loc = t.Loc()
		return p.planSetComprehension(v, iter)
	case *ast.ArrayComprehension:
		p.loc = t.Loc()
		return p.planArrayComprehension(v, iter)
	case *ast.ObjectComprehension:
		p.loc = t.Loc()
		return p.planObjectComprehension(v, iter)
	default:
		// Remaining term types (e.g. calls handled elsewhere) are not
		// supported by this dispatch.
		return fmt.Errorf("%v term not implemented", ast.TypeName(v))
	}
}
+
// planNull emits a statement constructing the null value into a fresh local,
// sets p.ltarget to that local, and invokes iter.
func (p *Planner) planNull(null ast.Null, iter planiter) error {

	target := p.newLocal()

	p.appendStmt(&ir.MakeNullStmt{
		Target: target,
	})

	p.ltarget = target

	return iter()
}

// planBoolean emits a statement constructing the boolean value b into a fresh
// local, sets p.ltarget to that local, and invokes iter.
func (p *Planner) planBoolean(b ast.Boolean, iter planiter) error {

	target := p.newLocal()

	p.appendStmt(&ir.MakeBooleanStmt{
		Value:  bool(b),
		Target: target,
	})

	p.ltarget = target

	return iter()
}

// planNumber interns the number's string representation in the static string
// table and emits a statement constructing the number from that entry into a
// fresh local, which becomes p.ltarget before iter is invoked.
func (p *Planner) planNumber(num ast.Number, iter planiter) error {

	index := p.getStringConst(string(num))
	target := p.newLocal()

	p.appendStmt(&ir.MakeNumberRefStmt{
		Index:  index,
		Target: target,
	})

	p.ltarget = target
	return iter()
}

// planNumberFloat emits a statement constructing the float value f into a
// fresh local, sets p.ltarget to that local, and invokes iter.
func (p *Planner) planNumberFloat(f float64, iter planiter) error {

	target := p.newLocal()

	p.appendStmt(&ir.MakeNumberFloatStmt{
		Value:  f,
		Target: target,
	})

	p.ltarget = target

	return iter()
}

// planNumberInt emits a statement constructing the integer value i into a
// fresh local, sets p.ltarget to that local, and invokes iter.
func (p *Planner) planNumberInt(i int64, iter planiter) error {

	target := p.newLocal()

	p.appendStmt(&ir.MakeNumberIntStmt{
		Value:  i,
		Target: target,
	})

	p.ltarget = target

	return iter()
}

// planString interns str in the static string table and emits a statement
// constructing the string from that entry into a fresh local, which becomes
// p.ltarget before iter is invoked.
func (p *Planner) planString(str ast.String, iter planiter) error {

	index := p.getStringConst(string(str))
	target := p.newLocal()

	p.appendStmt(&ir.MakeStringStmt{
		Index:  index,
		Target: target,
	})

	p.ltarget = target

	return iter()
}
+
// planVar sets p.ltarget to the local bound to v, allocating and binding a
// fresh local if the variable has not been seen yet, and invokes iter.
func (p *Planner) planVar(v ast.Var, iter planiter) error {
	p.ltarget = p.vars.GetOrElse(v, func() ir.Local {
		return p.newLocal()
	})
	return iter()
}

// planArray emits a statement constructing an empty array (pre-sized to the
// literal's length) and then plans each element, appending it to the array.
// When done, p.ltarget refers to the array and iter is invoked.
func (p *Planner) planArray(arr *ast.Array, iter planiter) error {

	larr := p.newLocal()

	p.appendStmt(&ir.MakeArrayStmt{
		Capacity: int32(arr.Len()),
		Target:   larr,
	})

	return p.planArrayRec(arr, 0, larr, iter)
}

// planArrayRec plans evaluation of the array element at index and appends the
// result to larr, recursing until all elements are appended; then p.ltarget
// is set to larr and iter is invoked.
func (p *Planner) planArrayRec(arr *ast.Array, index int, larr ir.Local, iter planiter) error {
	if index == arr.Len() {
		p.ltarget = larr
		return iter()
	}

	return p.planTerm(arr.Elem(index), func() error {

		p.appendStmt(&ir.ArrayAppendStmt{
			Value: p.ltarget,
			Array: larr,
		})

		return p.planArrayRec(arr, index+1, larr, iter)
	})
}
+
// planObject emits a statement constructing an empty object and then plans
// each key/value pair of the literal, inserting them one by one. When done,
// p.ltarget refers to the object and iter is invoked.
func (p *Planner) planObject(obj ast.Object, iter planiter) error {

	lobj := p.newLocal()

	p.appendStmt(&ir.MakeObjectStmt{
		Target: lobj,
	})

	return p.planObjectRec(obj, 0, obj.Keys(), lobj, iter)
}

// planObjectRec plans evaluation of the key and value at index and inserts
// the pair into lobj, recursing until all pairs are inserted; then p.ltarget
// is set to lobj and iter is invoked.
func (p *Planner) planObjectRec(obj ast.Object, index int, keys []*ast.Term, lobj ir.Local, iter planiter) error {
	if index == len(keys) {
		p.ltarget = lobj
		return iter()
	}

	return p.planTerm(keys[index], func() error {
		lkey := p.ltarget

		return p.planTerm(obj.Get(keys[index]), func() error {
			lval := p.ltarget
			p.appendStmt(&ir.ObjectInsertStmt{
				Key:    lkey,
				Value:  lval,
				Object: lobj,
			})

			return p.planObjectRec(obj, index+1, keys, lobj, iter)
		})
	})
}

// planSet emits a statement constructing an empty set and then plans each
// element of the literal, adding it to the set. When done, p.ltarget refers
// to the set and iter is invoked.
func (p *Planner) planSet(set ast.Set, iter planiter) error {
	lset := p.newLocal()

	p.appendStmt(&ir.MakeSetStmt{
		Target: lset,
	})

	return p.planSetRec(set, 0, set.Slice(), lset, iter)
}

// planSetRec plans evaluation of the element at index and adds the result to
// lset, recursing until all elements are added; then p.ltarget is set to lset
// and iter is invoked.
func (p *Planner) planSetRec(set ast.Set, index int, elems []*ast.Term, lset ir.Local, iter planiter) error {
	if index == len(elems) {
		p.ltarget = lset
		return iter()
	}

	return p.planTerm(elems[index], func() error {
		p.appendStmt(&ir.SetAddStmt{
			Value: p.ltarget,
			Set:   lset,
		})
		return p.planSetRec(set, index+1, elems, lset, iter)
	})
}
+
// planSetComprehension plans a set comprehension: it constructs an empty set
// and then plans the comprehension body so that, for every result, the head
// term is evaluated and added to the set. iter runs with p.ltarget referring
// to the completed set.
func (p *Planner) planSetComprehension(sc *ast.SetComprehension, iter planiter) error {

	lset := p.newLocal()

	p.appendStmt(&ir.MakeSetStmt{
		Target: lset,
	})

	return p.planComprehension(sc.Body, func() error {
		return p.planTerm(sc.Term, func() error {
			p.appendStmt(&ir.SetAddStmt{
				Value: p.ltarget,
				Set:   lset,
			})
			return nil
		})
	}, lset, iter)
}

// planArrayComprehension plans an array comprehension: it constructs an empty
// array and then plans the comprehension body so that, for every result, the
// head term is evaluated and appended. iter runs with p.ltarget referring to
// the completed array.
func (p *Planner) planArrayComprehension(ac *ast.ArrayComprehension, iter planiter) error {

	larr := p.newLocal()

	p.appendStmt(&ir.MakeArrayStmt{
		Target: larr,
	})

	return p.planComprehension(ac.Body, func() error {
		return p.planTerm(ac.Term, func() error {
			p.appendStmt(&ir.ArrayAppendStmt{
				Value: p.ltarget,
				Array: larr,
			})
			return nil
		})
	}, larr, iter)
}

// planObjectComprehension plans an object comprehension: it constructs an
// empty object and then plans the comprehension body so that, for every
// result, the key and value terms are evaluated and inserted. The insert-once
// statement enforces that conflicting values for the same key are an error.
// iter runs with p.ltarget referring to the completed object.
func (p *Planner) planObjectComprehension(oc *ast.ObjectComprehension, iter planiter) error {

	lobj := p.newLocal()

	p.appendStmt(&ir.MakeObjectStmt{
		Target: lobj,
	})
	return p.planComprehension(oc.Body, func() error {
		return p.planTerm(oc.Key, func() error {
			lkey := p.ltarget
			return p.planTerm(oc.Value, func() error {
				p.appendStmt(&ir.ObjectInsertOnceStmt{
					Key:    lkey,
					Value:  p.ltarget,
					Object: lobj,
				})
				return nil
			})
		})
	}, lobj, iter)
}
+
// planComprehension plans the comprehension body into a fresh, nested block
// (so that failures inside the body do not abort the enclosing block), with
// closureIter planning the per-result accumulation into target. The planner's
// current block and source location are saved and restored around the nested
// planning. Afterwards p.ltarget is set to target and iter is invoked.
func (p *Planner) planComprehension(body ast.Body, closureIter planiter, target ir.Local, iter planiter) error {

	prev := p.curr
	p.curr = &ir.Block{}
	ploc := p.loc

	if err := p.planQuery(body, 0, func() error {
		return closureIter()
	}); err != nil {
		return err
	}

	block := p.curr
	p.curr = prev
	p.loc = ploc

	// Wrap the comprehension body in its own block so early exits stay local.
	p.appendStmt(&ir.BlockStmt{
		Blocks: []*ir.Block{
			block,
		},
	})

	p.ltarget = target
	return iter()
}
+
// planRef plans evaluation of the reference ref. References rooted at the
// data document are routed through the virtual-document machinery
// (planRefData); references rooted at a previously bound variable are planned
// as a chain of lookups starting from that variable's local. Non-variable or
// unbound heads are compile errors.
func (p *Planner) planRef(ref ast.Ref, iter planiter) error {

	head, ok := ref[0].Value.(ast.Var)
	if !ok {
		return fmt.Errorf("illegal ref: non-var head")
	}

	if head.Compare(ast.DefaultRootDocument.Value) == 0 {
		virtual := p.rules.Get(ref[0].Value)
		base := &baseptr{local: p.vars.GetOrEmpty(ast.DefaultRootDocument.Value.(ast.Var))}
		return p.planRefData(virtual, base, ref, 1, iter)
	}

	p.ltarget, ok = p.vars.Get(head)
	if !ok {
		return fmt.Errorf("illegal ref: unsafe head")
	}

	return p.planRefRec(ref, 1, iter)
}
+
// planRefRec plans the remaining operands of ref starting at index, walking
// from the value currently in p.ltarget. Ground operands (all vars already
// bound) become direct lookups (planDot); operands containing an unbound var
// require enumerating the collection (planScan) so the var gets bound to each
// key in turn.
func (p *Planner) planRefRec(ref ast.Ref, index int, iter planiter) error {

	if len(ref) == index {
		return iter()
	}

	// Scan is required iff the operand contains at least one variable that
	// has no binding yet. The walk stops early once one is found.
	scan := false

	ast.WalkVars(ref[index], func(v ast.Var) bool {
		if !scan {
			_, exists := p.vars.Get(v)
			if !exists {
				scan = true
			}
		}
		return scan
	})

	if !scan {
		return p.planDot(ref[index], func() error {
			return p.planRefRec(ref, index+1, iter)
		})
	}

	return p.planScan(ref[index], func(lkey ir.Local) error {
		return p.planRefRec(ref, index+1, iter)
	})
}
+
// baseptr is a pointer into the base (non-virtual) document: local holds the
// document value already materialized, and path is the residual reference to
// apply on top of it.
type baseptr struct {
	local ir.Local
	path  ast.Ref
}
+
// planRefData implements the virtual document model by generating the value of
// the ref parameter and invoking the iterator with the planner target set to
// the virtual document and all variables in the reference assigned.
func (p *Planner) planRefData(virtual *ruletrie, base *baseptr, ref ast.Ref, index int, iter planiter) error {

	// Early-exit if the end of the reference has been reached. In this case the
	// plan has to materialize the full extent of the referenced value.
	if index >= len(ref) {
		return p.planRefDataExtent(virtual, base, iter)
	}

	// If the reference operand is ground then either continue to the next
	// operand or invoke the function for the rule referred to by this operand.
	if ref[index].IsGround() {

		var vchild *ruletrie

		if virtual != nil {
			vchild = virtual.Get(ref[index].Value)
		}

		// Rules() is nil-safe, so this also covers vchild == nil.
		rules := vchild.Rules()

		if len(rules) > 0 {
			p.ltarget = p.newLocal()

			funcName, err := p.planRules(rules)
			if err != nil {
				return err
			}

			// Call the planned rule function with the standard input/data
			// arguments and continue with the remaining operands.
			p.appendStmt(&ir.CallStmt{
				Func: funcName,
				Args: []ir.Local{
					p.vars.GetOrEmpty(ast.InputRootDocument.Value.(ast.Var)),
					p.vars.GetOrEmpty(ast.DefaultRootDocument.Value.(ast.Var)),
				},
				Result: p.ltarget,
			})

			return p.planRefRec(ref, index+1, iter)
		}

		// No rules at this node: extend the base path with this operand and
		// descend.
		bchild := *base
		bchild.path = append(bchild.path, ref[index])

		return p.planRefData(vchild, &bchild, ref, index+1, iter)
	}

	exclude := ast.NewSet()

	// The planner does not support dynamic dispatch so generate blocks to
	// evaluate each of the rulesets on the child nodes.
	if virtual != nil {

		stmt := &ir.BlockStmt{}

		for _, child := range virtual.Children() {

			block := &ir.Block{}
			prev := p.curr
			p.curr = block
			key := ast.NewTerm(child)
			exclude.Add(key)

			// Assignments in each block due to local unification must be undone
			// so create a new frame that will be popped after this key is
			// processed.
			p.vars.Push(map[ast.Var]ir.Local{})

			if err := p.planTerm(key, func() error {
				return p.planUnifyLocal(p.ltarget, ref[index], func() error {
					// Create a copy of the reference with this operand plugged.
					// This will result in evaluation of the rulesets on the
					// child node.
					cpy := ref.Copy()
					cpy[index] = key
					return p.planRefData(virtual, base, cpy, index, iter)
				})
			}); err != nil {
				return err
			}

			p.vars.Pop()
			p.curr = prev
			stmt.Blocks = append(stmt.Blocks, block)
		}

		p.appendStmt(stmt)
	}

	// If the virtual tree was enumerated then we do not want to enumerate base
	// trees that are rooted at the same key as any of the virtual sub trees. To
	// prevent this we build a set of keys that are to be excluded and check
	// below during the base scan.
	var lexclude *ir.Local

	if exclude.Len() > 0 {
		if err := p.planSet(exclude, func() error {
			v := p.ltarget
			lexclude = &v
			return nil
		}); err != nil {
			return err
		}
	}

	p.ltarget = base.local

	// Perform a scan of the base documents starting from the location referred
	// to by the data pointer. Use the set we built above to avoid revisiting
	// sub trees.
	return p.planRefRec(base.path, 0, func() error {
		return p.planScan(ref[index], func(lkey ir.Local) error {
			if lexclude != nil {
				// Skip this key if it is in the exclude set: the NotStmt
				// succeeds only when the membership lookup fails.
				lignore := p.newLocal()
				p.appendStmt(&ir.NotStmt{
					Block: p.blockWithStmt(&ir.DotStmt{
						Source: *lexclude,
						Key:    lkey,
						Target: lignore,
					})})
			}

			// Assume that virtual sub trees have been visited already so
			// recurse without the virtual node.
			return p.planRefData(nil, &baseptr{local: p.ltarget}, ref, index+1, iter)
		})
	})
}
+
// planRefDataExtent generates the full extent (combined) of the base and
// virtual nodes and then invokes the iterator with the planner target set to
// the full extent.
func (p *Planner) planRefDataExtent(virtual *ruletrie, base *baseptr, iter planiter) error {

	vtarget := p.newLocal()

	// Generate the virtual document out of rules contained under the virtual
	// node (recursively). This document will _ONLY_ contain values generated by
	// rules. No base document values will be included.
	if virtual != nil {

		p.appendStmt(&ir.MakeObjectStmt{
			Target: vtarget,
		})

		for _, key := range virtual.Children() {
			child := virtual.Get(key)

			// Skip functions.
			if child.Arity() > 0 {
				continue
			}

			// Virtual document keys are always strings; intern the key.
			lkey := p.newLocal()
			idx := p.getStringConst(string(key.(ast.String)))
			p.appendStmt(&ir.MakeStringStmt{
				Index:  idx,
				Target: lkey,
			})

			rules := child.Rules()

			// Build object hierarchy depth-first.
			if len(rules) == 0 {
				err := p.planRefDataExtent(child, nil, func() error {
					p.appendStmt(&ir.ObjectInsertStmt{
						Object: vtarget,
						Key:    lkey,
						Value:  p.ltarget,
					})
					return nil
				})
				if err != nil {
					return err
				}
				continue
			}

			// Generate virtual document for leaf.
			lvalue := p.newLocal()

			funcName, err := p.planRules(rules)
			if err != nil {
				return err
			}

			// Add leaf to object if defined. The call and insert share a
			// nested block so an undefined rule skips the insert without
			// aborting the enclosing block.
			b := &ir.Block{}
			p.appendStmtToBlock(&ir.CallStmt{
				Func: funcName,
				Args: []ir.Local{
					p.vars.GetOrEmpty(ast.InputRootDocument.Value.(ast.Var)),
					p.vars.GetOrEmpty(ast.DefaultRootDocument.Value.(ast.Var)),
				},
				Result: lvalue,
			}, b)
			p.appendStmtToBlock(&ir.ObjectInsertStmt{
				Object: vtarget,
				Key:    lkey,
				Value:  lvalue,
			}, b)
			p.appendStmt(&ir.BlockStmt{Blocks: []*ir.Block{b}})
		}

		// At this point vtarget refers to the full extent of the virtual
		// document at ref. If the base pointer is unset, no further processing
		// is required.
		if base == nil {
			p.ltarget = vtarget
			return iter()
		}
	}

	// Obtain the base document value and merge (recursively) with the virtual
	// document value above if needed.
	prev := p.curr
	p.curr = &ir.Block{}
	p.ltarget = base.local
	target := p.newLocal()

	err := p.planRefRec(base.path, 0, func() error {

		if virtual == nil {
			target = p.ltarget
		} else {
			stmt := &ir.ObjectMergeStmt{
				A:      p.ltarget,
				B:      vtarget,
				Target: target,
			}
			p.appendStmt(stmt)
		}

		// Break out of the outer block: the merge (or plain base value)
		// succeeded, so the fallback below must not run.
		p.appendStmt(&ir.BreakStmt{Index: 1})
		return nil
	})

	if err != nil {
		return err
	}

	inner := p.curr

	// Fallback to virtual document value if base document is undefined.
	// Otherwise, this block is undefined.
	p.curr = &ir.Block{}
	p.appendStmt(&ir.BlockStmt{Blocks: []*ir.Block{inner}})

	if virtual != nil {
		p.appendStmt(&ir.AssignVarStmt{
			Source: vtarget,
			Target: target,
		})
	} else {
		p.appendStmt(&ir.BreakStmt{Index: 1})
	}

	outer := p.curr
	p.curr = prev
	p.appendStmt(&ir.BlockStmt{Blocks: []*ir.Block{outer}})

	// At this point, target refers to either the full extent of the base and
	// virtual documents at ref or just the base document at ref.
	p.ltarget = target

	return iter()
}
+
// planDot plans a single lookup step: the key term is evaluated and the value
// at that key in the collection referred to by the current p.ltarget is
// loaded into a fresh local, which becomes the new p.ltarget before iter is
// invoked. The lookup aborts the enclosing block if the key is absent.
func (p *Planner) planDot(key *ast.Term, iter planiter) error {

	// Capture the source before planning the key, which overwrites p.ltarget.
	source := p.ltarget

	return p.planTerm(key, func() error {

		target := p.newLocal()

		p.appendStmt(&ir.DotStmt{
			Source: source,
			Key:    p.ltarget,
			Target: target,
		})

		p.ltarget = target

		return iter()
	})
}

// scaniter is invoked once per scanned element with the local holding the
// element's key (or value, for planScanValues).
type scaniter func(ir.Local) error

// planScan emits a scan over the collection referred to by p.ltarget,
// unifying each element's key with the key term. The scan body — planned via
// iter with p.ltarget set to the element value — is built into the scan
// statement's nested block.
func (p *Planner) planScan(key *ast.Term, iter scaniter) error {

	scan := &ir.ScanStmt{
		Source: p.ltarget,
		Key:    p.newLocal(),
		Value:  p.newLocal(),
		Block:  &ir.Block{},
	}

	prev := p.curr
	p.curr = scan.Block

	if err := p.planUnifyLocal(scan.Key, key, func() error {
		p.ltarget = scan.Value
		return iter(scan.Key)
	}); err != nil {
		return err
	}

	p.curr = prev
	p.appendStmt(scan)

	return nil

}
+
+func (p *Planner) planScanValues(val *ast.Term, iter scaniter) error {
+
+ scan := &ir.ScanStmt{
+ Source: p.ltarget,
+ Key: p.newLocal(),
+ Value: p.newLocal(),
+ Block: &ir.Block{},
+ }
+
+ prev := p.curr
+ p.curr = scan.Block
+
+ if err := p.planUnifyLocal(scan.Value, val, func() error {
+ p.ltarget = scan.Value
+ return iter(scan.Value)
+ }); err != nil {
+ return err
+ }
+
+ p.curr = prev
+ p.appendStmt(scan)
+
+ return nil
+}
+
// planSaveLocals returns a slice of locals holding temporary variables that
// have been assigned from the supplied vars.
func (p *Planner) planSaveLocals(vars ...ir.Local) []ir.Local {

	lsaved := make([]ir.Local, len(vars))

	for i := range vars {

		lsaved[i] = p.newLocal()

		p.appendStmt(&ir.AssignVarStmt{
			Source: vars[i],
			Target: lsaved[i],
		})
	}

	return lsaved
}

// termsliceiter is invoked with the locals holding the values of a planned
// term slice, in order.
type termsliceiter func([]ir.Local) error

// planTermSlice plans evaluation of each term in terms and invokes iter with
// the locals holding the resulting values.
func (p *Planner) planTermSlice(terms []*ast.Term, iter termsliceiter) error {
	return p.planTermSliceRec(terms, make([]ir.Local, len(terms)), 0, iter)
}

// planTermSliceRec plans the term at index, records its result local, and
// recurses; when all terms are planned, iter receives the populated locals.
func (p *Planner) planTermSliceRec(terms []*ast.Term, locals []ir.Local, index int, iter termsliceiter) error {
	if index >= len(terms) {
		return iter(locals)
	}

	return p.planTerm(terms[index], func() error {
		locals[index] = p.ltarget
		return p.planTermSliceRec(terms, locals, index+1, iter)
	})
}
+
+func (p *Planner) planExterns() error {
+
+ p.policy.Static.BuiltinFuncs = make([]*ir.BuiltinFunc, 0, len(p.externs))
+
+ for name := range p.externs {
+ p.policy.Static.BuiltinFuncs = append(p.policy.Static.BuiltinFuncs, &ir.BuiltinFunc{Name: name})
+ }
+
+ sort.Slice(p.policy.Static.BuiltinFuncs, func(i, j int) bool {
+ return p.policy.Static.BuiltinFuncs[i].Name < p.policy.Static.BuiltinFuncs[j].Name
+ })
+
+ return nil
+}
+
// getStringConst interns s in the policy's static string table and returns
// its index, reusing an existing entry when present.
func (p *Planner) getStringConst(s string) int {
	index, ok := p.strings[s]
	if !ok {
		index = len(p.policy.Static.Strings)
		p.policy.Static.Strings = append(p.policy.Static.Strings, &ir.StringConst{
			Value: s,
		})
		p.strings[s] = index
	}
	return index
}

// getFileConst interns the file path s in the policy's static file table and
// returns its index, reusing an existing entry when present.
func (p *Planner) getFileConst(s string) int {
	index, ok := p.files[s]
	if !ok {
		index = len(p.policy.Static.Files)
		p.policy.Static.Files = append(p.policy.Static.Files, &ir.StringConst{
			Value: s,
		})
		p.files[s] = index
	}
	return index
}
+
// appendStmt appends s to the planner's current block, attaching location
// metadata when available (see appendStmtToBlock).
func (p *Planner) appendStmt(s ir.Stmt) {
	p.appendStmtToBlock(s, p.curr)
}
+
+func (p *Planner) appendStmtToBlock(s ir.Stmt, b *ir.Block) {
+ if p.loc != nil {
+ str := p.loc.File
+ if str == "" {
+ str = ``
+ }
+ s.SetLocation(p.getFileConst(str), p.loc.Row, p.loc.Col, str, string(p.loc.Text))
+ }
+ b.Stmts = append(b.Stmts, s)
+}
+
// blockWithStmt returns a fresh block containing only s (with location
// metadata attached if available).
func (p *Planner) blockWithStmt(s ir.Stmt) *ir.Block {
	b := &ir.Block{}
	p.appendStmtToBlock(s, b)
	return b
}

// appendBlock appends b to the plan's top-level block list.
func (p *Planner) appendBlock(b *ir.Block) {
	p.plan.Blocks = append(p.plan.Blocks, b)
}

// appendFunc appends f to the policy's function list.
func (p *Planner) appendFunc(f *ir.Func) {
	p.policy.Funcs.Funcs = append(p.policy.Funcs.Funcs, f)
}

// newLocal allocates and returns the next unused local variable.
func (p *Planner) newLocal() ir.Local {
	x := p.lnext
	p.lnext++
	return x
}

// rewrittenVar returns the rewritten name for k from the compiler's rewrite
// map, or k itself if it was not rewritten.
func rewrittenVar(vars map[ast.Var]ast.Var, k ast.Var) ast.Var {
	rw, ok := vars[k]
	if !ok {
		return k
	}
	return rw
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
new file mode 100644
index 00000000..e2d9d156
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go
@@ -0,0 +1,156 @@
+package planner
+
+import (
+ "sort"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
// funcstack implements a simple map structure used to keep track of virtual
// document => planned function names. The structure supports Push and Pop
// operations so that the planner can shadow planned functions when 'with'
// statements are found.
type funcstack struct {
	stack []map[string]string // one frame per shadowing level; top is last
	gen   int                 // generation counter, bumped on every Push/Pop
}
+
+func newFuncstack() *funcstack {
+ return &funcstack{
+ stack: []map[string]string{
+ map[string]string{},
+ },
+ gen: 0,
+ }
+}
+
// Add records the planned function name value for the virtual document path
// key in the top frame. A value receiver suffices because the frame map is
// shared with the receiver copy.
func (p funcstack) Add(key, value string) {
	p.stack[len(p.stack)-1][key] = value
}

// Get returns the planned function name for key from the top frame only;
// shadowed (lower-frame) entries are intentionally not consulted.
func (p funcstack) Get(key string) (string, bool) {
	value, ok := p.stack[len(p.stack)-1][key]
	return value, ok
}

// Push makes funcs the new top frame, shadowing existing entries, and bumps
// the generation.
func (p *funcstack) Push(funcs map[string]string) {
	p.stack = append(p.stack, funcs)
	p.gen++
}

// Pop removes and returns the top frame and bumps the generation.
func (p *funcstack) Pop() map[string]string {
	last := p.stack[len(p.stack)-1]
	p.stack = p.stack[:len(p.stack)-1]
	p.gen++
	return last
}
+
// ruletrie implements a simple trie structure for organizing rules that may be
// planned. The trie nodes are keyed by the rule path. The ruletrie supports
// Push and Pop operations that allow the planner to shadow subtrees when 'with'
// statements are found.
type ruletrie struct {
	// children maps each key to a stack of nodes; the last element is the
	// active (non-shadowed) node, and a nil tail entry marks a shadowed key.
	children map[ast.Value][]*ruletrie
	rules    []*ast.Rule
}

// newRuletrie returns an empty trie node.
func newRuletrie() *ruletrie {
	return &ruletrie{
		children: map[ast.Value][]*ruletrie{},
	}
}

// Arity returns the number of arguments of the rules at this node (all rules
// at one node share a head arity), or 0 if the node holds no rules.
func (t *ruletrie) Arity() int {
	rules := t.Rules()
	if len(rules) > 0 {
		return len(rules[0].Head.Args)
	}
	return 0
}

// Rules returns the rules stored at this node. Rules is nil-safe.
func (t *ruletrie) Rules() []*ast.Rule {
	if t != nil {
		return t.rules
	}
	return nil
}
+
// Push shadows the subtree at key by appending a nil node to the key's child
// stack; Get then reports the key as absent until Pop is called. If any
// intermediate node along key does not exist, Push is a no-op.
func (t *ruletrie) Push(key ast.Ref) {
	node := t
	for i := 0; i < len(key)-1; i++ {
		node = node.Get(key[i].Value)
		if node == nil {
			return
		}
	}
	elem := key[len(key)-1]
	node.children[elem.Value] = append(node.children[elem.Value], nil)
}

// Pop undoes the most recent Push for key by removing the top entry of the
// key's child stack. If any intermediate node along key does not exist, Pop
// is a no-op.
func (t *ruletrie) Pop(key ast.Ref) {
	node := t
	for i := 0; i < len(key)-1; i++ {
		node = node.Get(key[i].Value)
		if node == nil {
			return
		}
	}
	elem := key[len(key)-1]
	sl := node.children[elem.Value]
	node.children[elem.Value] = sl[:len(sl)-1]
}
+
// Insert creates (or reuses) the node at key, materializing any missing
// intermediate nodes, and returns it.
func (t *ruletrie) Insert(key ast.Ref) *ruletrie {
	node := t
	for _, elem := range key {
		child := node.Get(elem.Value)
		if child == nil {
			child = newRuletrie()
			node.children[elem.Value] = append(node.children[elem.Value], child)
		}
		node = child
	}
	return node
}

// Lookup returns the node at key, or nil if any element along the path is
// missing (or shadowed).
func (t *ruletrie) Lookup(key ast.Ref) *ruletrie {
	node := t
	for _, elem := range key {
		node = node.Get(elem.Value)
		if node == nil {
			return nil
		}
	}
	return node
}

// LookupOrInsert returns the node at key, inserting it if absent.
func (t *ruletrie) LookupOrInsert(key ast.Ref) *ruletrie {
	if val := t.Lookup(key); val != nil {
		return val
	}
	return t.Insert(key)
}
+
+func (t *ruletrie) Children() []ast.Value {
+ sorted := make([]ast.Value, 0, len(t.children))
+ for key := range t.children {
+ if t.Get(key) != nil {
+ sorted = append(sorted, key)
+ }
+ }
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Compare(sorted[j]) < 0
+ })
+ return sorted
+}
+
+func (t *ruletrie) Get(k ast.Value) *ruletrie {
+ if t == nil {
+ return nil
+ }
+ nodes := t.children[k]
+ if len(nodes) == 0 {
+ return nil
+ }
+ return nodes[len(nodes)-1]
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
new file mode 100644
index 00000000..7dbf8f43
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package planner
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/ir"
+)
+
// varstack maps Rego variables to planned locals across a stack of frames;
// lookups search from the most recent frame down, so pushed frames shadow
// earlier bindings.
type varstack []map[ast.Var]ir.Local

// newVarstack returns a varstack with the given frames pushed in order.
func newVarstack(frames ...map[ast.Var]ir.Local) *varstack {
	vs := &varstack{}
	for _, f := range frames {
		vs.Push(f)
	}
	return vs
}

// GetOrElse returns the local bound to k, or binds and returns orElse()'s
// result in the top frame when k is unbound. The value receiver is fine here:
// Put mutates the shared frame map, not the slice header.
func (vs varstack) GetOrElse(k ast.Var, orElse func() ir.Local) ir.Local {
	l, ok := vs.Get(k)
	if !ok {
		l = orElse()
		vs.Put(k, l)
	}
	return l
}

// GetOrEmpty returns the local bound to k, or the zero local if unbound.
func (vs varstack) GetOrEmpty(k ast.Var) ir.Local {
	l, _ := vs.Get(k)
	return l
}

// Get searches the frames from top to bottom and returns the first binding
// for k, reporting whether one was found.
func (vs varstack) Get(k ast.Var) (ir.Local, bool) {
	for i := len(vs) - 1; i >= 0; i-- {
		if l, ok := vs[i][k]; ok {
			return l, true
		}
	}
	return 0, false
}

// Put binds k to v in the top frame.
func (vs varstack) Put(k ast.Var, v ir.Local) {
	vs[len(vs)-1][k] = v
}

// Push makes frame the new top frame, shadowing existing bindings.
func (vs *varstack) Push(frame map[ast.Var]ir.Local) {
	*vs = append(*vs, frame)
}

// Pop removes and returns the top frame.
func (vs *varstack) Pop() map[ast.Var]ir.Local {
	sl := *vs
	last := sl[len(sl)-1]
	*vs = sl[:len(sl)-1]
	return last
}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/semver/LICENSE b/vendor/github.com/open-policy-agent/opa/internal/semver/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/semver/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go
new file mode 100644
index 00000000..3b86d5b6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/semver/semver.go
@@ -0,0 +1,235 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Semantic Versions http://semver.org
+
+// Package semver has been vendored from:
+// https://github.com/coreos/go-semver/tree/e214231b295a8ea9479f11b70b35d5acf3556d9b/semver
+// A number of the original functions of the package have been removed since
+// they are not required for our built-ins.
+package semver
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Version represents a parsed SemVer
+type Version struct {
+ Major int64
+ Minor int64
+ Patch int64
+ PreRelease PreRelease
+ Metadata string
+}
+
+// PreRelease represents a pre-release suffix string
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+ parts := strings.SplitN(*input, delim, 2)
+
+ if len(parts) == 2 {
+ *input = parts[0]
+ val = parts[1]
+ }
+
+ return val
+}
+
+// NewVersion constructs new SemVers from strings
+func NewVersion(version string) (*Version, error) {
+ v := Version{}
+
+ if err := v.Set(version); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
+
+// Set parses and updates v from the given version string. Implements flag.Value
+func (v *Version) Set(version string) error {
+ metadata := splitOff(&version, "+")
+ preRelease := PreRelease(splitOff(&version, "-"))
+ dotParts := strings.SplitN(version, ".", 3)
+
+ if len(dotParts) != 3 {
+ return fmt.Errorf("%s is not in dotted-tri format", version)
+ }
+
+ if err := validateIdentifier(string(preRelease)); err != nil {
+ return fmt.Errorf("failed to validate pre-release: %v", err)
+ }
+
+ if err := validateIdentifier(metadata); err != nil {
+ return fmt.Errorf("failed to validate metadata: %v", err)
+ }
+
+ parsed := make([]int64, 3, 3)
+
+ for i, v := range dotParts[:3] {
+ val, err := strconv.ParseInt(v, 10, 64)
+ parsed[i] = val
+ if err != nil {
+ return err
+ }
+ }
+
+ v.Metadata = metadata
+ v.PreRelease = preRelease
+ v.Major = parsed[0]
+ v.Minor = parsed[1]
+ v.Patch = parsed[2]
+ return nil
+}
+
+func (v Version) String() string {
+ var buffer bytes.Buffer
+
+ fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+ if v.PreRelease != "" {
+ fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+ }
+
+ if v.Metadata != "" {
+ fmt.Fprintf(&buffer, "+%s", v.Metadata)
+ }
+
+ return buffer.String()
+}
+
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+ if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+ return cmp
+ }
+ return preReleaseCompare(v, versionB)
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
+ return []int64{v.Major, v.Minor, v.Patch}
+}
+
+// Slice splits the pre-release suffix string
+func (p PreRelease) Slice() []string {
+ preRelease := string(p)
+ return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+ a := versionA.PreRelease
+ b := versionB.PreRelease
+
+ /* Handle the case where if two versions are otherwise equal it is the
+ * one without a PreRelease that is greater */
+ if len(a) == 0 && (len(b) > 0) {
+ return 1
+ } else if len(b) == 0 && (len(a) > 0) {
+ return -1
+ }
+
+ // If there is a prerelease, check and compare each part.
+ return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+ if len(versionA) == 0 {
+ return 0
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+ // A larger set of pre-release fields has a higher precedence than a smaller set,
+ // if all of the preceding identifiers are equal.
+ if len(versionA) == 0 {
+ if len(versionB) > 0 {
+ return -1
+ }
+ return 0
+ } else if len(versionB) == 0 {
+ // We're longer than versionB so return 1.
+ return 1
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ aInt := false
+ bInt := false
+
+ aI, err := strconv.Atoi(versionA[0])
+ if err == nil {
+ aInt = true
+ }
+
+ bI, err := strconv.Atoi(versionB[0])
+ if err == nil {
+ bInt = true
+ }
+
+ // Numeric identifiers always have lower precedence than non-numeric identifiers.
+ if aInt && !bInt {
+ return -1
+ } else if !aInt && bInt {
+ return 1
+ }
+
+ // Handle Integer Comparison
+ if aInt && bInt {
+ if aI > bI {
+ return 1
+ } else if aI < bI {
+ return -1
+ }
+ }
+
+ // Handle String Comparison
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// validateIdentifier makes sure the provided identifier satisfies semver spec
+func validateIdentifier(id string) error {
+ if id != "" && !reIdentifier.MatchString(id) {
+ return fmt.Errorf("%s is not a valid semver identifier", id)
+ }
+ return nil
+}
+
+// reIdentifier is a regular expression used to check that pre-release and metadata
+// identifiers satisfy the spec requirements
+var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go
new file mode 100644
index 00000000..e79e7337
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/uuid/uuid.go
@@ -0,0 +1,22 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "fmt"
+ "io"
+)
+
+// New Create a version 4 random UUID
+func New(r io.Reader) (string, error) {
+ bs := make([]byte, 16)
+ n, err := io.ReadFull(r, bs)
+ if n != len(bs) || err != nil {
+ return "", err
+ }
+ bs[8] = bs[8]&^0xc0 | 0x80
+ bs[6] = bs[6]&^0xf0 | 0x40
+ return fmt.Sprintf("%x-%x-%x-%x-%x", bs[0:4], bs[4:6], bs[6:8], bs[8:10], bs[10:]), nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/version/version.go b/vendor/github.com/open-policy-agent/opa/internal/version/version.go
new file mode 100644
index 00000000..02f1c1b8
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/version/version.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package version implements helper functions for the stored version.
+package version
+
+import (
+ "context"
+ "fmt"
+ "runtime"
+
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/version"
+)
+
+var versionPath = storage.MustParsePath("/system/version")
+
+// Write the build version information into storage. This makes the
+// version information available to the REPL and the HTTP server.
+func Write(ctx context.Context, store storage.Store, txn storage.Transaction) error {
+
+ if err := storage.MakeDir(ctx, store, txn, versionPath); err != nil {
+ return err
+ }
+
+ if err := store.Write(ctx, txn, storage.AddOp, versionPath, map[string]interface{}{
+ "version": version.Version,
+ "build_commit": version.Vcs,
+ "build_timestamp": version.Timestamp,
+ "build_hostname": version.Hostname,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// UserAgent defines the current OPA instances User-Agent default header value.
+var UserAgent = fmt.Sprintf("Open Policy Agent/%s (%s, %s)", version.Version, runtime.GOOS, runtime.GOARCH)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/constant/constant.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/constant/constant.go
new file mode 100644
index 00000000..878979fb
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/constant/constant.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package constant contains WASM constant definitions.
+package constant
+
+// Magic bytes at the beginning of every WASM file ("\0asm").
+const Magic = uint32(0x6D736100)
+
+// Version defines the WASM version.
+const Version = uint32(1)
+
+// WASM module section IDs.
+const (
+ CustomSectionID uint8 = iota
+ TypeSectionID
+ ImportSectionID
+ FunctionSectionID
+ TableSectionID
+ MemorySectionID
+ GlobalSectionID
+ ExportSectionID
+ StartSectionID
+ ElementSectionID
+ CodeSectionID
+ DataSectionID
+)
+
+// FunctionTypeID indicates the start of a function type definition.
+const FunctionTypeID = byte(0x60)
+
+// ValueType represents an intrinsic value type in WASM.
+const (
+ ValueTypeF64 byte = iota + 0x7C
+ ValueTypeF32
+ ValueTypeI64
+ ValueTypeI32
+)
+
+// WASM import descriptor types.
+const (
+ ImportDescType byte = iota
+ ImportDescTable
+ ImportDescMem
+ ImportDescGlobal
+)
+
+// WASM export descriptor types.
+const (
+ ExportDescType byte = iota
+ ExportDescTable
+ ExportDescMem
+ ExportDescGlobal
+)
+
+// ElementTypeAnyFunc indicates the type of a table import.
+const ElementTypeAnyFunc byte = 0x70
+
+// BlockTypeEmpty represents a block type.
+const BlockTypeEmpty byte = 0x40
+
+// WASM global variable mutability flag.
+const (
+ Const byte = iota
+ Mutable
+)
+
+// NameSectionCustomID is the ID of the "Name" section Custom Section
+const NameSectionCustomID = "name"
+
+// Subtypes of the 'name' custom section
+const (
+ NameSectionModuleType byte = iota
+ NameSectionFunctionsType
+ NameSectionLocalsType
+)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/doc.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/doc.go
new file mode 100644
index 00000000..b2523696
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package encoding implements WASM module reading and writing.
+package encoding
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
new file mode 100644
index 00000000..6ae6ca3c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go
@@ -0,0 +1,930 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package encoding
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/internal/leb128"
+ "github.com/open-policy-agent/opa/internal/wasm/constant"
+ "github.com/open-policy-agent/opa/internal/wasm/instruction"
+ "github.com/open-policy-agent/opa/internal/wasm/module"
+ "github.com/open-policy-agent/opa/internal/wasm/opcode"
+ "github.com/open-policy-agent/opa/internal/wasm/types"
+)
+
+// ReadModule reads a binary-encoded WASM module from r.
+func ReadModule(r io.Reader) (*module.Module, error) {
+
+ wr := &reader{r: r, n: 0}
+ module, err := readModule(wr)
+ if err != nil {
+ return nil, errors.Wrapf(err, "offset 0x%x", wr.n)
+ }
+
+ return module, nil
+}
+
+// ReadCodeEntry reads a binary-encoded WASM code entry from r.
+func ReadCodeEntry(r io.Reader) (*module.CodeEntry, error) {
+
+ wr := &reader{r: r, n: 0}
+ entry, err := readCodeEntry(wr)
+ if err != nil {
+ return nil, errors.Wrapf(err, "offset 0x%x", wr.n)
+ }
+
+ return entry, nil
+}
+
+// CodeEntries returns the WASM code entries contained in r.
+func CodeEntries(m *module.Module) ([]*module.CodeEntry, error) {
+
+ entries := make([]*module.CodeEntry, len(m.Code.Segments))
+
+ for i, s := range m.Code.Segments {
+ buf := bytes.NewBuffer(s.Code)
+ entry, err := ReadCodeEntry(buf)
+ if err != nil {
+ return nil, err
+ }
+ entries[i] = entry
+ }
+
+ return entries, nil
+}
+
+type reader struct {
+ r io.Reader
+ n int
+}
+
+func (r *reader) Read(bs []byte) (int, error) {
+ n, err := r.r.Read(bs)
+ r.n += n
+ return n, err
+}
+
+func readModule(r io.Reader) (*module.Module, error) {
+
+ if err := readMagic(r); err != nil {
+ return nil, err
+ }
+
+ if err := readVersion(r); err != nil {
+ return nil, err
+ }
+
+ var m module.Module
+
+ if err := readSections(r, &m); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ return &m, nil
+}
+
+func readCodeEntry(r io.Reader) (*module.CodeEntry, error) {
+
+ var entry module.CodeEntry
+
+ if err := readLocals(r, &entry.Func.Locals); err != nil {
+ return nil, errors.Wrapf(err, "local declarations")
+ }
+
+ return &entry, readExpr(r, &entry.Func.Expr)
+}
+
+func readMagic(r io.Reader) error {
+ var v uint32
+ if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
+ return err
+ } else if v != constant.Magic {
+ return fmt.Errorf("illegal magic value")
+ }
+ return nil
+}
+
+func readVersion(r io.Reader) error {
+ var v uint32
+ if err := binary.Read(r, binary.LittleEndian, &v); err != nil {
+ return err
+ } else if v != constant.Version {
+ return fmt.Errorf("illegal wasm version")
+ }
+ return nil
+}
+
+func readSections(r io.Reader, m *module.Module) error {
+ for {
+ id, err := readByte(r)
+ if err != nil {
+ return err
+ }
+
+ size, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, size)
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return err
+ }
+
+ bufr := bytes.NewReader(buf)
+
+ switch id {
+ case constant.StartSectionID, constant.MemorySectionID:
+ continue
+ case constant.CustomSectionID:
+ var name string
+ if err := readByteVectorString(bufr, &name); err != nil {
+ return errors.Wrap(err, "read custom section type")
+ }
+ if name == "name" {
+ if err := readCustomNameSections(bufr, &m.Names); err != nil {
+ return errors.Wrap(err, "custom 'name' section")
+ }
+ } else {
+ if err := readCustomSection(bufr, name, &m.Customs); err != nil {
+ return errors.Wrap(err, "custom section")
+ }
+ }
+ case constant.TypeSectionID:
+ if err := readTypeSection(bufr, &m.Type); err != nil {
+ return errors.Wrap(err, "type section")
+ }
+ case constant.ImportSectionID:
+ if err := readImportSection(bufr, &m.Import); err != nil {
+ return errors.Wrap(err, "import section")
+ }
+ case constant.GlobalSectionID:
+ if err := readGlobalSection(bufr, &m.Global); err != nil {
+ return errors.Wrap(err, "global section")
+ }
+ case constant.TableSectionID:
+ if err := readTableSection(bufr, &m.Table); err != nil {
+ return errors.Wrap(err, "table section")
+ }
+ case constant.FunctionSectionID:
+ if err := readFunctionSection(bufr, &m.Function); err != nil {
+ return errors.Wrap(err, "function section")
+ }
+ case constant.ExportSectionID:
+ if err := readExportSection(bufr, &m.Export); err != nil {
+ return errors.Wrap(err, "export section")
+ }
+ case constant.ElementSectionID:
+ if err := readElementSection(bufr, &m.Element); err != nil {
+ return errors.Wrap(err, "element section")
+ }
+ case constant.DataSectionID:
+ if err := readDataSection(bufr, &m.Data); err != nil {
+ return errors.Wrap(err, "data section")
+ }
+ case constant.CodeSectionID:
+ if err := readRawCodeSection(bufr, &m.Code); err != nil {
+ return errors.Wrap(err, "code section")
+ }
+ default:
+ return fmt.Errorf("illegal section id")
+ }
+ }
+}
+
+func readCustomSection(r io.Reader, name string, s *[]module.CustomSection) error {
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ *s = append(*s, module.CustomSection{
+ Name: name,
+ Data: buf,
+ })
+ return nil
+}
+
+func readCustomNameSections(r io.Reader, s *module.NameSection) error {
+ for {
+ id, err := readByte(r)
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+ buf := make([]byte, n)
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return err
+ }
+ bufr := bytes.NewReader(buf)
+ switch id {
+ case constant.NameSectionModuleType:
+ err = readNameSectionModule(bufr, s)
+ case constant.NameSectionFunctionsType:
+ err = readNameSectionFunctions(bufr, s)
+ case constant.NameSectionLocalsType:
+ err = readNameSectionLocals(bufr, s)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func readNameSectionModule(r io.Reader, s *module.NameSection) error {
+ return readByteVectorString(r, &s.Module)
+}
+
+func readNameSectionFunctions(r io.Reader, s *module.NameSection) error {
+ nm, err := readNameMap(r)
+ if err != nil {
+ return err
+ }
+ s.Functions = nm
+ return nil
+}
+
+func readNameMap(r io.Reader) ([]module.NameMap, error) {
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return nil, err
+ }
+ nm := make([]module.NameMap, n)
+ for i := uint32(0); i < n; i++ {
+ var name string
+ id, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := readByteVectorString(r, &name); err != nil {
+ return nil, err
+ }
+ nm[i] = module.NameMap{Index: id, Name: name}
+ }
+ return nm, nil
+}
+
+func readNameSectionLocals(r io.Reader, s *module.NameSection) error {
+ n, err := leb128.ReadVarUint32(r) // length of vec(indirectnameassoc)
+ if err != nil {
+ return err
+ }
+ for i := uint32(0); i < n; i++ {
+ id, err := leb128.ReadVarUint32(r) // func index
+ if err != nil {
+ return err
+ }
+ nm, err := readNameMap(r)
+ if err != nil {
+ return err
+ }
+ for _, m := range nm {
+ s.Locals = append(s.Locals, module.LocalNameMap{
+ FuncIndex: id,
+ NameMap: module.NameMap{
+ Index: m.Index,
+ Name: m.Name,
+ }})
+ }
+ }
+ return nil
+}
+
+func readTypeSection(r io.Reader, s *module.TypeSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var ftype module.FunctionType
+ if err := readFunctionType(r, &ftype); err != nil {
+ return err
+ }
+
+ s.Functions = append(s.Functions, ftype)
+ }
+
+ return nil
+}
+
+func readImportSection(r io.Reader, s *module.ImportSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var imp module.Import
+
+ if err := readImport(r, &imp); err != nil {
+ return err
+ }
+
+ s.Imports = append(s.Imports, imp)
+ }
+
+ return nil
+}
+
+func readTableSection(r io.Reader, s *module.TableSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var table module.Table
+
+ if elem, err := readByte(r); err != nil {
+ return err
+ } else if elem != constant.ElementTypeAnyFunc {
+ return fmt.Errorf("illegal element type")
+ } else {
+ table.Type = types.Anyfunc
+ }
+
+ if err := readLimits(r, &table.Lim); err != nil {
+ return err
+ }
+
+ s.Tables = append(s.Tables, table)
+ }
+
+ return nil
+}
+
+func readGlobalSection(r io.Reader, s *module.GlobalSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var global module.Global
+
+ if err := readGlobal(r, &global); err != nil {
+ return err
+ }
+
+ s.Globals = append(s.Globals, global)
+ }
+
+ return nil
+}
+
+func readFunctionSection(r io.Reader, s *module.FunctionSection) error {
+ return readVarUint32Vector(r, &s.TypeIndices)
+}
+
+func readExportSection(r io.Reader, s *module.ExportSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var exp module.Export
+
+ if err := readExport(r, &exp); err != nil {
+ return err
+ }
+
+ s.Exports = append(s.Exports, exp)
+ }
+
+ return nil
+}
+
+func readElementSection(r io.Reader, s *module.ElementSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var seg module.ElementSegment
+
+ if err := readElementSegment(r, &seg); err != nil {
+ return err
+ }
+
+ s.Segments = append(s.Segments, seg)
+ }
+
+ return nil
+}
+
+func readDataSection(r io.Reader, s *module.DataSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+
+ var seg module.DataSegment
+
+ if err := readDataSegment(r, &seg); err != nil {
+ return err
+ }
+
+ s.Segments = append(s.Segments, seg)
+ }
+
+ return nil
+}
+
+func readRawCodeSection(r io.Reader, s *module.RawCodeSection) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ for i := uint32(0); i < n; i++ {
+ var seg module.RawCodeSegment
+
+ if err := readRawCodeSegment(r, &seg); err != nil {
+ return err
+ }
+
+ s.Segments = append(s.Segments, seg)
+ }
+
+ return nil
+}
+
+func readFunctionType(r io.Reader, ftype *module.FunctionType) error {
+
+ if b, err := readByte(r); err != nil {
+ return err
+ } else if b != constant.FunctionTypeID {
+ return fmt.Errorf("illegal function type id 0x%x", b)
+ }
+
+ if err := readValueTypeVector(r, &ftype.Params); err != nil {
+ return err
+ }
+
+ return readValueTypeVector(r, &ftype.Results)
+}
+
+func readGlobal(r io.Reader, global *module.Global) error {
+
+ if err := readValueType(r, &global.Type); err != nil {
+ return err
+ }
+
+ b, err := readByte(r)
+ if err != nil {
+ return err
+ }
+
+ if b == 1 {
+ global.Mutable = true
+ } else if b != 0 {
+ return fmt.Errorf("illegal mutability flag")
+ }
+
+ if err := readConstantExpr(r, &global.Init); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func readImport(r io.Reader, imp *module.Import) error {
+
+ if err := readByteVectorString(r, &imp.Module); err != nil {
+ return err
+ }
+
+ if err := readByteVectorString(r, &imp.Name); err != nil {
+ return err
+ }
+
+ b, err := readByte(r)
+ if err != nil {
+ return err
+
+ }
+
+ if b == constant.ImportDescType {
+ index, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+ imp.Descriptor = module.FunctionImport{
+ Func: index,
+ }
+ return nil
+ }
+
+ if b == constant.ImportDescTable {
+ if elem, err := readByte(r); err != nil {
+ return err
+ } else if elem != constant.ElementTypeAnyFunc {
+ return fmt.Errorf("illegal element type")
+ }
+ desc := module.TableImport{
+ Type: types.Anyfunc,
+ }
+ if err := readLimits(r, &desc.Lim); err != nil {
+ return err
+ }
+ imp.Descriptor = desc
+ return nil
+ }
+
+ if b == constant.ImportDescMem {
+ desc := module.MemoryImport{}
+ if err := readLimits(r, &desc.Mem.Lim); err != nil {
+ return err
+ }
+ imp.Descriptor = desc
+ return nil
+ }
+
+ if b == constant.ImportDescGlobal {
+ desc := module.GlobalImport{}
+ if err := readValueType(r, &desc.Type); err != nil {
+ return err
+ }
+ b, err := readByte(r)
+ if err != nil {
+ return err
+ }
+ if b == 1 {
+ desc.Mutable = true
+ } else if b != 0 {
+ return fmt.Errorf("illegal mutability flag")
+ }
+ return nil
+ }
+
+ return fmt.Errorf("illegal import descriptor type")
+}
+
+func readExport(r io.Reader, exp *module.Export) error {
+
+ if err := readByteVectorString(r, &exp.Name); err != nil {
+ return err
+ }
+
+ b, err := readByte(r)
+ if err != nil {
+ return err
+ }
+
+ switch b {
+ case constant.ExportDescType:
+ exp.Descriptor.Type = module.FunctionExportType
+ case constant.ExportDescTable:
+ exp.Descriptor.Type = module.TableExportType
+ case constant.ExportDescMem:
+ exp.Descriptor.Type = module.MemoryExportType
+ case constant.ExportDescGlobal:
+ exp.Descriptor.Type = module.GlobalExportType
+ default:
+ return fmt.Errorf("illegal export descriptor type")
+ }
+
+ exp.Descriptor.Index, err = leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func readElementSegment(r io.Reader, seg *module.ElementSegment) error {
+
+ if err := readVarUint32(r, &seg.Index); err != nil {
+ return err
+ }
+
+ if err := readConstantExpr(r, &seg.Offset); err != nil {
+ return err
+ }
+
+ if err := readVarUint32Vector(r, &seg.Indices); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func readDataSegment(r io.Reader, seg *module.DataSegment) error {
+
+ if err := readVarUint32(r, &seg.Index); err != nil {
+ return err
+ }
+
+ if err := readConstantExpr(r, &seg.Offset); err != nil {
+ return err
+ }
+
+ if err := readByteVector(r, &seg.Init); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func readRawCodeSegment(r io.Reader, seg *module.RawCodeSegment) error {
+ return readByteVector(r, &seg.Code)
+}
+
+func readConstantExpr(r io.Reader, expr *module.Expr) error {
+
+ instrs := make([]instruction.Instruction, 0)
+
+ for {
+ b, err := readByte(r)
+ if err != nil {
+ return err
+ }
+
+ switch opcode.Opcode(b) {
+ case opcode.I32Const:
+ i32, err := leb128.ReadVarInt32(r)
+ if err != nil {
+ return err
+ }
+ instrs = append(instrs, instruction.I32Const{Value: i32})
+ case opcode.I64Const:
+ i64, err := leb128.ReadVarInt64(r)
+ if err != nil {
+ return err
+ }
+ instrs = append(instrs, instruction.I64Const{Value: i64})
+ case opcode.End:
+ expr.Instrs = instrs
+ return nil
+ default:
+ return fmt.Errorf("illegal constant expr opcode 0x%x", b)
+ }
+ }
+}
+
+func readExpr(r io.Reader, expr *module.Expr) (err error) {
+
+ defer func() {
+ if r := recover(); r != nil {
+ switch r := r.(type) {
+ case error:
+ err = r
+ default:
+ err = fmt.Errorf("unknown panic")
+ }
+ }
+ }()
+
+ return readInstructions(r, &expr.Instrs)
+}
+
+func readInstructions(r io.Reader, instrs *[]instruction.Instruction) error {
+
+ ret := make([]instruction.Instruction, 0)
+
+ for {
+ b, err := readByte(r)
+ if err != nil {
+ return err
+ }
+
+ switch opcode.Opcode(b) {
+ case opcode.I32Const:
+ ret = append(ret, instruction.I32Const{Value: leb128.MustReadVarInt32(r)})
+ case opcode.I64Const:
+ ret = append(ret, instruction.I64Const{Value: leb128.MustReadVarInt64(r)})
+ case opcode.I32Eqz:
+ ret = append(ret, instruction.I32Eqz{})
+ case opcode.GetLocal:
+ ret = append(ret, instruction.GetLocal{Index: leb128.MustReadVarUint32(r)})
+ case opcode.SetLocal:
+ ret = append(ret, instruction.SetLocal{Index: leb128.MustReadVarUint32(r)})
+ case opcode.Call:
+ ret = append(ret, instruction.Call{Index: leb128.MustReadVarUint32(r)})
+ case opcode.BrIf:
+ ret = append(ret, instruction.BrIf{Index: leb128.MustReadVarUint32(r)})
+ case opcode.Return:
+ ret = append(ret, instruction.Return{})
+ case opcode.Block:
+ block := instruction.Block{}
+ if err := readBlockValueType(r, block.Type); err != nil {
+ return err
+ }
+ if err := readInstructions(r, &block.Instrs); err != nil {
+ return err
+ }
+ ret = append(ret, block)
+ case opcode.Loop:
+ loop := instruction.Loop{}
+ if err := readBlockValueType(r, loop.Type); err != nil {
+ return err
+ }
+ if err := readInstructions(r, &loop.Instrs); err != nil {
+ return err
+ }
+ ret = append(ret, loop)
+ case opcode.End:
+ *instrs = ret
+ return nil
+ default:
+ return fmt.Errorf("illegal opcode 0x%x", b)
+ }
+ }
+}
+
+func readLimits(r io.Reader, l *module.Limit) error {
+
+ b, err := readByte(r)
+ if err != nil {
+ return err
+ }
+
+ min, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ l.Min = min
+
+ if b == 1 {
+ max, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+ l.Max = &max
+ } else if b != 0 {
+ return fmt.Errorf("illegal limit flag")
+ }
+
+ return nil
+}
+
+func readLocals(r io.Reader, locals *[]module.LocalDeclaration) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ ret := make([]module.LocalDeclaration, n)
+
+ for i := uint32(0); i < n; i++ {
+ if err := readVarUint32(r, &ret[i].Count); err != nil {
+ return err
+ }
+ if err := readValueType(r, &ret[i].Type); err != nil {
+ return err
+ }
+ }
+
+ *locals = ret
+ return nil
+}
+
+func readByteVector(r io.Reader, v *[]byte) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, n)
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return err
+ }
+
+ *v = buf
+ return nil
+}
+
+func readByteVectorString(r io.Reader, v *string) error {
+
+ var buf []byte
+
+ if err := readByteVector(r, &buf); err != nil {
+ return err
+ }
+
+ *v = string(buf)
+ return nil
+}
+
+func readVarUint32Vector(r io.Reader, v *[]uint32) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ ret := make([]uint32, n)
+
+ for i := uint32(0); i < n; i++ {
+ if err := readVarUint32(r, &ret[i]); err != nil {
+ return err
+ }
+ }
+
+ *v = ret
+ return nil
+}
+
+func readValueTypeVector(r io.Reader, v *[]types.ValueType) error {
+
+ n, err := leb128.ReadVarUint32(r)
+ if err != nil {
+ return err
+ }
+
+ ret := make([]types.ValueType, n)
+
+ for i := uint32(0); i < n; i++ {
+ if err := readValueType(r, &ret[i]); err != nil {
+ return err
+ }
+ }
+
+ *v = ret
+ return nil
+}
+
+func readVarUint32(r io.Reader, v *uint32) error {
+ var err error
+ *v, err = leb128.ReadVarUint32(r)
+ return err
+}
+
+func readValueType(r io.Reader, v *types.ValueType) error {
+ if b, err := readByte(r); err != nil {
+ return err
+ } else if b == constant.ValueTypeI32 {
+ *v = types.I32
+ } else if b == constant.ValueTypeI64 {
+ *v = types.I64
+ } else if b == constant.ValueTypeF32 {
+ *v = types.F32
+ } else if b == constant.ValueTypeF64 {
+ *v = types.F64
+ } else {
+ return fmt.Errorf("illegal value type: 0x%x", b)
+ }
+ return nil
+}
+
+func readBlockValueType(r io.Reader, v *types.ValueType) error {
+ if b, err := readByte(r); err != nil {
+ return err
+ } else if b == constant.ValueTypeI32 {
+ *v = types.I32
+ } else if b == constant.ValueTypeI64 {
+ *v = types.I64
+ } else if b == constant.ValueTypeF32 {
+ *v = types.F32
+ } else if b == constant.ValueTypeF64 {
+ *v = types.F64
+ } else if b != constant.BlockTypeEmpty {
+ return fmt.Errorf("illegal value type: 0x%x", b)
+ }
+ return nil
+}
+
+func readByte(r io.Reader) (byte, error) {
+ buf := make([]byte, 1)
+ _, err := io.ReadFull(r, buf)
+ return buf[0], err
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go
new file mode 100644
index 00000000..413e3d9d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/writer.go
@@ -0,0 +1,730 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package encoding
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/open-policy-agent/opa/internal/leb128"
+ "github.com/open-policy-agent/opa/internal/wasm/constant"
+ "github.com/open-policy-agent/opa/internal/wasm/instruction"
+ "github.com/open-policy-agent/opa/internal/wasm/module"
+ "github.com/open-policy-agent/opa/internal/wasm/opcode"
+ "github.com/open-policy-agent/opa/internal/wasm/types"
+)
+
+// WriteModule writes a binary-encoded representation of module to w.
+func WriteModule(w io.Writer, module *module.Module) error {
+
+ if err := writeMagic(w); err != nil {
+ return err
+ }
+
+ if err := writeVersion(w); err != nil {
+ return err
+ }
+
+ if module == nil {
+ return nil
+ }
+
+ if err := writeTypeSection(w, module.Type); err != nil {
+ return err
+ }
+
+ if err := writeImportSection(w, module.Import); err != nil {
+ return err
+ }
+
+ if err := writeFunctionSection(w, module.Function); err != nil {
+ return err
+ }
+
+ if err := writeTableSection(w, module.Table); err != nil {
+ return err
+ }
+
+ if err := writeGlobalSection(w, module.Global); err != nil {
+ return err
+ }
+
+ if err := writeExportSection(w, module.Export); err != nil {
+ return err
+ }
+
+ if err := writeElementSection(w, module.Element); err != nil {
+ return err
+ }
+
+ if err := writeRawCodeSection(w, module.Code); err != nil {
+ return err
+ }
+
+ if err := writeDataSection(w, module.Data); err != nil {
+ return err
+ }
+
+ if err := writeNameSection(w, module.Names); err != nil {
+ return err
+ }
+
+ for _, custom := range module.Customs {
+ if err := writeCustomSection(w, custom); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// WriteCodeEntry writes a binary encoded representation of entry to w.
+func WriteCodeEntry(w io.Writer, entry *module.CodeEntry) error {
+
+ if err := leb128.WriteVarUint32(w, uint32(len(entry.Func.Locals))); err != nil {
+ return err
+ }
+
+ for _, local := range entry.Func.Locals {
+
+ if err := leb128.WriteVarUint32(w, local.Count); err != nil {
+ return err
+ }
+
+ if err := writeValueType(w, local.Type); err != nil {
+ return err
+ }
+ }
+
+ return writeInstructions(w, entry.Func.Expr.Instrs)
+}
+
+func writeMagic(w io.Writer) error {
+ return binary.Write(w, binary.LittleEndian, constant.Magic)
+}
+
+func writeVersion(w io.Writer) error {
+ return binary.Write(w, binary.LittleEndian, constant.Version)
+}
+
+func writeTypeSection(w io.Writer, s module.TypeSection) error {
+
+ if len(s.Functions) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.TypeSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Functions))); err != nil {
+ return err
+ }
+
+ for _, fsig := range s.Functions {
+ if err := writeFunctionType(&buf, fsig); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeImportSection(w io.Writer, s module.ImportSection) error {
+
+ if len(s.Imports) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.ImportSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Imports))); err != nil {
+ return err
+ }
+
+ for _, imp := range s.Imports {
+ if err := writeImport(&buf, imp); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeGlobalSection(w io.Writer, s module.GlobalSection) error {
+
+ if len(s.Globals) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.GlobalSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Globals))); err != nil {
+ return err
+ }
+
+ for _, global := range s.Globals {
+ if err := writeGlobal(&buf, global); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeFunctionSection(w io.Writer, s module.FunctionSection) error {
+
+ if len(s.TypeIndices) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.FunctionSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.TypeIndices))); err != nil {
+ return err
+ }
+
+ for _, idx := range s.TypeIndices {
+ if err := leb128.WriteVarUint32(&buf, uint32(idx)); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeTableSection(w io.Writer, s module.TableSection) error {
+
+ if len(s.Tables) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.TableSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Tables))); err != nil {
+ return err
+ }
+
+ for _, table := range s.Tables {
+ switch table.Type {
+ case types.Anyfunc:
+ if err := writeByte(&buf, constant.ElementTypeAnyFunc); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("illegal table element type")
+ }
+ if err := writeLimits(&buf, table.Lim); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+
+}
+
+func writeExportSection(w io.Writer, s module.ExportSection) error {
+
+ if len(s.Exports) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.ExportSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Exports))); err != nil {
+ return err
+ }
+
+ for _, exp := range s.Exports {
+ if err := writeByteVector(&buf, []byte(exp.Name)); err != nil {
+ return err
+ }
+ var tpe byte
+ switch exp.Descriptor.Type {
+ case module.FunctionExportType:
+ tpe = constant.ExportDescType
+ case module.TableExportType:
+ tpe = constant.ExportDescTable
+ case module.MemoryExportType:
+ tpe = constant.ExportDescMem
+ case module.GlobalExportType:
+ tpe = constant.ExportDescGlobal
+ default:
+ return fmt.Errorf("illegal export descriptor type 0x%x", exp.Descriptor.Type)
+ }
+ if err := writeByte(&buf, tpe); err != nil {
+ return err
+ }
+ if err := leb128.WriteVarUint32(&buf, exp.Descriptor.Index); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeElementSection(w io.Writer, s module.ElementSection) error {
+
+ if len(s.Segments) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.ElementSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Segments))); err != nil {
+ return err
+ }
+
+ for _, seg := range s.Segments {
+ if err := leb128.WriteVarUint32(&buf, seg.Index); err != nil {
+ return err
+ }
+ if err := writeInstructions(&buf, seg.Offset.Instrs); err != nil {
+ return err
+ }
+ if err := writeVarUint32Vector(&buf, seg.Indices); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeRawCodeSection(w io.Writer, s module.RawCodeSection) error {
+
+ if len(s.Segments) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.CodeSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Segments))); err != nil {
+ return err
+ }
+
+ for _, seg := range s.Segments {
+ if err := leb128.WriteVarUint32(&buf, uint32(len(seg.Code))); err != nil {
+ return err
+ }
+ if _, err := buf.Write(seg.Code); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeDataSection(w io.Writer, s module.DataSection) error {
+
+ if len(s.Segments) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.DataSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+
+ if err := leb128.WriteVarUint32(&buf, uint32(len(s.Segments))); err != nil {
+ return err
+ }
+
+ for _, seg := range s.Segments {
+ if err := leb128.WriteVarUint32(&buf, seg.Index); err != nil {
+ return err
+ }
+ if err := writeInstructions(&buf, seg.Offset.Instrs); err != nil {
+ return err
+ }
+ if err := writeByteVector(&buf, seg.Init); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeNameSection(w io.Writer, s module.NameSection) error {
+ if s.Module == "" && len(s.Functions) == 0 && len(s.Locals) == 0 {
+ return nil
+ }
+
+ if err := writeByte(w, constant.CustomSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+ if err := writeByteVector(&buf, []byte(constant.NameSectionCustomID)); err != nil {
+ return err
+ }
+
+ // "module" subsection
+ if s.Module != "" {
+ if err := writeByte(&buf, constant.NameSectionModuleType); err != nil {
+ return err
+ }
+ var mbuf bytes.Buffer
+ if err := writeByteVector(&mbuf, []byte(s.Module)); err != nil {
+ return err
+ }
+ if err := writeRawSection(&buf, &mbuf); err != nil {
+ return err
+ }
+ }
+
+ // "functions" subsection
+ if len(s.Functions) != 0 {
+ if err := writeByte(&buf, constant.NameSectionFunctionsType); err != nil {
+ return err
+ }
+
+ var fbuf bytes.Buffer
+ if err := writeNameMap(&fbuf, s.Functions); err != nil {
+ return err
+ }
+ if err := writeRawSection(&buf, &fbuf); err != nil {
+ return err
+ }
+ }
+
+ // "locals" subsection
+ if len(s.Locals) != 0 {
+ if err := writeByte(&buf, constant.NameSectionLocalsType); err != nil {
+ return err
+ }
+ funs := map[uint32][]module.NameMap{}
+ for _, e := range s.Locals {
+ funs[e.FuncIndex] = append(funs[e.FuncIndex], module.NameMap{Index: e.Index, Name: e.Name})
+ }
+ var lbuf bytes.Buffer
+ if err := leb128.WriteVarUint32(&lbuf, uint32(len(funs))); err != nil {
+ return err
+ }
+ for fidx, e := range funs {
+ if err := leb128.WriteVarUint32(&lbuf, fidx); err != nil {
+ return err
+ }
+ if err := writeNameMap(&lbuf, e); err != nil {
+ return err
+ }
+ }
+ if err := writeRawSection(&buf, &lbuf); err != nil {
+ return err
+ }
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeNameMap(buf io.Writer, nm []module.NameMap) error {
+ if err := leb128.WriteVarUint32(buf, uint32(len(nm))); err != nil {
+ return err
+ }
+ for _, m := range nm {
+ if err := leb128.WriteVarUint32(buf, m.Index); err != nil {
+ return err
+ }
+ if err := writeByteVector(buf, []byte(m.Name)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCustomSection(w io.Writer, s module.CustomSection) error {
+
+ if err := writeByte(w, constant.CustomSectionID); err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+ if err := writeByteVector(&buf, []byte(s.Name)); err != nil {
+ return err
+ }
+
+ if _, err := io.Copy(&buf, bytes.NewReader(s.Data)); err != nil {
+ return err
+ }
+
+ return writeRawSection(w, &buf)
+}
+
+func writeFunctionType(w io.Writer, fsig module.FunctionType) error {
+
+ if err := writeByte(w, constant.FunctionTypeID); err != nil {
+ return err
+ }
+
+ if err := writeValueTypeVector(w, fsig.Params); err != nil {
+ return err
+ }
+
+ return writeValueTypeVector(w, fsig.Results)
+}
+
+func writeImport(w io.Writer, imp module.Import) error {
+
+ if err := writeByteVector(w, []byte(imp.Module)); err != nil {
+ return err
+ }
+
+ if err := writeByteVector(w, []byte(imp.Name)); err != nil {
+ return err
+ }
+
+ switch desc := imp.Descriptor.(type) {
+ case module.FunctionImport:
+ if err := writeByte(w, constant.ImportDescType); err != nil {
+ return err
+ }
+ return leb128.WriteVarUint32(w, desc.Func)
+ case module.TableImport:
+ if err := writeByte(w, constant.ImportDescTable); err != nil {
+ return err
+ }
+ if err := writeByte(w, constant.ElementTypeAnyFunc); err != nil {
+ return err
+ }
+ return writeLimits(w, desc.Lim)
+ case module.MemoryImport:
+ if err := writeByte(w, constant.ImportDescMem); err != nil {
+ return err
+ }
+ return writeLimits(w, desc.Mem.Lim)
+ case module.GlobalImport:
+ if err := writeByte(w, constant.ImportDescGlobal); err != nil {
+ return err
+ }
+ if err := writeValueType(w, desc.Type); err != nil {
+ return err
+ }
+ if desc.Mutable {
+ return writeByte(w, constant.Mutable)
+ }
+ return writeByte(w, constant.Const)
+ default:
+ return fmt.Errorf("illegal import descriptor type")
+ }
+}
+
+func writeGlobal(w io.Writer, global module.Global) error {
+
+ if err := writeValueType(w, global.Type); err != nil {
+ return err
+ }
+
+ var err error
+
+ if global.Mutable {
+ err = writeByte(w, constant.Mutable)
+ } else {
+ err = writeByte(w, constant.Const)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if err := writeInstructions(w, global.Init.Instrs); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func writeInstructions(w io.Writer, instrs []instruction.Instruction) error {
+
+ for i, instr := range instrs {
+
+ _, err := w.Write([]byte{byte(instr.Op())})
+ if err != nil {
+ return err
+ }
+
+ for _, arg := range instr.ImmediateArgs() {
+ var err error
+ switch arg := arg.(type) {
+ case int32:
+ err = leb128.WriteVarInt32(w, arg)
+ case int64:
+ err = leb128.WriteVarInt64(w, arg)
+ case uint32:
+ err = leb128.WriteVarUint32(w, arg)
+ case uint64:
+ err = leb128.WriteVarUint64(w, arg)
+ case float32:
+ u32 := math.Float32bits(arg)
+ err = binary.Write(w, binary.LittleEndian, u32)
+ case float64:
+ u64 := math.Float64bits(arg)
+ err = binary.Write(w, binary.LittleEndian, u64)
+ default:
+ return fmt.Errorf("illegal immediate argument type on instruction %d", i)
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ if si, ok := instr.(instruction.StructuredInstruction); ok {
+ if err := writeBlockValueType(w, si.BlockType()); err != nil {
+ return err
+ }
+ if err := writeInstructions(w, si.Instructions()); err != nil {
+ return err
+ }
+ }
+
+ }
+
+ _, err := w.Write([]byte{byte(opcode.End)})
+ return err
+}
+
+func writeLimits(w io.Writer, lim module.Limit) error {
+ if lim.Max == nil {
+ if err := writeByte(w, 0); err != nil {
+ return err
+ }
+ } else {
+ if err := writeByte(w, 1); err != nil {
+ return err
+ }
+ }
+ if err := leb128.WriteVarUint32(w, lim.Min); err != nil {
+ return err
+ }
+ if lim.Max != nil {
+ return leb128.WriteVarUint32(w, *lim.Max)
+ }
+ return nil
+}
+
+func writeVarUint32Vector(w io.Writer, v []uint32) error {
+
+ if err := leb128.WriteVarUint32(w, uint32(len(v))); err != nil {
+ return err
+ }
+
+ for i := range v {
+ if err := leb128.WriteVarUint32(w, v[i]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writeValueTypeVector(w io.Writer, v []types.ValueType) error {
+
+ if err := leb128.WriteVarUint32(w, uint32(len(v))); err != nil {
+ return err
+ }
+
+ for i := range v {
+ if err := writeValueType(w, v[i]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writeBlockValueType(w io.Writer, v *types.ValueType) error {
+ var b byte
+ if v != nil {
+ switch *v {
+ case types.I32:
+ b = constant.ValueTypeI32
+ case types.I64:
+ b = constant.ValueTypeI64
+ case types.F32:
+ b = constant.ValueTypeF32
+ case types.F64:
+ b = constant.ValueTypeF64
+ }
+ } else {
+ b = constant.BlockTypeEmpty
+ }
+ return writeByte(w, b)
+}
+
+func writeValueType(w io.Writer, v types.ValueType) error {
+ var b byte
+ switch v {
+ case types.I32:
+ b = constant.ValueTypeI32
+ case types.I64:
+ b = constant.ValueTypeI64
+ case types.F32:
+ b = constant.ValueTypeF32
+ case types.F64:
+ b = constant.ValueTypeF64
+ }
+ return writeByte(w, b)
+}
+
+func writeRawSection(w io.Writer, buf *bytes.Buffer) error {
+
+ size := buf.Len()
+
+ if err := leb128.WriteVarUint32(w, uint32(size)); err != nil {
+ return err
+ }
+
+ _, err := io.Copy(w, buf)
+ return err
+}
+
+func writeByteVector(w io.Writer, bs []byte) error {
+
+ if err := leb128.WriteVarUint32(w, uint32(len(bs))); err != nil {
+ return err
+ }
+
+ _, err := w.Write(bs)
+ return err
+}
+
+func writeByte(w io.Writer, b byte) error {
+ buf := make([]byte, 1)
+ buf[0] = b
+ _, err := w.Write(buf)
+ return err
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go
new file mode 100644
index 00000000..51567153
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/control.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package instruction
+
+import (
+ "github.com/open-policy-agent/opa/internal/wasm/opcode"
+ "github.com/open-policy-agent/opa/internal/wasm/types"
+)
+
+// Unreachable reprsents an unreachable opcode.
+type Unreachable struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (Unreachable) Op() opcode.Opcode {
+ return opcode.Unreachable
+}
+
+// Nop represents a WASM no-op instruction.
+type Nop struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (Nop) Op() opcode.Opcode {
+ return opcode.Nop
+}
+
+// Block represents a WASM block instruction.
+type Block struct {
+ NoImmediateArgs
+ Type *types.ValueType
+ Instrs []Instruction
+}
+
+// Op returns the opcode of the instruction
+func (Block) Op() opcode.Opcode {
+ return opcode.Block
+}
+
+// BlockType returns the type of the block's return value.
+func (i Block) BlockType() *types.ValueType {
+ return i.Type
+}
+
+// Instructions returns the instructions contained in the block.
+func (i Block) Instructions() []Instruction {
+ return i.Instrs
+}
+
+// Loop represents a WASM loop instruction.
+type Loop struct {
+ NoImmediateArgs
+ Type *types.ValueType
+ Instrs []Instruction
+}
+
+// Op returns the opcode of the instruction.
+func (Loop) Op() opcode.Opcode {
+ return opcode.Loop
+}
+
+// BlockType returns the type of the loop's return value.
+func (i Loop) BlockType() *types.ValueType {
+ return i.Type
+}
+
+// Instructions represents the instructions contained in the loop.
+func (i Loop) Instructions() []Instruction {
+ return i.Instrs
+}
+
+// Br represents a WASM br instruction.
+type Br struct {
+ Index uint32
+}
+
+// Op returns the opcode of the instruction.
+func (Br) Op() opcode.Opcode {
+ return opcode.Br
+}
+
+// ImmediateArgs returns the block index to break to.
+func (i Br) ImmediateArgs() []interface{} {
+ return []interface{}{i.Index}
+}
+
+// BrIf represents a WASM br_if instruction.
+type BrIf struct {
+ Index uint32
+}
+
+// Op returns the opcode of the instruction.
+func (BrIf) Op() opcode.Opcode {
+ return opcode.BrIf
+}
+
+// ImmediateArgs returns the block index to break to.
+func (i BrIf) ImmediateArgs() []interface{} {
+ return []interface{}{i.Index}
+}
+
+// Call represents a WASM call instruction.
+type Call struct {
+ Index uint32
+}
+
+// Op returns the opcode of the instruction.
+func (Call) Op() opcode.Opcode {
+ return opcode.Call
+}
+
+// ImmediateArgs returns the function index.
+func (i Call) ImmediateArgs() []interface{} {
+ return []interface{}{i.Index}
+}
+
+// Return represents a WASM return instruction.
+type Return struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (Return) Op() opcode.Opcode {
+ return opcode.Return
+}
+
+// End represents the special WASM end instruction.
+type End struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (End) Op() opcode.Opcode {
+ return opcode.End
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go
new file mode 100644
index 00000000..066be77c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/instruction.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package instruction defines WASM instruction types.
+package instruction
+
+import (
+ "github.com/open-policy-agent/opa/internal/wasm/opcode"
+ "github.com/open-policy-agent/opa/internal/wasm/types"
+)
+
+// NoImmediateArgs indicates the instruction has no immediate arguments.
+type NoImmediateArgs struct {
+}
+
+// ImmediateArgs returns the immedate arguments of an instruction.
+func (NoImmediateArgs) ImmediateArgs() []interface{} {
+ return nil
+}
+
+// Instruction represents a single WASM instruction.
+type Instruction interface {
+ Op() opcode.Opcode
+ ImmediateArgs() []interface{}
+}
+
+// StructuredInstruction represents a structured control instruction like br_if.
+type StructuredInstruction interface {
+ Instruction
+ BlockType() *types.ValueType
+ Instructions() []Instruction
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go
new file mode 100644
index 00000000..c449cb1b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/memory.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package instruction
+
+import "github.com/open-policy-agent/opa/internal/wasm/opcode"
+
+// I32Load represents the WASM i32.load instruction.
+type I32Load struct {
+ Offset int32
+ Align int32 // expressed as a power of two
+}
+
+// Op returns the opcode of the instruction.
+func (I32Load) Op() opcode.Opcode {
+ return opcode.I32Load
+}
+
+// ImmediateArgs returns the static offset and alignment operands.
+func (i I32Load) ImmediateArgs() []interface{} {
+ return []interface{}{i.Align, i.Offset}
+}
+
+// I32Store represents the WASM i32.store instruction.
+type I32Store struct {
+ Offset int32
+ Align int32 // expressed as a power of two
+}
+
+// Op returns the opcode of the instruction.
+func (I32Store) Op() opcode.Opcode {
+ return opcode.I32Store
+}
+
+// ImmediateArgs returns the static offset and alignment operands.
+func (i I32Store) ImmediateArgs() []interface{} {
+ return []interface{}{i.Align, i.Offset}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go
new file mode 100644
index 00000000..f1acb31f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/numeric.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package instruction
+
+import (
+ "github.com/open-policy-agent/opa/internal/wasm/opcode"
+)
+
+// I32Const represents the WASM i32.const instruction.
+type I32Const struct {
+ Value int32
+}
+
+// Op returns the opcode of the instruction.
+func (I32Const) Op() opcode.Opcode {
+ return opcode.I32Const
+}
+
+// ImmediateArgs returns the i32 value to push onto the stack.
+func (i I32Const) ImmediateArgs() []interface{} {
+ return []interface{}{i.Value}
+}
+
+// I64Const represents the WASM i64.const instruction.
+type I64Const struct {
+ Value int64
+}
+
+// Op returns the opcode of the instruction.
+func (I64Const) Op() opcode.Opcode {
+ return opcode.I64Const
+}
+
+// ImmediateArgs returns the i64 value to push onto the stack.
+func (i I64Const) ImmediateArgs() []interface{} {
+ return []interface{}{i.Value}
+}
+
+// F32Const represents the WASM f32.const instruction.
+type F32Const struct {
+ Value int32
+}
+
+// Op returns the opcode of the instruction.
+func (F32Const) Op() opcode.Opcode {
+ return opcode.F32Const
+}
+
+// ImmediateArgs returns the f32 value to push onto the stack.
+func (i F32Const) ImmediateArgs() []interface{} {
+ return []interface{}{i.Value}
+}
+
+// F64Const represents the WASM f64.const instruction.
+type F64Const struct {
+ Value float64
+}
+
+// Op returns the opcode of the instruction.
+func (F64Const) Op() opcode.Opcode {
+ return opcode.F64Const
+}
+
+// ImmediateArgs returns the f64 value to push onto the stack.
+func (i F64Const) ImmediateArgs() []interface{} {
+ return []interface{}{i.Value}
+}
+
+// I32Eqz represents the WASM i32.eqz instruction.
+type I32Eqz struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32Eqz) Op() opcode.Opcode {
+ return opcode.I32Eqz
+}
+
+// I32Eq represents the WASM i32.eq instruction.
+type I32Eq struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32Eq) Op() opcode.Opcode {
+ return opcode.I32Eq
+}
+
+// I32Ne represents the WASM i32.ne instruction.
+type I32Ne struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32Ne) Op() opcode.Opcode {
+ return opcode.I32Ne
+}
+
+// I32GtS represents the WASM i32.gt_s instruction.
+type I32GtS struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32GtS) Op() opcode.Opcode {
+ return opcode.I32GtS
+}
+
+// I32GeS represents the WASM i32.ge_s instruction.
+type I32GeS struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32GeS) Op() opcode.Opcode {
+ return opcode.I32GeS
+}
+
+// I32LtS represents the WASM i32.lt_s instruction.
+type I32LtS struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32LtS) Op() opcode.Opcode {
+ return opcode.I32LtS
+}
+
+// I32LeS represents the WASM i32.le_s instruction.
+type I32LeS struct {
+ NoImmediateArgs
+}
+
+// Op returns the opcode of the instruction.
+func (I32LeS) Op() opcode.Opcode {
+ return opcode.I32LeS
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go
new file mode 100644
index 00000000..063ffdb9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/instruction/variable.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package instruction
+
+import "github.com/open-policy-agent/opa/internal/wasm/opcode"
+
+// GetLocal represents the WASM get_local instruction.
+type GetLocal struct {
+ Index uint32
+}
+
+// Op returns the opcode of the instruction.
+func (GetLocal) Op() opcode.Opcode {
+ return opcode.GetLocal
+}
+
+// ImmediateArgs returns the index of the local variable to push onto the stack.
+func (i GetLocal) ImmediateArgs() []interface{} {
+ return []interface{}{i.Index}
+}
+
+// SetLocal represents the WASM set_local instruction.
+type SetLocal struct {
+ Index uint32
+}
+
+// Op returns the opcode of the instruction.
+func (SetLocal) Op() opcode.Opcode {
+ return opcode.SetLocal
+}
+
+// ImmediateArgs returns the index of the local variable to set with the top of
+// the stack.
+func (i SetLocal) ImmediateArgs() []interface{} {
+ return []interface{}{i.Index}
+}
+
+// TeeLocal represents the WASM tee_local instruction.
+type TeeLocal struct {
+ Index uint32
+}
+
+// Op returns the opcode of the instruction.
+func (TeeLocal) Op() opcode.Opcode {
+ return opcode.TeeLocal
+}
+
+// ImmediateArgs returns the index of the local variable to "tee" with the top of
+// the stack (like set, but retaining the top of the stack).
+func (i TeeLocal) ImmediateArgs() []interface{} {
+ return []interface{}{i.Index}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go
new file mode 100644
index 00000000..6da4350e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/module.go
@@ -0,0 +1,368 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package module
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/internal/wasm/instruction"
+ "github.com/open-policy-agent/opa/internal/wasm/types"
+)
+
+type (
+ // Module represents a WASM module.
+ Module struct {
+ Version uint32
+ Type TypeSection
+ Import ImportSection
+ Function FunctionSection
+ Table TableSection
+ Element ElementSection
+ Global GlobalSection
+ Export ExportSection
+ Code RawCodeSection
+ Data DataSection
+ Customs []CustomSection
+ Names NameSection
+ }
+
+ // TypeSection represents a WASM type section.
+ TypeSection struct {
+ Functions []FunctionType
+ }
+
+ // ImportSection represents a WASM import section.
+ ImportSection struct {
+ Imports []Import
+ }
+
+ // FunctionSection represents a WASM function section.
+ FunctionSection struct {
+ TypeIndices []uint32
+ }
+
+ // TableSection represents a WASM table section.
+ TableSection struct {
+ Tables []Table
+ }
+
+ // ElementSection represents a WASM element section.
+ ElementSection struct {
+ Segments []ElementSegment
+ }
+
+ // GlobalSection represents a WASM global section.
+ GlobalSection struct {
+ Globals []Global
+ }
+
+ // ExportSection represents a WASM export section.
+ ExportSection struct {
+ Exports []Export
+ }
+
+ // RawCodeSection represents a WASM code section. The code section is left as a
+ // raw byte sequence.
+ RawCodeSection struct {
+ Segments []RawCodeSegment
+ }
+
+ // DataSection represents a WASM data section.
+ DataSection struct {
+ Segments []DataSegment
+ }
+
+ // CustomSection represents a WASM custom section.
+ CustomSection struct {
+ Name string
+ Data []byte
+ }
+
+ // NameSection represents the WASM custom section "name".
+ NameSection struct {
+ Module string
+ Functions []NameMap
+ Locals []LocalNameMap
+ }
+
+ // NameMap maps function or local arg indices to their names.
+ NameMap struct {
+ Index uint32
+ Name string
+ }
+
+ // LocalNameMap maps function indices, and argument indices for the args
+ // of the indexed function to their names.
+ LocalNameMap struct {
+ FuncIndex uint32
+ NameMap
+ }
+
+ // FunctionType represents a WASM function type definition.
+ FunctionType struct {
+ Params []types.ValueType
+ Results []types.ValueType
+ }
+
+ // Import represents a WASM import statement.
+ Import struct {
+ Module string
+ Name string
+ Descriptor ImportDescriptor
+ }
+
+ // ImportDescriptor represents a WASM import descriptor.
+ ImportDescriptor interface {
+ fmt.Stringer
+ Kind() ImportDescriptorType
+ }
+
+ // ImportDescriptorType defines allowed kinds of import descriptors.
+ ImportDescriptorType int
+
+ // FunctionImport represents a WASM function import statement.
+ FunctionImport struct {
+ Func uint32
+ }
+
+ // MemoryImport represents a WASM memory import statement.
+ MemoryImport struct {
+ Mem MemType
+ }
+
+ // MemType defines the attributes of a memory import.
+ MemType struct {
+ Lim Limit
+ }
+
+ // TableImport represents a WASM table import statement.
+ TableImport struct {
+ Type types.ElementType
+ Lim Limit
+ }
+
+ // ElementSegment represents a WASM element segment.
+ ElementSegment struct {
+ Index uint32
+ Offset Expr
+ Indices []uint32
+ }
+
+ // GlobalImport represents a WASM global variable import statement.
+ GlobalImport struct {
+ Type types.ValueType
+ Mutable bool
+ }
+
+ // Limit represents a WASM limit.
+ Limit struct {
+ Min uint32
+ Max *uint32
+ }
+
+ // Table represents a WASM table statement.
+ Table struct {
+ Type types.ElementType
+ Lim Limit
+ }
+
+ // Global represents a WASM global statement.
+ Global struct {
+ Type types.ValueType
+ Mutable bool
+ Init Expr
+ }
+
+ // Export represents a WASM export statement.
+ Export struct {
+ Name string
+ Descriptor ExportDescriptor
+ }
+
+ // ExportDescriptor represents a WASM export descriptor.
+ ExportDescriptor struct {
+ Type ExportDescriptorType
+ Index uint32
+ }
+
+ // ExportDescriptorType defines the allowed kinds of export descriptors.
+ ExportDescriptorType int
+
+ // RawCodeSegment represents a binary-encoded WASM code segment.
+ RawCodeSegment struct {
+ Code []byte
+ }
+
+ // DataSegment represents a WASM data segment.
+ DataSegment struct {
+ Index uint32
+ Offset Expr
+ Init []byte
+ }
+
+ // Expr represents a WASM expression.
+ Expr struct {
+ Instrs []instruction.Instruction
+ }
+
+ // CodeEntry represents a code segment entry.
+ CodeEntry struct {
+ Func Function
+ }
+
+ // Function represents a function in a code segment.
+ Function struct {
+ Locals []LocalDeclaration
+ Expr Expr
+ }
+
+ // LocalDeclaration represents a local variable declaration.
+ LocalDeclaration struct {
+ Count uint32
+ Type types.ValueType
+ }
+)
+
+// Defines the allowed kinds of imports.
+const (
+ FunctionImportType ImportDescriptorType = iota
+ TableImportType
+ MemoryImportType
+ GlobalImportType
+)
+
+func (x ImportDescriptorType) String() string {
+ switch x {
+ case FunctionImportType:
+ return "func"
+ case TableImportType:
+ return "table"
+ case MemoryImportType:
+ return "memory"
+ case GlobalImportType:
+ return "global"
+ }
+ panic("illegal value")
+}
+
+// Defines the allowed kinds of exports.
+const (
+ FunctionExportType ExportDescriptorType = iota
+ TableExportType
+ MemoryExportType
+ GlobalExportType
+)
+
+func (x ExportDescriptorType) String() string {
+ switch x {
+ case FunctionExportType:
+ return "func"
+ case TableExportType:
+ return "table"
+ case MemoryExportType:
+ return "memory"
+ case GlobalExportType:
+ return "global"
+ }
+ panic("illegal value")
+}
+
+// Kind returns the function import type kind.
+func (i FunctionImport) Kind() ImportDescriptorType {
+ return FunctionImportType
+}
+
+func (i FunctionImport) String() string {
+ return fmt.Sprintf("%v[type=%v]", i.Kind(), i.Func)
+}
+
+// Kind returns the memory import type kind.
+func (i MemoryImport) Kind() ImportDescriptorType {
+ return MemoryImportType
+}
+
+func (i MemoryImport) String() string {
+ return fmt.Sprintf("%v[%v]", i.Kind(), i.Mem.Lim)
+}
+
+// Kind returns the table import type kind.
+func (i TableImport) Kind() ImportDescriptorType {
+ return TableImportType
+}
+
+func (i TableImport) String() string {
+ return fmt.Sprintf("%v[%v, %v]", i.Kind(), i.Type, i.Lim)
+}
+
+// Kind returns the global import type kind.
+func (i GlobalImport) Kind() ImportDescriptorType {
+ return GlobalImportType
+}
+
+func (i GlobalImport) String() string {
+ return fmt.Sprintf("%v[%v, mut=%v]", i.Kind(), i.Type, i.Mutable)
+}
+
+func (tpe FunctionType) String() string {
+ params := make([]string, len(tpe.Params))
+ results := make([]string, len(tpe.Results))
+ for i := range tpe.Params {
+ params[i] = tpe.Params[i].String()
+ }
+ for i := range tpe.Results {
+ results[i] = tpe.Results[i].String()
+ }
+ return "(" + strings.Join(params, ", ") + ") -> (" + strings.Join(results, ", ") + ")"
+}
+
+// Equal returns true if tpe equals other.
+func (tpe FunctionType) Equal(other FunctionType) bool {
+
+ if len(tpe.Params) != len(other.Params) || len(tpe.Results) != len(other.Results) {
+ return false
+ }
+
+ for i := range tpe.Params {
+ if tpe.Params[i] != other.Params[i] {
+ return false
+ }
+ }
+
+ for i := range tpe.Results {
+ if tpe.Results[i] != other.Results[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (imp Import) String() string {
+ return fmt.Sprintf("%v %v.%v", imp.Descriptor.String(), imp.Module, imp.Name)
+}
+
+func (exp Export) String() string {
+ return fmt.Sprintf("%v[%v] %v", exp.Descriptor.Type, exp.Descriptor.Index, exp.Name)
+}
+
+func (seg RawCodeSegment) String() string {
+	return fmt.Sprintf("<code %d bytes>", len(seg.Code))
+}
+
+func (seg DataSegment) String() string {
+	return fmt.Sprintf("<data index=%v, offset=%v, %d bytes>", seg.Index, seg.Offset, len(seg.Init))
+}
+
+func (e Expr) String() string {
+ return fmt.Sprintf("%d instr(s)", len(e.Instrs))
+}
+
+func (lim Limit) String() string {
+ if lim.Max == nil {
+ return fmt.Sprintf("min=%v", lim.Min)
+ }
+ return fmt.Sprintf("min=%v max=%v", lim.Min, lim.Max)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/module/pretty.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/pretty.go
new file mode 100644
index 00000000..2b28ad85
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/module/pretty.go
@@ -0,0 +1,84 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package module
+
+import (
+ "encoding/hex"
+ "fmt"
+ "io"
+)
+
+// PrettyOption defines options for controlling pretty printing.
+type PrettyOption struct {
+ Contents bool // show raw byte content of data+code sections.
+}
+
+// Pretty writes a human-readable representation of m to w.
+func Pretty(w io.Writer, m *Module, opts ...PrettyOption) {
+ fmt.Fprintln(w, "version:", m.Version)
+ fmt.Fprintln(w, "types:")
+ for _, fn := range m.Type.Functions {
+ fmt.Fprintln(w, " -", fn)
+ }
+ fmt.Fprintln(w, "imports:")
+ for i, imp := range m.Import.Imports {
+ if imp.Descriptor.Kind() == FunctionImportType {
+ fmt.Printf(" - [%d] %v\n", i, imp)
+ } else {
+ fmt.Fprintln(w, " -", imp)
+ }
+ }
+ fmt.Fprintln(w, "functions:")
+ for _, fn := range m.Function.TypeIndices {
+ if fn >= uint32(len(m.Type.Functions)) {
+ fmt.Fprintln(w, " -", "???")
+ } else {
+ fmt.Fprintln(w, " -", m.Type.Functions[fn])
+ }
+ }
+ fmt.Fprintln(w, "exports:")
+ for _, exp := range m.Export.Exports {
+ fmt.Fprintln(w, " -", exp)
+ }
+ fmt.Fprintln(w, "code:")
+ for _, seg := range m.Code.Segments {
+ fmt.Fprintln(w, " -", seg)
+ }
+ fmt.Fprintln(w, "data:")
+ for _, seg := range m.Data.Segments {
+ fmt.Fprintln(w, " -", seg)
+ }
+ if len(opts) == 0 {
+ return
+ }
+ fmt.Fprintln(w)
+ for _, opt := range opts {
+ if opt.Contents {
+ newline := false
+ if len(m.Data.Segments) > 0 {
+ fmt.Fprintln(w, "data section:")
+ for _, seg := range m.Data.Segments {
+ if newline {
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, hex.Dump(seg.Init))
+ newline = true
+ }
+ newline = false
+ }
+ if len(m.Code.Segments) > 0 {
+ fmt.Fprintln(w, "code section:")
+ for _, seg := range m.Code.Segments {
+ if newline {
+ fmt.Fprintln(w)
+ }
+ fmt.Fprintln(w, hex.Dump(seg.Code))
+ newline = true
+ }
+ newline = false
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/opcode/opcode.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/opcode/opcode.go
new file mode 100644
index 00000000..7d35a301
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/opcode/opcode.go
@@ -0,0 +1,218 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package opcode contains constants and utilities for working with WASM opcodes.
+package opcode
+
+// Opcode represents a WASM instruction opcode.
+type Opcode byte
+
+// Control instructions.
+const (
+ Unreachable Opcode = iota
+ Nop
+ Block
+ Loop
+ If
+ Else
+)
+
+const (
+ // End defines the special end WASM opcode.
+ End Opcode = 0x0B
+)
+
+// Extended control instructions.
+const (
+ Br Opcode = iota + 0x0C
+ BrIf
+ BrTable
+ Return
+ Call
+ CallIndirect
+)
+
+// Parameter instructions.
+const (
+ Drop Opcode = iota + 0x1A
+ Select
+)
+
+// Variable instructions.
+const (
+ GetLocal Opcode = iota + 0x20
+ SetLocal
+ TeeLocal
+ GetGlobal
+ SetGlobal
+)
+
+// Memory instructions.
+const (
+ I32Load Opcode = iota + 0x28
+ I64Load
+ F32Load
+ F64Load
+ I32Load8S
+ I32Load8U
+ I32Load16S
+ I32Load16U
+ I64Load8S
+ I64Load8U
+ I64Load16S
+ I64Load16U
+ I64Load32S
+ I64Load32U
+ I32Store
+ I64Store
+ F32Store
+ F64Store
+ I32Store8
+ I32Store16
+ I64Store8
+ I64Store16
+ I64Store32
+ MemorySize
+ MemoryGrow
+)
+
+// Numeric instructions.
+const (
+ I32Const Opcode = iota + 0x41
+ I64Const
+ F32Const
+ F64Const
+
+ I32Eqz
+ I32Eq
+ I32Ne
+ I32LtS
+ I32LtU
+ I32GtS
+ I32GtU
+ I32LeS
+ I32LeU
+ I32GeS
+ I32GeU
+
+ I64Eqz
+ I64Eq
+ I64Ne
+ I64LtS
+ I64LtU
+ I64GtS
+ I64GtU
+ I64LeS
+ I64LeU
+ I64GeS
+ I64GeU
+
+ F32Eq
+ F32Ne
+ F32Lt
+ F32Gt
+ F32Le
+ F32Ge
+
+ F64Eq
+ F64Ne
+ F64Lt
+ F64Gt
+ F64Le
+ F64Ge
+
+ I32Clz
+ I32Ctz
+ I32Popcnt
+ I32Add
+ I32Sub
+ I32Mul
+ I32DivS
+ I32DivU
+ I32RemS
+ I32RemU
+ I32And
+ I32Or
+ I32Xor
+ I32Shl
+ I32ShrS
+ I32ShrU
+ I32Rotl
+ I32Rotr
+
+ I64Clz
+ I64Ctz
+ I64Popcnt
+ I64Add
+ I64Sub
+ I64Mul
+ I64DivS
+ I64DivU
+ I64RemS
+ I64RemU
+ I64And
+ I64Or
+ I64Xor
+ I64Shl
+ I64ShrS
+ I64ShrU
+ I64Rotl
+ I64Rotr
+
+ F32Abs
+ F32Neg
+ F32Ceil
+ F32Floor
+ F32Trunc
+ F32Nearest
+ F32Sqrt
+ F32Add
+ F32Sub
+ F32Mul
+ F32Div
+ F32Min
+ F32Max
+ F32Copysign
+
+ F64Abs
+ F64Neg
+ F64Ceil
+ F64Floor
+ F64Trunc
+ F64Nearest
+ F64Sqrt
+ F64Add
+ F64Sub
+ F64Mul
+ F64Div
+ F64Min
+ F64Max
+ F64Copysign
+
+ I32WrapI64
+ I32TruncSF32
+ I32TruncUF32
+ I32TruncSF64
+ I32TruncUF64
+ I64ExtendSI32
+ I64ExtendUI32
+ I64TruncSF32
+ I64TruncUF32
+ I64TruncSF64
+ I64TruncUF64
+ F32ConvertSI32
+ F32ConvertUI32
+ F32ConvertSI64
+ F32ConvertUI64
+ F32DemoteF64
+ F64ConvertSI32
+ F64ConvertUI32
+ F64ConvertSI64
+ F64ConvertUI64
+ F64PromoteF32
+ I32ReinterpretF32
+ I64ReinterpretF64
+ F32ReinterpretI32
+ F64ReinterpretI64
+)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/bindings.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/bindings.go
new file mode 100644
index 00000000..522c8a11
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/bindings.go
@@ -0,0 +1,96 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package wasm
+
+// #include <stdint.h>
+//
+// extern void opa_println(void *context, int32_t addr);
+// extern void opa_abort(void *context, int32_t addr);
+// extern int32_t opa_builtin0(void *context, int32_t builtin_id, int32_t ctx);
+// extern int32_t opa_builtin1(void *context, int32_t builtin_id, int32_t ctx, int32_t arg0);
+// extern int32_t opa_builtin2(void *context, int32_t builtin_id, int32_t ctx, int32_t arg0, int32_t arg1);
+// extern int32_t opa_builtin3(void *context, int32_t builtin_id, int32_t ctx, int32_t arg0, int32_t arg1, int32_t arg2);
+// extern int32_t opa_builtin4(void *context, int32_t builtin_id, int32_t ctx, int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3);
+import "C"
+
+import (
+ "unsafe"
+
+ wasm "github.com/wasmerio/go-ext-wasm/wasmer"
+)
+
+func opaFunctions(imports *wasm.Imports) (*wasm.Imports, error) {
+ imports, err := imports.AppendFunction("opa_println", opa_println, C.opa_println)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err = imports.AppendFunction("opa_abort", opa_abort, C.opa_abort)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err = imports.AppendFunction("opa_builtin0", opa_builtin0, C.opa_builtin0)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err = imports.AppendFunction("opa_builtin1", opa_builtin1, C.opa_builtin1)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err = imports.AppendFunction("opa_builtin2", opa_builtin2, C.opa_builtin2)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err = imports.AppendFunction("opa_builtin3", opa_builtin3, C.opa_builtin3)
+ if err != nil {
+ return nil, err
+ }
+
+ return imports.AppendFunction("opa_builtin4", opa_builtin4, C.opa_builtin4)
+}
+
+//export opa_println
+func opa_println(ctx unsafe.Pointer, addr int32) {
+ getVM(ctx).Println(addr)
+}
+
+//export opa_abort
+func opa_abort(ctx unsafe.Pointer, addr int32) {
+ getVM(ctx).Abort(addr)
+}
+
+//export opa_builtin0
+func opa_builtin0(ctx unsafe.Pointer, builtinID, context int32) int32 {
+ return getVM(ctx).Builtin(builtinID, context)
+}
+
+//export opa_builtin1
+func opa_builtin1(ctx unsafe.Pointer, builtinID, context, arg0 int32) int32 {
+ return getVM(ctx).Builtin(builtinID, context, arg0)
+}
+
+//export opa_builtin2
+func opa_builtin2(ctx unsafe.Pointer, builtinID, context, arg0, arg1 int32) int32 {
+ return getVM(ctx).Builtin(builtinID, context, arg0, arg1)
+}
+
+//export opa_builtin3
+func opa_builtin3(ctx unsafe.Pointer, builtinID, context, arg0, arg1, arg2 int32) int32 {
+ return getVM(ctx).Builtin(builtinID, context, arg0, arg1, arg2)
+}
+
+//export opa_builtin4
+func opa_builtin4(ctx unsafe.Pointer, builtinID, context, arg0, arg1, arg2, arg3 int32) int32 {
+ return getVM(ctx).Builtin(builtinID, context, arg0, arg1, arg2, arg3)
+}
+
+func getVM(ctx unsafe.Pointer) *VM {
+ ictx := wasm.IntoInstanceContext(ctx)
+ return ictx.Data().(*VM)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/pool.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/pool.go
new file mode 100644
index 00000000..16b3fa05
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/pool.go
@@ -0,0 +1,367 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors"
+ "github.com/open-policy-agent/opa/metrics"
+)
+
+// Pool maintains a pool of WebAssembly VM instances.
+type Pool struct {
+ available chan struct{}
+ mutex sync.Mutex
+ dataMtx sync.Mutex
+ initialized bool
+ closed bool
+ policy []byte
+ parsedData []byte // Parsed parsedData memory segment, used to seed new VM's
+ parsedDataAddr int32 // Address for parsedData value root, used to seed new VM's
+ memoryMinPages uint32
+ memoryMaxPages uint32
+ vms []*VM // All current VM instances, acquired or not.
+ acquired []bool
+ pendingReinit *VM
+ blockedReinit chan struct{}
+}
+
+// NewPool constructs a new pool with the pool and VM configuration provided.
+func NewPool(poolSize, memoryMinPages, memoryMaxPages uint32) *Pool {
+ available := make(chan struct{}, poolSize)
+ for i := uint32(0); i < poolSize; i++ {
+ available <- struct{}{}
+ }
+
+ return &Pool{
+ memoryMinPages: memoryMinPages,
+ memoryMaxPages: memoryMaxPages,
+ available: available,
+ vms: make([]*VM, 0),
+ acquired: make([]bool, 0),
+ }
+}
+
+// ParsedData returns a reference to the pools parsed external data used to
+// initialize new VM's.
+func (p *Pool) ParsedData() (int32, []byte) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ return p.parsedDataAddr, p.parsedData
+}
+
+// Policy returns the raw policy Wasm module used by VM's in the pool
+func (p *Pool) Policy() []byte {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ return p.policy
+}
+
+// Size returns the current number of VM's in the pool
+func (p *Pool) Size() int {
+ return len(p.vms)
+}
+
+// Acquire obtains a VM from the pool, waiting if all VMms are in use
+// and building one as necessary. Returns either ErrNotReady or
+// ErrInternal if an error.
+func (p *Pool) Acquire(ctx context.Context, metrics metrics.Metrics) (*VM, error) {
+ metrics.Timer("wasm_pool_acquire").Start()
+ defer metrics.Timer("wasm_pool_acquire").Stop()
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-p.available:
+ }
+
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ if !p.initialized || p.closed {
+ return nil, errors.ErrNotReady
+ }
+
+ for i, vm := range p.vms {
+ if !p.acquired[i] {
+ p.acquired[i] = true
+ return vm, nil
+ }
+ }
+
+ policy, parsedData, parsedDataAddr := p.policy, p.parsedData, p.parsedDataAddr
+
+ p.mutex.Unlock()
+ vm, err := newVM(vmOpts{
+ policy: policy,
+ data: nil,
+ parsedData: parsedData,
+ parsedDataAddr: parsedDataAddr,
+ memoryMin: p.memoryMinPages,
+ memoryMax: p.memoryMaxPages,
+ })
+ p.mutex.Lock()
+
+ if err != nil {
+ p.available <- struct{}{}
+ return nil, fmt.Errorf("%v: %w", err, errors.ErrInternal)
+ }
+
+ p.acquired = append(p.acquired, true)
+ p.vms = append(p.vms, vm)
+ return vm, nil
+}
+
+// Release releases the VM back to the pool.
+func (p *Pool) Release(vm *VM, metrics metrics.Metrics) {
+ metrics.Timer("wasm_pool_release").Start()
+ defer metrics.Timer("wasm_pool_release").Stop()
+
+ p.mutex.Lock()
+
+ // If the policy data setting is waiting for this one, don't release it back to the general consumption.
+ // Note the reinit is responsible for pushing to available channel once done with the VM.
+ if vm == p.pendingReinit {
+ p.mutex.Unlock()
+ p.blockedReinit <- struct{}{}
+ return
+ }
+
+ for i := range p.vms {
+ if p.vms[i] == vm {
+ p.acquired[i] = false
+ p.mutex.Unlock()
+ p.available <- struct{}{}
+ return
+ }
+ }
+
+ // VM instance not found anymore, hence pool reconfigured and can release the VM.
+
+ p.mutex.Unlock()
+ p.available <- struct{}{}
+
+ vm.Close()
+}
+
+// SetPolicyData re-initializes the vms within the pool with the new policy
+// and data. The re-initialization takes place atomically: all new vms
+// are constructed in advance before touching the pool. Returns
+// either ErrNotReady, ErrInvalidPolicy or ErrInternal if an error
+// occurs.
+func (p *Pool) SetPolicyData(policy []byte, data []byte) error {
+ p.dataMtx.Lock()
+ defer p.dataMtx.Unlock()
+
+ p.mutex.Lock()
+
+ if !p.initialized {
+ vm, err := newVM(vmOpts{
+ policy: policy,
+ data: data,
+ parsedData: nil,
+ parsedDataAddr: 0,
+ memoryMin: p.memoryMinPages,
+ memoryMax: p.memoryMaxPages,
+ })
+
+ if err == nil {
+ parsedDataAddr, parsedData := vm.cloneDataSegment()
+ p.memoryMinPages = Pages(vm.memory.Length())
+ p.vms = append(p.vms, vm)
+ p.acquired = append(p.acquired, false)
+ p.initialized = true
+ p.policy, p.parsedData, p.parsedDataAddr = policy, parsedData, parsedDataAddr
+ } else {
+ err = fmt.Errorf("%v: %w", err, errors.ErrInvalidPolicyOrData)
+ }
+
+ p.mutex.Unlock()
+ return err
+ }
+
+ if p.closed {
+ p.mutex.Unlock()
+ return errors.ErrNotReady
+ }
+
+ currentPolicy, currentData := p.policy, p.parsedData
+ p.mutex.Unlock()
+
+ if bytes.Equal(policy, currentPolicy) && bytes.Equal(data, currentData) {
+ return nil
+
+ }
+
+ err := p.setPolicyData(policy, data)
+ if err != nil {
+ return fmt.Errorf("%v: %w", err, errors.ErrInternal)
+ }
+
+ return nil
+}
+
+// SetDataPath will update the current data on the VMs by setting the value at the
+// specified path. If an error occurs the instance is still in a valid state, however
+// the data will not have been modified.
+func (p *Pool) SetDataPath(path []string, value interface{}) error {
+ p.dataMtx.Lock()
+ defer p.dataMtx.Unlock()
+ return p.updateVMs(func(vm *VM, opts vmOpts) error {
+ return vm.SetDataPath(path, value)
+ })
+}
+
+// RemoveDataPath will update the current data on the VMs by removing the value at the
+// specified path. If an error occurs the instance is still in a valid state, however
+// the data will not have been modified.
+func (p *Pool) RemoveDataPath(path []string) error {
+ p.dataMtx.Lock()
+ defer p.dataMtx.Unlock()
+ return p.updateVMs(func(vm *VM, _ vmOpts) error {
+ return vm.RemoveDataPath(path)
+ })
+}
+
+// setPolicyData reinitializes the VMs one at a time.
+func (p *Pool) setPolicyData(policy []byte, data []byte) error {
+ return p.updateVMs(func(vm *VM, opts vmOpts) error {
+ opts.policy = policy
+ opts.data = data
+ return vm.SetPolicyData(opts)
+ })
+}
+
+// updateVMs Iterates over each VM, waiting for each to safely acquire them,
+// and applies the update function. If the first update succeeds any subsequent
+// failures will remove the VM and continue through the pool. Otherwise an error
+// will be returned.
+func (p *Pool) updateVMs(update func(vm *VM, opts vmOpts) error) error {
+ var policy []byte
+ var parsedData []byte
+ var parsedDataAddr int32
+ seedMemorySize := p.memoryMinPages
+ activated := false
+ i := 0
+ for {
+ vm := p.Wait(i)
+ if vm == nil {
+ // All have been updated or removed.
+ return nil
+ }
+
+ err := update(vm, vmOpts{
+ policy: policy,
+ parsedData: parsedData,
+ parsedDataAddr: parsedDataAddr,
+ memoryMin: seedMemorySize,
+ memoryMax: p.memoryMaxPages, // The max pages cannot be changed while updating.
+ })
+
+ if err != nil {
+ // No guarantee about the VM state after an error; hence, remove.
+ p.remove(i)
+ p.Release(vm, metrics.New())
+
+ // After the first successful activation, proceed through all the VMs, ignoring the remaining errors.
+ if !activated {
+ return err
+ }
+ // Note: Do not increment i when it has been removed! That index is
+ // replaced by the last VM in the list so we must re-run with the
+ // same index.
+ } else {
+ if !activated {
+ // Activate the policy and data, now that a single VM has been reset without errors.
+ activated = true
+ policy = vm.policy
+ parsedDataAddr, parsedData = vm.cloneDataSegment()
+ seedMemorySize = Pages(vm.memory.Length())
+ p.activate(policy, parsedData, parsedDataAddr, seedMemorySize)
+ }
+
+ p.Release(vm, metrics.New())
+
+ // Only increment on success
+ i++
+ }
+ }
+}
+
+// Close waits for all the evaluations to finish and then releases the VMs.
+func (p *Pool) Close() {
+ for range p.vms {
+ <-p.available
+ }
+
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ for _, vm := range p.vms {
+ if vm != nil {
+ vm.Close()
+ }
+ }
+
+ p.closed = true
+ p.vms = nil
+}
+
+// Wait steals the i'th VM instance. The VM has to be released afterwards.
+func (p *Pool) Wait(i int) *VM {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ if i == len(p.vms) {
+ return nil
+ }
+
+ vm := p.vms[i]
+ isActive := p.acquired[i]
+ p.acquired[i] = true
+
+ if isActive {
+ p.blockedReinit = make(chan struct{}, 1)
+ p.pendingReinit = vm
+ }
+
+ p.mutex.Unlock()
+
+ if isActive {
+ <-p.blockedReinit
+ } else {
+ <-p.available
+ }
+
+ p.mutex.Lock()
+ p.pendingReinit = nil
+ return vm
+}
+
+// remove removes the i'th vm.
+func (p *Pool) remove(i int) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ n := len(p.vms)
+ if n > 1 {
+ p.vms[i] = p.vms[n-1]
+ p.acquired[i] = p.acquired[n-1]
+ }
+
+ p.vms = p.vms[0 : n-1]
+ p.acquired = p.acquired[0 : n-1]
+}
+
+func (p *Pool) activate(policy []byte, data []byte, dataAddr int32, minMemoryPages uint32) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ p.policy, p.parsedData, p.parsedDataAddr, p.memoryMinPages = policy, data, dataAddr, minMemoryPages
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/util.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/util.go
new file mode 100644
index 00000000..f4b16de9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/util.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package wasm
+
+const wasmPageSize = 65536 // Wasm spec: one linear-memory page is 64 KiB (was 65535, off by one)
+
+// Pages converts a byte size to Pages, rounding up as necessary.
+func Pages(n uint32) uint32 {
+	pages := n / wasmPageSize
+	if pages*wasmPageSize == n {
+		return pages
+	}
+
+	return pages + 1
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/vm.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/vm.go
new file mode 100644
index 00000000..9a2d72e9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm/vm.go
@@ -0,0 +1,662 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+ "time"
+
+ wasm "github.com/wasmerio/go-ext-wasm/wasmer"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/topdown"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// VM is a wrapper around a Wasm VM instance
+type VM struct {
+ instance *wasm.Instance // Pointer to avoid unintented destruction (triggering finalizers within).
+ policy []byte
+ data []byte
+ memory *wasm.Memory
+ memoryMin uint32
+ memoryMax uint32
+ bctx *topdown.BuiltinContext
+ builtins map[int32]topdown.BuiltinFunc
+ builtinResult *ast.Term
+ entrypointIDs map[string]int32
+ baseHeapPtr int32
+ dataAddr int32
+ evalHeapPtr int32
+ eval func(...interface{}) (wasm.Value, error)
+ evalCtxGetResult func(...interface{}) (wasm.Value, error)
+ evalCtxNew func(...interface{}) (wasm.Value, error)
+ evalCtxSetData func(...interface{}) (wasm.Value, error)
+ evalCtxSetInput func(...interface{}) (wasm.Value, error)
+ evalCtxSetEntrypoint func(...interface{}) (wasm.Value, error)
+ heapPtrGet func(...interface{}) (wasm.Value, error)
+ heapPtrSet func(...interface{}) (wasm.Value, error)
+ heapTopGet func(...interface{}) (wasm.Value, error)
+ heapTopSet func(...interface{}) (wasm.Value, error)
+ jsonDump func(...interface{}) (wasm.Value, error)
+ jsonParse func(...interface{}) (wasm.Value, error)
+ valueDump func(...interface{}) (wasm.Value, error)
+ valueParse func(...interface{}) (wasm.Value, error)
+ malloc func(...interface{}) (wasm.Value, error)
+ free func(...interface{}) (wasm.Value, error)
+ valueAddPath func(...interface{}) (wasm.Value, error)
+ valueRemovePath func(...interface{}) (wasm.Value, error)
+}
+
+type vmOpts struct {
+ policy []byte
+ data []byte
+ parsedData []byte
+ parsedDataAddr int32
+ memoryMin uint32
+ memoryMax uint32
+}
+
+func newVM(opts vmOpts) (*VM, error) {
+ memory, err := wasm.NewMemory(opts.memoryMin, opts.memoryMax)
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err := opaFunctions(wasm.NewImports())
+ if err != nil {
+ return nil, err
+ }
+
+ imports, err = imports.AppendMemory("memory", memory)
+ if err != nil {
+ panic(err)
+ }
+
+ i, err := wasm.NewInstanceWithImports(opts.policy, imports)
+ if err != nil {
+ return nil, err
+ }
+
+ v := &VM{
+ instance: &i,
+ policy: opts.policy,
+ memory: memory,
+ memoryMin: opts.memoryMin,
+ memoryMax: opts.memoryMax,
+ builtins: make(map[int32]topdown.BuiltinFunc),
+ entrypointIDs: make(map[string]int32),
+ dataAddr: 0,
+ eval: i.Exports["eval"],
+ evalCtxGetResult: i.Exports["opa_eval_ctx_get_result"],
+ evalCtxNew: i.Exports["opa_eval_ctx_new"],
+ evalCtxSetData: i.Exports["opa_eval_ctx_set_data"],
+ evalCtxSetInput: i.Exports["opa_eval_ctx_set_input"],
+ evalCtxSetEntrypoint: i.Exports["opa_eval_ctx_set_entrypoint"],
+ free: i.Exports["opa_free"],
+ heapPtrGet: i.Exports["opa_heap_ptr_get"],
+ heapPtrSet: i.Exports["opa_heap_ptr_set"],
+ heapTopGet: i.Exports["opa_heap_top_get"],
+ heapTopSet: i.Exports["opa_heap_top_set"],
+ jsonDump: i.Exports["opa_json_dump"],
+ jsonParse: i.Exports["opa_json_parse"],
+ valueDump: i.Exports["opa_value_dump"],
+ valueParse: i.Exports["opa_value_parse"],
+ malloc: i.Exports["opa_malloc"],
+ valueAddPath: i.Exports["opa_value_add_path"],
+ valueRemovePath: i.Exports["opa_value_remove_path"],
+ }
+
+ // Initialize the heap.
+
+ if _, err := v.malloc(0); err != nil {
+ return nil, err
+ }
+
+ if v.baseHeapPtr, err = v.getHeapState(); err != nil {
+ return nil, err
+ }
+
+ // Optimization for cloning a vm, if provided a parsed data memory buffer
+ // insert it directly into the new vm's buffer and set pointers accordingly.
+ // This only works because the placement is deterministic (eg, for a given policy
+ // the base heap pointer and parsed data layout will always be the same).
+ if opts.parsedData != nil {
+ if memory.Length()-uint32(v.baseHeapPtr) < uint32(len(opts.parsedData)) {
+ delta := uint32(len(opts.parsedData)) - (memory.Length() - uint32(v.baseHeapPtr))
+ err := memory.Grow(Pages(delta))
+ if err != nil {
+ return nil, err
+ }
+ }
+ mem := memory.Data()
+ for src, dest := 0, v.baseHeapPtr; src < len(opts.parsedData); src, dest = src+1, dest+1 {
+ mem[dest] = opts.parsedData[src]
+ }
+ v.dataAddr = opts.parsedDataAddr
+ v.evalHeapPtr = v.baseHeapPtr + int32(len(opts.parsedData))
+ err := v.setHeapState(v.evalHeapPtr)
+ if err != nil {
+ return nil, err
+ }
+ } else if opts.data != nil {
+ if v.dataAddr, err = v.toRegoJSON(opts.data, true); err != nil {
+ return nil, err
+ }
+ }
+
+ if v.evalHeapPtr, err = v.getHeapState(); err != nil {
+ return nil, err
+ }
+
+ // For the opa builtin functions to access the instance.
+ i.SetContextData(v)
+
+ // Construct the builtin id to name mappings.
+
+ val, err := i.Exports["builtins"]()
+ if err != nil {
+ return nil, err
+ }
+
+ builtins, err := v.fromRegoJSON(val.ToI32(), true)
+ if err != nil {
+ return nil, err
+ }
+
+ for name, id := range builtins.(map[string]interface{}) {
+ f := topdown.GetBuiltin(name)
+ if f == nil {
+ return nil, fmt.Errorf("builtin '%s' not found", name)
+ }
+
+ n, err := id.(json.Number).Int64()
+ if err != nil {
+ panic(err)
+ }
+
+ v.builtins[int32(n)] = f
+ }
+
+ // Extract the entrypoint ID's
+ val, err = i.Exports["entrypoints"]()
+ if err != nil {
+ return nil, err
+ }
+
+ epMap, err := v.fromRegoJSON(val.ToI32(), true)
+ if err != nil {
+ return nil, err
+ }
+
+ for ep, value := range epMap.(map[string]interface{}) {
+ id, err := value.(json.Number).Int64()
+ if err != nil {
+ return nil, err
+ }
+ v.entrypointIDs[ep] = int32(id)
+ }
+
+ return v, nil
+}
+
+// Eval performs an evaluation of the specified entrypoint, with any provided
+// input, and returns the resulting value dumped to a string.
+func (i *VM) Eval(ctx context.Context, entrypoint int32, input *interface{}, metrics metrics.Metrics) ([]byte, error) {
+ metrics.Timer("wasm_vm_eval").Start()
+ defer metrics.Timer("wasm_vm_eval").Stop()
+
+ metrics.Timer("wasm_vm_eval_prepare_input").Start()
+ err := i.setHeapState(i.evalHeapPtr)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ i.bctx = nil
+ }()
+
+ // Parse the input JSON and activate it with the data.
+
+ addr, err := i.evalCtxNew()
+ if err != nil {
+ return nil, err
+ }
+
+ ctxAddr := addr.ToI32()
+
+ if i.dataAddr != 0 {
+ if _, err := i.evalCtxSetData(ctxAddr, i.dataAddr); err != nil {
+ return nil, err
+ }
+ }
+
+ _, err = i.evalCtxSetEntrypoint(ctxAddr, int32(entrypoint))
+ if err != nil {
+ return nil, err
+ }
+
+ if input != nil {
+ inputAddr, err := i.toRegoJSON(*input, false)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := i.evalCtxSetInput(ctxAddr, inputAddr); err != nil {
+ return nil, err
+ }
+ }
+ metrics.Timer("wasm_vm_eval_prepare_input").Stop()
+
+ // Evaluate the policy.
+ metrics.Timer("wasm_vm_eval_execute").Start()
+ func() {
+ defer func() {
+ if e := recover(); e != nil {
+ switch e := e.(type) {
+ case abortError:
+ err = errors.New(e.message)
+ case builtinError:
+ err = e.err
+ if _, ok := err.(topdown.Halt); !ok {
+ err = nil
+ }
+ default:
+ panic(e)
+ }
+
+ }
+ }()
+ _, err = i.eval(ctxAddr)
+ }()
+
+ metrics.Timer("wasm_vm_eval_execute").Stop()
+
+ if err != nil {
+ return nil, err
+ }
+
+ metrics.Timer("wasm_vm_eval_prepare_result").Start()
+ resultAddr, err := i.evalCtxGetResult(ctxAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ serialized, err := i.valueDump(resultAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ data := i.memory.Data()[serialized.ToI32():]
+ n := bytes.IndexByte(data, 0)
+ if n < 0 {
+ n = 0
+ }
+
+ metrics.Timer("wasm_vm_eval_prepare_result").Stop()
+
+ // Skip free'ing input and result JSON as the heap will be reset next round anyway.
+
+ return data[0:n], err
+}
+
+// SetPolicyData Will either update the VM's data or, if the policy changed,
+// re-initialize the VM.
+func (i *VM) SetPolicyData(opts vmOpts) error {
+ if !bytes.Equal(opts.policy, i.policy) {
+ // Swap the instance to a new one, with new policy.
+ n, err := newVM(opts)
+ if err != nil {
+ return err
+ }
+
+ i.Close()
+
+ *i = *n
+ return nil
+ }
+
+ i.dataAddr = 0
+
+ var err error
+ if err = i.setHeapState(i.baseHeapPtr); err != nil {
+ return err
+ }
+
+ if opts.parsedData != nil {
+ if i.memory.Length()-uint32(i.baseHeapPtr) < uint32(len(opts.parsedData)) {
+ delta := uint32(len(opts.parsedData)) - (i.memory.Length() - uint32(i.baseHeapPtr))
+ err := i.memory.Grow(Pages(delta))
+ if err != nil {
+ return err
+ }
+ }
+ mem := i.memory.Data()
+ for src, dest := 0, i.baseHeapPtr; src < len(opts.parsedData); src, dest = src+1, dest+1 {
+ mem[dest] = opts.parsedData[src]
+ }
+ i.dataAddr = opts.parsedDataAddr
+ i.evalHeapPtr = i.baseHeapPtr + int32(len(opts.parsedData))
+ err := i.setHeapState(i.evalHeapPtr)
+ if err != nil {
+ return err
+ }
+ } else if opts.data != nil {
+ if i.dataAddr, err = i.toRegoJSON(opts.data, true); err != nil {
+ return err
+ }
+ }
+
+ if i.evalHeapPtr, err = i.getHeapState(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close the VM instance.
+func (i *VM) Close() {
+ i.memory.Close()
+ i.instance.Close()
+}
+
+type abortError struct {
+ message string
+}
+
+// Abort is invoked by the policy if an internal error occurs during
+// the policy execution.
+func (i *VM) Abort(arg int32) {
+ data := i.memory.Data()[arg:]
+ n := bytes.IndexByte(data, 0)
+ if n == -1 {
+ panic("invalid abort argument")
+ }
+
+ panic(abortError{message: string(data[:n])})
+}
+
+// Println is invoked if the policy WASM code calls opa_println().
+func (i *VM) Println(arg int32) {
+ data := i.memory.Data()[arg:]
+ n := bytes.IndexByte(data, 0)
+ if n == -1 {
+ panic("invalid opa_println argument")
+ }
+
+ fmt.Printf("opa_println(): %s\n", string(data[:n]))
+}
+
+type builtinError struct {
+ err error
+}
+
+// Builtin executes a builtin for the policy.
+func (i *VM) Builtin(builtinID, ctx int32, args ...int32) int32 {
+
+ // TODO: Returning proper errors instead of panicing.
+ // TODO: To avoid growing the heap with every built-in call, recycle the JSON buffers since the free implementation is no-op.
+
+ convertedArgs := make([]*ast.Term, len(args))
+ for j, arg := range args {
+ x, err := i.fromRegoValue(arg, true)
+ if err != nil {
+ panic(builtinError{err: err})
+ }
+ convertedArgs[j] = x
+ }
+
+ if i.bctx == nil {
+ i.bctx = &topdown.BuiltinContext{
+ Context: context.Background(),
+ Cancel: nil,
+ Runtime: nil,
+ Time: ast.NumberTerm(json.Number(strconv.FormatInt(time.Now().UnixNano(), 10))),
+ Metrics: metrics.New(),
+ Cache: make(builtins.Cache),
+ Location: nil,
+ Tracers: nil,
+ QueryID: 0,
+ ParentID: 0,
+ }
+ }
+
+ err := i.builtins[builtinID](*i.bctx, convertedArgs, i.iter)
+ if err != nil {
+ panic(builtinError{err: err})
+ }
+
+ result, err := ast.JSON(i.builtinResult.Value)
+ if err != nil {
+ panic(builtinError{err: err})
+ }
+
+ addr, err := i.toRegoJSON(result, true)
+ if err != nil {
+ panic(builtinError{err: err})
+ }
+
+ return addr
+}
+
+// Entrypoints returns a mapping of entrypoint name to ID for use by Eval().
+func (i *VM) Entrypoints() map[string]int32 {
+ return i.entrypointIDs
+}
+
+// SetDataPath will update the current data on the VM by setting the value at the
+// specified path. If an error occurs the instance is still in a valid state, however
+// the data will not have been modified.
+func (i *VM) SetDataPath(path []string, value interface{}) error {
+
+ // Reset the heap ptr before patching the vm to try and keep any
+ // new allocations safe from subsequent heap resets on eval.
+ err := i.setHeapState(i.evalHeapPtr)
+ if err != nil {
+ return err
+ }
+
+ valueAddr, err := i.toRegoJSON(value, true)
+ if err != nil {
+ return err
+ }
+
+ pathAddr, err := i.toRegoJSON(path, true)
+ if err != nil {
+ return err
+ }
+
+ result, err := i.valueAddPath(i.dataAddr, pathAddr, valueAddr)
+ if err != nil {
+ return err
+ }
+
+ // We don't need to free the value, assume it is "owned" as part of the
+ // overall data object now.
+ // We do need to free the path
+
+ _, err = i.free(pathAddr)
+ if err != nil {
+ return err
+ }
+
+ // Update the eval heap pointer to accommodate for any new allocations done
+ // while patching.
+ i.evalHeapPtr, err = i.getHeapState()
+ if err != nil {
+ return err
+ }
+
+ errc := result.ToI32()
+ if errc != 0 {
+ return fmt.Errorf("unable to set data value for path %v, err=%d", path, errc)
+ }
+
+ return nil
+}
+
+// RemoveDataPath will update the current data on the VM by removing the value at the
+// specified path. If an error occurs the instance is still in a valid state, however
+// the data will not have been modified.
+func (i *VM) RemoveDataPath(path []string) error {
+	pathAddr, err := i.toRegoJSON(path, true)
+	if err != nil {
+		return err
+	}
+
+	result, err := i.valueRemovePath(i.dataAddr, pathAddr)
+	if err != nil {
+		return err
+	}
+
+	if _, err := i.free(pathAddr); err != nil {
+		return err
+	}
+
+	errc := result.ToI32()
+	if errc != 0 {
+		// fix: message said "set" — copy-paste from SetDataPath; this is a remove.
+		return fmt.Errorf("unable to remove data value for path %v, err=%d", path, errc)
+	}
+
+	return nil
+}
+
+func (i *VM) iter(result *ast.Term) error {
+ i.builtinResult = result
+ return nil
+}
+
+// fromRegoJSON parses serialized JSON from the Wasm memory buffer into
+// native go types.
+func (i *VM) fromRegoJSON(addr int32, free bool) (interface{}, error) {
+ serialized, err := i.jsonDump(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ data := i.memory.Data()[serialized.ToI32():]
+ n := bytes.IndexByte(data, 0)
+ if n < 0 {
+ n = 0
+ }
+
+ // Parse the result into go types.
+
+ decoder := json.NewDecoder(bytes.NewReader(data[0:n]))
+ decoder.UseNumber()
+
+ var result interface{}
+ if err := decoder.Decode(&result); err != nil {
+ return nil, err
+ }
+
+ if free {
+ if _, err := i.free(serialized.ToI32()); err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
+
+// toRegoJSON converts go native JSON to Rego JSON. If the value is
+// an AST type it will be dumped using its stringer.
+func (i *VM) toRegoJSON(v interface{}, free bool) (int32, error) {
+ var raw []byte
+ switch v := v.(type) {
+ case []byte:
+ raw = v
+ case *ast.Term:
+ raw = []byte(v.String())
+ case ast.Value:
+ raw = []byte(v.String())
+ default:
+ var err error
+ raw, err = json.Marshal(v)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ n := int32(len(raw))
+ pos, err := i.malloc(n)
+ if err != nil {
+ return 0, err
+ }
+
+ p := pos.ToI32()
+ copy(i.memory.Data()[p:p+n], raw)
+
+ addr, err := i.valueParse(p, n)
+ if err != nil {
+ return 0, err
+ }
+
+ if free {
+ if _, err := i.free(p); err != nil {
+ return 0, err
+ }
+ }
+
+ return addr.ToI32(), nil
+}
+
+// fromRegoValue parses serialized opa values from the Wasm memory buffer into
+// Rego AST types.
+func (i *VM) fromRegoValue(addr int32, free bool) (*ast.Term, error) {
+ serialized, err := i.valueDump(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ data := i.memory.Data()[serialized.ToI32():]
+ n := bytes.IndexByte(data, 0)
+ if n < 0 {
+ n = 0
+ }
+
+ // Parse the result into ast types.
+ result, err := ast.ParseTerm(string(data[0:n]))
+ if err != nil {
+ return nil, err
+ }
+
+ if free {
+ if _, err := i.free(serialized.ToI32()); err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
+
+func (i *VM) getHeapState() (int32, error) {
+ ptr, err := i.heapPtrGet()
+ if err != nil {
+ return 0, err
+ }
+
+ return ptr.ToI32(), nil
+}
+
+func (i *VM) setHeapState(ptr int32) error {
+ _, err := i.heapPtrSet(ptr)
+ return err
+}
+
+func (i *VM) cloneDataSegment() (int32, []byte) {
+ // The parsed data values sit between the base heap address and end
+ // at the eval heap pointer address.
+ srcData := i.memory.Data()[i.baseHeapPtr:i.evalHeapPtr]
+ patchedData := make([]byte, len(srcData))
+ copy(patchedData, srcData)
+ return i.dataAddr, patchedData
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/config.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/config.go
new file mode 100644
index 00000000..e75d1cee
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/config.go
@@ -0,0 +1,99 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package opa
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm"
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors"
+)
+
+// WithPolicyFile configures a policy file to load.
+func (o *OPA) WithPolicyFile(fileName string) *OPA {
+ policy, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ o.configErr = fmt.Errorf("%v: %w", err.Error(), errors.ErrInvalidConfig)
+ return o
+ }
+
+ o.policy = policy
+ return o
+}
+
+// WithPolicyBytes configures the compiled policy to load.
+func (o *OPA) WithPolicyBytes(policy []byte) *OPA {
+ o.policy = policy
+ return o
+}
+
+// WithDataFile configures the JSON data file to load.
+func (o *OPA) WithDataFile(fileName string) *OPA {
+ data, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ o.configErr = fmt.Errorf("%v: %w", err.Error(), errors.ErrInvalidConfig)
+ return o
+ }
+
+ o.data = data
+ return o
+}
+
+// WithDataBytes configures the JSON data to load.
+func (o *OPA) WithDataBytes(data []byte) *OPA {
+ o.data = data
+ return o
+}
+
+// WithDataJSON configures the JSON data to load.
+func (o *OPA) WithDataJSON(data interface{}) *OPA {
+ v, err := json.Marshal(data)
+ if err != nil {
+ o.configErr = fmt.Errorf("%v: %w", err.Error(), errors.ErrInvalidConfig)
+ return o
+ }
+
+ o.data = v
+ return o
+}
+
+// WithMemoryLimits configures the memory limits (in bytes) for a single policy
+// evaluation.
+func (o *OPA) WithMemoryLimits(min, max uint32) *OPA {
+ if min < 2*65535 {
+ o.configErr = fmt.Errorf("too low minimum memory limit: %w", errors.ErrInvalidConfig)
+ return o
+ }
+
+ if max != 0 && min > max {
+ o.configErr = fmt.Errorf("too low maximum memory limit: %w", errors.ErrInvalidConfig)
+ return o
+ }
+
+ o.memoryMinPages, o.memoryMaxPages = wasm.Pages(min), wasm.Pages(max)
+ return o
+}
+
+// WithPoolSize configures the maximum number of simultaneous policy
+// evaluations, i.e., the maximum number of underlying WASM instances
+// active at any time. The default is the number of logical CPUs
+// usable for the process as per runtime.NumCPU().
+func (o *OPA) WithPoolSize(size uint32) *OPA {
+ if size == 0 {
+ o.configErr = fmt.Errorf("pool size: %w", errors.ErrInvalidConfig)
+ return o
+ }
+
+ o.poolSize = size
+ return o
+}
+
+// WithErrorLogger configures an error logger invoked with all the errors.
+func (o *OPA) WithErrorLogger(logger func(error)) *OPA {
+ o.logError = logger
+ return o
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors/errors.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors/errors.go
new file mode 100644
index 00000000..bfdafc45
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors/errors.go
@@ -0,0 +1,26 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package errors
+
+import (
+ "errors"
+)
+
+var (
+ // ErrInvalidConfig is the error returned if the OPA initialization fails due to an invalid config.
+ ErrInvalidConfig = errors.New("invalid config")
+ // ErrInvalidPolicyOrData is the error returned if either policy or data is invalid.
+ ErrInvalidPolicyOrData = errors.New("invalid policy or data")
+ // ErrInvalidBundle is the error returned if the bundle loaded is corrupted.
+ ErrInvalidBundle = errors.New("invalid bundle")
+ // ErrNotReady is the error returned if the OPA instance is not initialized.
+ ErrNotReady = errors.New("not ready")
+ // ErrUndefined is the error returned if the evaluation result is undefined.
+ ErrUndefined = errors.New("undefined decision")
+ // ErrNonBoolean is the error returned if the evaluation result is not of boolean value.
+ ErrNonBoolean = errors.New("non-boolean decision")
+ // ErrInternal is the error returned if the evaluation fails due to an internal error.
+ ErrInternal = errors.New("internal error")
+)
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/opa.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/opa.go
new file mode 100644
index 00000000..ca715ee6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/sdk/opa/opa.go
@@ -0,0 +1,212 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package opa
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "runtime"
+ "sync"
+
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm"
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors"
+ "github.com/open-policy-agent/opa/metrics"
+)
+
+// OPA executes WebAssembly compiled Rego policies.
+type OPA struct {
+ configErr error // Delayed configuration error, if any.
+ memoryMinPages uint32
+ memoryMaxPages uint32 // 0 means no limit.
+ poolSize uint32
+ pool *wasm.Pool
+ mutex sync.Mutex // To serialize access to SetPolicy, SetData and Close.
+ policy []byte // Current policy.
+ data []byte // Current data.
+ logError func(error)
+}
+
+// Result holds the evaluation result.
+type Result struct {
+ Result []byte
+}
+
+// New constructs a new OPA SDK instance, ready to be configured with
+// With functions. If no policy is provided as a part of
+// configuration, policy (and data) needs to be set before invoking
+// Eval. Once constructed and configured, the instance needs to be
+// initialized before invoking the Eval.
+func New() *OPA {
+ opa := &OPA{
+ memoryMinPages: 16,
+ memoryMaxPages: 0,
+ poolSize: uint32(runtime.GOMAXPROCS(0)),
+ logError: func(error) {},
+ }
+
+ return opa
+}
+
+// Init initializes the SDK instance after the construction and
+// configuration. If the configuration is invalid, it returns
+// ErrInvalidConfig.
+func (o *OPA) Init() (*OPA, error) {
+ if o.configErr != nil {
+ return nil, o.configErr
+ }
+
+ o.pool = wasm.NewPool(o.poolSize, o.memoryMinPages, o.memoryMaxPages)
+
+ if len(o.policy) != 0 {
+ if err := o.pool.SetPolicyData(o.policy, o.data); err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// SetData updates the data for the subsequent Eval calls. Returns
+// either ErrNotReady, ErrInvalidPolicyOrData, or ErrInternal if an
+// error occurs.
+func (o *OPA) SetData(v interface{}) error {
+ if o.pool == nil {
+ return errors.ErrNotReady
+ }
+
+ raw, err := json.Marshal(v)
+ if err != nil {
+ return fmt.Errorf("%v: %w", err, errors.ErrInvalidPolicyOrData)
+ }
+
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ return o.setPolicyData(o.policy, raw)
+}
+
+// SetDataPath will update the current data on the VMs by setting the value at the
+// specified path. If an error occurs the instance is still in a valid state, however
+// the data will not have been modified.
+func (o *OPA) SetDataPath(path []string, value interface{}) error {
+ return o.pool.SetDataPath(path, value)
+}
+
+// RemoveDataPath will update the current data on the VMs by removing the value at the
+// specified path. If an error occurs the instance is still in a valid state, however
+// the data will not have been modified.
+func (o *OPA) RemoveDataPath(path []string) error {
+ return o.pool.RemoveDataPath(path)
+}
+
+// SetPolicy updates the policy for the subsequent Eval calls.
+// Returns either ErrNotReady, ErrInvalidPolicy or ErrInternal if an
+// error occurs.
+func (o *OPA) SetPolicy(p []byte) error {
+ if o.pool == nil {
+ return errors.ErrNotReady
+ }
+
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ return o.setPolicyData(p, o.data)
+}
+
+// SetPolicyData updates both the policy and data for the subsequent
+// Eval calls. Returns either ErrNotReady, ErrInvalidPolicyOrData, or
+// ErrInternal if an error occurs.
+func (o *OPA) SetPolicyData(policy []byte, data *interface{}) error {
+ if o.pool == nil {
+ return errors.ErrNotReady
+ }
+
+ var raw []byte
+ if data != nil {
+ var err error
+ raw, err = json.Marshal(*data)
+ if err != nil {
+ return fmt.Errorf("%v: %w", err, errors.ErrInvalidPolicyOrData)
+ }
+ }
+
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ return o.setPolicyData(policy, raw)
+}
+
+func (o *OPA) setPolicyData(policy []byte, data []byte) error {
+ if err := o.pool.SetPolicyData(policy, data); err != nil {
+ return err
+ }
+
+ o.policy = policy
+ o.data = data
+ return nil
+}
+
+// EvalOpts define options for performing an evaluation
+type EvalOpts struct {
+ Entrypoint int32
+ Input *interface{}
+ Metrics metrics.Metrics
+}
+
+// Eval evaluates the policy with the given input, returning the
+// evaluation results. If no policy was configured at construction
+// time nor set after, the function returns ErrNotReady. It returns
+// ErrInternal if any other error occurs.
+func (o *OPA) Eval(ctx context.Context, opts EvalOpts) (*Result, error) {
+ if o.pool == nil {
+ return nil, errors.ErrNotReady
+ }
+
+ m := opts.Metrics
+ if m == nil {
+ m = metrics.New()
+ }
+
+ instance, err := o.pool.Acquire(ctx, m)
+ if err != nil {
+ return nil, err
+ }
+
+ defer o.pool.Release(instance, m)
+
+ result, err := instance.Eval(ctx, opts.Entrypoint, opts.Input, m)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %w", err, errors.ErrInternal)
+ }
+
+ return &Result{result}, nil
+}
+
+// Close waits until all the pending evaluations complete and then
+// releases all the resources allocated. Eval will return ErrClosed
+// afterwards.
+func (o *OPA) Close() {
+ if o.pool == nil {
+ return
+ }
+
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ o.pool.Close()
+}
+
+// Entrypoints returns a mapping of entrypoint name to ID for use by Eval() and EvalBool().
+func (o *OPA) Entrypoints(ctx context.Context) (map[string]int32, error) {
+ instance, err := o.pool.Acquire(ctx, metrics.New())
+ if err != nil {
+ return nil, err
+ }
+
+ defer o.pool.Release(instance, metrics.New())
+
+ return instance.Entrypoints(), nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/internal/wasm/types/types.go b/vendor/github.com/open-policy-agent/opa/internal/wasm/types/types.go
new file mode 100644
index 00000000..4e2b7762
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/internal/wasm/types/types.go
@@ -0,0 +1,36 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package types defines the WASM value type constants.
+package types
+
+// ValueType represents an intrinsic value in WASM.
+type ValueType int
+
+// Defines the intrinsic value types.
+const (
+ I32 ValueType = iota
+ I64
+ F32
+ F64
+)
+
+func (tpe ValueType) String() string {
+ if tpe == I32 {
+ return "i32"
+ } else if tpe == I64 {
+ return "i64"
+ } else if tpe == F32 {
+ return "f32"
+ }
+ return "f64"
+}
+
+// ElementType defines the type of table elements.
+type ElementType int
+
+const (
+ // Anyfunc is the union of all table types.
+ Anyfunc ElementType = iota
+)
diff --git a/vendor/github.com/open-policy-agent/opa/keys/keys.go b/vendor/github.com/open-policy-agent/opa/keys/keys.go
new file mode 100644
index 00000000..8272168e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/keys/keys.go
@@ -0,0 +1,100 @@
+package keys
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+const defaultSigningAlgorithm = "RS256"
+
+var supportedAlgos = map[string]struct{}{
+ "ES256": {}, "ES384": {}, "ES512": {},
+ "HS256": {}, "HS384": {}, "HS512": {},
+ "PS256": {}, "PS384": {}, "PS512": {},
+ "RS256": {}, "RS384": {}, "RS512": {},
+}
+
+// IsSupportedAlgorithm returns true if the provided alg is supported.
+func IsSupportedAlgorithm(alg string) bool {
+ _, ok := supportedAlgos[alg]
+ return ok
+}
+
+// Config holds the keys used to sign or verify bundles and tokens
+type Config struct {
+ Key string `json:"key"`
+ PrivateKey string `json:"private_key"`
+ Algorithm string `json:"algorithm"`
+ Scope string `json:"scope"`
+}
+
+// Equal returns true if this key config is equal to the other.
+func (k *Config) Equal(other *Config) bool {
+ return other != nil && *k == *other
+}
+
+func (k *Config) validateAndInjectDefaults(id string) error {
+ if k.Key == "" && k.PrivateKey == "" {
+ return fmt.Errorf("invalid keys configuration: no keys provided for key ID %v", id)
+ }
+
+ if k.Algorithm == "" {
+ k.Algorithm = defaultSigningAlgorithm
+ }
+
+ if !IsSupportedAlgorithm(k.Algorithm) {
+ return fmt.Errorf("unsupported algorithm '%v'", k.Algorithm)
+ }
+
+ return nil
+}
+
+// NewKeyConfig returns a new Config.
+func NewKeyConfig(key, alg, scope string) (*Config, error) {
+ var pubKey string
+ if _, err := os.Stat(key); err == nil {
+ bs, err := ioutil.ReadFile(key)
+ if err != nil {
+ return nil, err
+ }
+ pubKey = string(bs)
+ } else if os.IsNotExist(err) {
+ pubKey = key
+ } else {
+ return nil, err
+ }
+
+ return &Config{
+ Key: pubKey,
+ Algorithm: alg,
+ Scope: scope,
+ }, nil
+}
+
+// ParseKeysConfig returns a map containing the key and the signing algorithm
+func ParseKeysConfig(raw json.RawMessage) (map[string]*Config, error) {
+ keys := map[string]*Config{}
+ var obj map[string]json.RawMessage
+
+ if err := util.Unmarshal(raw, &obj); err == nil {
+ for k := range obj {
+ var keyConfig Config
+ if err = util.Unmarshal(obj[k], &keyConfig); err != nil {
+ return nil, err
+ }
+
+ if err = keyConfig.validateAndInjectDefaults(k); err != nil {
+ return nil, err
+ }
+
+ keys[k] = &keyConfig
+ }
+ } else {
+ return nil, err
+ }
+ return keys, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/loader/errors.go b/vendor/github.com/open-policy-agent/opa/loader/errors.go
new file mode 100644
index 00000000..b2f29c64
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/loader/errors.go
@@ -0,0 +1,68 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package loader
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// Errors is a wrapper for multiple loader errors.
+type Errors []error
+
+func (e Errors) Error() string {
+ if len(e) == 0 {
+ return "no error(s)"
+ }
+ if len(e) == 1 {
+ return "1 error occurred during loading: " + e[0].Error()
+ }
+ buf := make([]string, len(e))
+ for i := range buf {
+ buf[i] = e[i].Error()
+ }
+ return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n")
+}
+
+func (e *Errors) add(err error) {
+ if errs, ok := err.(ast.Errors); ok {
+ for i := range errs {
+ *e = append(*e, errs[i])
+ }
+ } else {
+ *e = append(*e, err)
+ }
+}
+
+type unsupportedDocumentType string
+
+func (path unsupportedDocumentType) Error() string {
+ return string(path) + ": bad document type"
+}
+
+type unrecognizedFile string
+
+func (path unrecognizedFile) Error() string {
+ return string(path) + ": can't recognize file type"
+}
+
+func isUnrecognizedFile(err error) bool {
+ _, ok := err.(unrecognizedFile)
+ return ok
+}
+
+type mergeError string
+
+func (e mergeError) Error() string {
+ return string(e) + ": merge error"
+}
+
+type emptyModuleError string
+
+func (e emptyModuleError) Error() string {
+ return string(e) + ": empty policy"
+}
diff --git a/vendor/github.com/open-policy-agent/opa/loader/loader.go b/vendor/github.com/open-policy-agent/opa/loader/loader.go
new file mode 100644
index 00000000..79c740c7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/loader/loader.go
@@ -0,0 +1,548 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package loader contains utilities for loading files into OPA.
+package loader
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/ghodss/yaml"
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/metrics"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/bundle"
+ fileurl "github.com/open-policy-agent/opa/internal/file/url"
+ "github.com/open-policy-agent/opa/internal/merge"
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/storage/inmem"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// Result represents the result of successfully loading zero or more files.
+type Result struct {
+ Documents map[string]interface{}
+ Modules map[string]*RegoFile
+ path []string
+}
+
+// ParsedModules returns the parsed modules stored on the result.
+func (l *Result) ParsedModules() map[string]*ast.Module {
+ modules := make(map[string]*ast.Module)
+ for _, module := range l.Modules {
+ modules[module.Name] = module.Parsed
+ }
+ return modules
+}
+
+// Compiler returns a Compiler object with the compiled modules from this loader
+// result.
+func (l *Result) Compiler() (*ast.Compiler, error) {
+ compiler := ast.NewCompiler()
+ compiler.Compile(l.ParsedModules())
+ if compiler.Failed() {
+ return nil, compiler.Errors
+ }
+ return compiler, nil
+}
+
+// Store returns a Store object with the documents from this loader result.
+func (l *Result) Store() (storage.Store, error) {
+ return inmem.NewFromObject(l.Documents), nil
+}
+
+// RegoFile represents the result of loading a single Rego source file.
+type RegoFile struct {
+ Name string
+ Parsed *ast.Module
+ Raw []byte
+}
+
+// Filter defines the interface for filtering files during loading. If the
+// filter returns true, the file should be excluded from the result.
+type Filter func(abspath string, info os.FileInfo, depth int) bool
+
+// GlobExcludeName excludes files and directories whose names do not match the
+// shell style pattern at minDepth or greater.
+func GlobExcludeName(pattern string, minDepth int) Filter {
+ return func(abspath string, info os.FileInfo, depth int) bool {
+ match, _ := filepath.Match(pattern, info.Name())
+ return match && depth >= minDepth
+ }
+}
+
+// FileLoader defines an interface for loading OPA data files
+// and Rego policies.
+type FileLoader interface {
+ All(paths []string) (*Result, error)
+ Filtered(paths []string, filter Filter) (*Result, error)
+ AsBundle(path string) (*bundle.Bundle, error)
+ WithMetrics(m metrics.Metrics) FileLoader
+ WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader
+ WithSkipBundleVerification(skipVerify bool) FileLoader
+}
+
+// NewFileLoader returns a new FileLoader instance.
+func NewFileLoader() FileLoader {
+ return &fileLoader{
+ metrics: metrics.New(),
+ files: make(map[string]bundle.FileInfo),
+ }
+}
+
+type descriptor struct {
+ result *Result
+ path string
+ relPath string
+ depth int
+}
+
+type fileLoader struct {
+ metrics metrics.Metrics
+ bvc *bundle.VerificationConfig
+ skipVerify bool
+ descriptors []*descriptor
+ files map[string]bundle.FileInfo
+}
+
+// WithMetrics provides the metrics instance to use while loading
+func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader {
+ fl.metrics = m
+ return fl
+}
+
+// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle
+func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader {
+ fl.bvc = config
+ return fl
+}
+
+// WithSkipBundleVerification skips verification of a signed bundle
+func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader {
+ fl.skipVerify = skipVerify
+ return fl
+}
+
+// All returns a Result object loaded (recursively) from the specified paths.
+func (fl fileLoader) All(paths []string) (*Result, error) {
+ return fl.Filtered(paths, nil)
+}
+
+// Filtered returns a Result object loaded (recursively) from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded.
+func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) {
+ return all(paths, filter, func(curr *Result, path string, depth int) error {
+
+ bs, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ result, err := loadKnownTypes(path, bs, fl.metrics)
+ if err != nil {
+ if !isUnrecognizedFile(err) {
+ return err
+ }
+ if depth > 0 {
+ return nil
+ }
+ result, err = loadFileForAnyType(path, bs, fl.metrics)
+ if err != nil {
+ return err
+ }
+ }
+
+ return curr.merge(path, result)
+ })
+}
+
+// AsBundle loads a path as a bundle. If it is a single file
+// it will be treated as a normal tarball bundle. If a directory
+// is supplied it will be loaded as an unzipped bundle tree.
+func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) {
+ bundleLoader, isDir, err := GetBundleDirectoryLoader(path)
+ if err != nil {
+ return nil, err
+ }
+
+ br := bundle.NewCustomReader(bundleLoader).WithMetrics(fl.metrics).WithBundleVerificationConfig(fl.bvc).
+ WithSkipBundleVerification(fl.skipVerify)
+
+ // For bundle directories add the full path in front of module file names
+ // to simplify debugging.
+ if isDir {
+ br.WithBaseDir(path)
+ }
+
+ b, err := br.Read()
+ if err != nil {
+ err = errors.Wrap(err, fmt.Sprintf("bundle %s", path))
+ }
+
+ return &b, err
+}
+
+// GetBundleDirectoryLoader returns a bundle directory loader which can be used to load
+// files in the directory.
+func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, false, err
+ }
+
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, false, fmt.Errorf("error reading %q: %s", path, err)
+ }
+
+ var bundleLoader bundle.DirectoryLoader
+
+ if fi.IsDir() {
+ bundleLoader = bundle.NewDirectoryLoader(path)
+ } else {
+ fh, err := os.Open(path)
+ if err != nil {
+ return nil, false, err
+ }
+ bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path)
+ }
+ return bundleLoader, fi.IsDir(), nil
+}
+
+// FilteredPaths return a list of files from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded.
+func FilteredPaths(paths []string, filter Filter) ([]string, error) {
+ result := []string{}
+
+ _, err := all(paths, filter, func(_ *Result, path string, _ int) error {
+ result = append(result, path)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// All returns a Result object loaded (recursively) from the specified paths.
+// Deprecated: Use FileLoader.Filtered() instead.
+func All(paths []string) (*Result, error) {
+ return NewFileLoader().Filtered(paths, nil)
+}
+
+// Filtered returns a Result object loaded (recursively) from the specified
+// paths while applying the given filters. If any filter returns true, the
+// file/directory is excluded.
+// Deprecated: Use FileLoader.Filtered() instead.
+func Filtered(paths []string, filter Filter) (*Result, error) {
+ return NewFileLoader().Filtered(paths, filter)
+}
+
+// AsBundle loads a path as a bundle. If it is a single file
+// it will be treated as a normal tarball bundle. If a directory
+// is supplied it will be loaded as an unzipped bundle tree.
+// Deprecated: Use FileLoader.AsBundle() instead.
+func AsBundle(path string) (*bundle.Bundle, error) {
+ return NewFileLoader().AsBundle(path)
+}
+
+// AllRegos returns a Result object loaded (recursively) with all Rego source
+// files from the specified paths.
+func AllRegos(paths []string) (*Result, error) {
+ return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, depth int) bool {
+ return !info.IsDir() && !strings.HasSuffix(info.Name(), bundle.RegoExt)
+ })
+}
+
+// Rego returns a RegoFile object loaded from the given path.
+func Rego(path string) (*RegoFile, error) {
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ return nil, err
+ }
+ bs, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ return loadRego(path, bs, metrics.New())
+}
+
+// CleanPath returns the normalized version of a path that can be used as an identifier.
+func CleanPath(path string) string {
+ return strings.Trim(path, "/")
+}
+
+// Paths returns a sorted list of files contained at path. If recurse is true
+// and path is a directory, then Paths will walk the directory structure
+// recursively and list files at each level.
+func Paths(path string, recurse bool) (paths []string, err error) {
+ path, err = fileurl.Clean(path)
+ if err != nil {
+ return nil, err
+ }
+ err = filepath.Walk(path, func(f string, info os.FileInfo, err error) error {
+ if !recurse {
+ if path != f && path != filepath.Dir(f) {
+ return filepath.SkipDir
+ }
+ }
+ paths = append(paths, f)
+ return nil
+ })
+ return paths, err
+}
+
+// Dirs resolves filepaths to directories. It will return a list of unique
+// directories.
+func Dirs(paths []string) []string {
+ unique := map[string]struct{}{}
+
+ for _, path := range paths {
+ // TODO: /dir/dir will register top level directory /dir
+ dir := filepath.Dir(path)
+ unique[dir] = struct{}{}
+ }
+
+ var u []string
+ for k := range unique {
+ u = append(u, k)
+ }
+ sort.Strings(u)
+ return u
+}
+
+// SplitPrefix returns a tuple specifying the document prefix and the file
+// path.
+func SplitPrefix(path string) ([]string, string) {
+ // Non-prefixed URLs can be returned without modification and their contents
+ // can be rooted directly under data.
+ if strings.Index(path, "://") == strings.Index(path, ":") {
+ return nil, path
+ }
+ parts := strings.SplitN(path, ":", 2)
+ if len(parts) == 2 && len(parts[0]) > 0 {
+ return strings.Split(parts[0], "."), parts[1]
+ }
+ return nil, path
+}
+
+func (l *Result) merge(path string, result interface{}) error {
+ switch result := result.(type) {
+ case bundle.Bundle:
+ for _, module := range result.Modules {
+ l.Modules[module.Path] = &RegoFile{
+ Name: module.Path,
+ Parsed: module.Parsed,
+ Raw: module.Raw,
+ }
+ }
+ return l.mergeDocument(path, result.Data)
+ case *RegoFile:
+ l.Modules[CleanPath(path)] = result
+ return nil
+ default:
+ return l.mergeDocument(path, result)
+ }
+}
+
+func (l *Result) mergeDocument(path string, doc interface{}) error {
+ obj, ok := makeDir(l.path, doc)
+ if !ok {
+ return unsupportedDocumentType(path)
+ }
+ merged, ok := merge.InterfaceMaps(l.Documents, obj)
+ if !ok {
+ return mergeError(path)
+ }
+ for k := range merged {
+ l.Documents[k] = merged[k]
+ }
+ return nil
+}
+
+func (l *Result) withParent(p string) *Result {
+ path := append(l.path, p)
+ return &Result{
+ Documents: l.Documents,
+ Modules: l.Modules,
+ path: path,
+ }
+}
+
+func newResult() *Result {
+ return &Result{
+ Documents: map[string]interface{}{},
+ Modules: map[string]*RegoFile{},
+ }
+}
+
+func all(paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) {
+ errors := Errors{}
+ root := newResult()
+
+ for _, path := range paths {
+
+ // Paths can be prefixed with a string that specifies where content should be
+ // loaded under data. E.g., foo.bar:/path/to/some.json will load the content
+ // of some.json under {"foo": {"bar": ...}}.
+ loaded := root
+ prefix, path := SplitPrefix(path)
+ if len(prefix) > 0 {
+ for _, part := range prefix {
+ loaded = loaded.withParent(part)
+ }
+ }
+
+ allRec(path, filter, &errors, loaded, 0, f)
+ }
+
+ if len(errors) > 0 {
+ return nil, errors
+ }
+
+ return root, nil
+}
+
+func allRec(path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) {
+
+ path, err := fileurl.Clean(path)
+ if err != nil {
+ errors.add(err)
+ return
+ }
+
+ info, err := os.Stat(path)
+ if err != nil {
+ errors.add(err)
+ return
+ }
+
+ if filter != nil && filter(path, info, depth) {
+ return
+ }
+
+ if !info.IsDir() {
+ if err := f(loaded, path, depth); err != nil {
+ errors.add(err)
+ }
+ return
+ }
+
+ // If we are recursing on directories then content must be loaded under path
+ // specified by directory hierarchy.
+ if depth > 0 {
+ loaded = loaded.withParent(info.Name())
+ }
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ errors.add(err)
+ return
+ }
+
+ for _, file := range files {
+ allRec(filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f)
+ }
+}
+
+func loadKnownTypes(path string, bs []byte, m metrics.Metrics) (interface{}, error) {
+ switch filepath.Ext(path) {
+ case ".json":
+ return loadJSON(path, bs, m)
+ case ".rego":
+ return loadRego(path, bs, m)
+ case ".yaml", ".yml":
+ return loadYAML(path, bs, m)
+ default:
+ if strings.HasSuffix(path, ".tar.gz") {
+ r, err := loadBundleFile(path, bs, m)
+ if err != nil {
+ err = errors.Wrap(err, fmt.Sprintf("bundle %s", path))
+ }
+ return r, err
+ }
+ }
+ return nil, unrecognizedFile(path)
+}
+
+func loadFileForAnyType(path string, bs []byte, m metrics.Metrics) (interface{}, error) {
+ module, err := loadRego(path, bs, m)
+ if err == nil {
+ return module, nil
+ }
+ doc, err := loadJSON(path, bs, m)
+ if err == nil {
+ return doc, nil
+ }
+ doc, err = loadYAML(path, bs, m)
+ if err == nil {
+ return doc, nil
+ }
+ return nil, unrecognizedFile(path)
+}
+
+func loadBundleFile(path string, bs []byte, m metrics.Metrics) (bundle.Bundle, error) {
+ tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path)
+ br := bundle.NewCustomReader(tl).WithMetrics(m).WithSkipBundleVerification(true).IncludeManifestInData(true)
+ return br.Read()
+}
+
+func loadRego(path string, bs []byte, m metrics.Metrics) (*RegoFile, error) {
+ m.Timer(metrics.RegoModuleParse).Start()
+ module, err := ast.ParseModule(path, string(bs))
+ m.Timer(metrics.RegoModuleParse).Stop()
+ if err != nil {
+ return nil, err
+ }
+ result := &RegoFile{
+ Name: path,
+ Parsed: module,
+ Raw: bs,
+ }
+ return result, nil
+}
+
+func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) {
+ m.Timer(metrics.RegoDataParse).Start()
+ buf := bytes.NewBuffer(bs)
+ decoder := util.NewJSONDecoder(buf)
+ var x interface{}
+ err := decoder.Decode(&x)
+ m.Timer(metrics.RegoDataParse).Stop()
+ if err != nil {
+ return nil, errors.Wrap(err, path)
+ }
+ return x, nil
+}
+
+func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) {
+ m.Timer(metrics.RegoDataParse).Start()
+ bs, err := yaml.YAMLToJSON(bs)
+ m.Timer(metrics.RegoDataParse).Stop()
+ if err != nil {
+ return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err)
+ }
+ return loadJSON(path, bs, m)
+}
+
+func makeDir(path []string, x interface{}) (map[string]interface{}, bool) {
+ if len(path) == 0 {
+ obj, ok := x.(map[string]interface{})
+ if !ok {
+ return nil, false
+ }
+ return obj, true
+ }
+ return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x})
+}
diff --git a/vendor/github.com/open-policy-agent/opa/metrics/metrics.go b/vendor/github.com/open-policy-agent/opa/metrics/metrics.go
new file mode 100644
index 00000000..1cf7d4df
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/metrics/metrics.go
@@ -0,0 +1,288 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package metrics contains helpers for performance metric management inside the policy engine.
+package metrics
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ go_metrics "github.com/rcrowley/go-metrics"
+)
+
+// Well-known metric names.
+const (
+ ServerHandler = "server_handler"
+ ServerQueryCacheHit = "server_query_cache_hit"
+ RegoQueryCompile = "rego_query_compile"
+ RegoQueryEval = "rego_query_eval"
+ RegoQueryParse = "rego_query_parse"
+ RegoModuleParse = "rego_module_parse"
+ RegoDataParse = "rego_data_parse"
+ RegoModuleCompile = "rego_module_compile"
+ RegoPartialEval = "rego_partial_eval"
+ RegoInputParse = "rego_input_parse"
+ RegoLoadFiles = "rego_load_files"
+ RegoLoadBundles = "rego_load_bundles"
+ RegoExternalResolve = "rego_external_resolve"
+)
+
+// Info contains attributes describing the underlying metrics provider.
+type Info struct {
+ Name string `json:"name"` // name is a unique human-readable identifier for the provider.
+}
+
+// Metrics defines the interface for a collection of performance metrics in the
+// policy engine.
+type Metrics interface {
+ Info() Info
+ Timer(name string) Timer
+ Histogram(name string) Histogram
+ Counter(name string) Counter
+ All() map[string]interface{}
+ Clear()
+ json.Marshaler
+}
+
+type metrics struct {
+ mtx sync.Mutex
+ timers map[string]Timer
+ histograms map[string]Histogram
+ counters map[string]Counter
+}
+
+// New returns a new Metrics object.
+func New() Metrics {
+ m := &metrics{}
+ m.Clear()
+ return m
+}
+
+type metric struct {
+ Key string
+ Value interface{}
+}
+
+func (m *metrics) Info() Info {
+ return Info{
+ Name: "",
+ }
+}
+
+func (m *metrics) String() string {
+
+ all := m.All()
+ sorted := make([]metric, 0, len(all))
+
+ for key, value := range all {
+ sorted = append(sorted, metric{
+ Key: key,
+ Value: value,
+ })
+ }
+
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Key < sorted[j].Key
+ })
+
+ buf := make([]string, len(sorted))
+ for i := range sorted {
+ buf[i] = fmt.Sprintf("%v:%v", sorted[i].Key, sorted[i].Value)
+ }
+
+ return strings.Join(buf, " ")
+}
+
+func (m *metrics) MarshalJSON() ([]byte, error) {
+ return json.Marshal(m.All())
+}
+
+func (m *metrics) Timer(name string) Timer {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ t, ok := m.timers[name]
+ if !ok {
+ t = &timer{}
+ m.timers[name] = t
+ }
+ return t
+}
+
+func (m *metrics) Histogram(name string) Histogram {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ h, ok := m.histograms[name]
+ if !ok {
+ h = newHistogram()
+ m.histograms[name] = h
+ }
+ return h
+}
+
+func (m *metrics) Counter(name string) Counter {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ c, ok := m.counters[name]
+ if !ok {
+ zero := counter{}
+ c = &zero
+ m.counters[name] = c
+ }
+ return c
+}
+
+func (m *metrics) All() map[string]interface{} {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ result := map[string]interface{}{}
+ for name, timer := range m.timers {
+ result[m.formatKey(name, timer)] = timer.Value()
+ }
+ for name, hist := range m.histograms {
+ result[m.formatKey(name, hist)] = hist.Value()
+ }
+ for name, cntr := range m.counters {
+ result[m.formatKey(name, cntr)] = cntr.Value()
+ }
+ return result
+}
+
+func (m *metrics) Clear() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ m.timers = map[string]Timer{}
+ m.histograms = map[string]Histogram{}
+ m.counters = map[string]Counter{}
+}
+
+func (m *metrics) formatKey(name string, metrics interface{}) string {
+ switch metrics.(type) {
+ case Timer:
+ return "timer_" + name + "_ns"
+ case Histogram:
+ return "histogram_" + name
+ case Counter:
+ return "counter_" + name
+ default:
+ return name
+ }
+}
+
+// Timer defines the interface for a restartable timer that accumulates elapsed
+// time.
+type Timer interface {
+ Value() interface{}
+ Int64() int64
+ Start()
+ Stop() int64
+}
+
+type timer struct {
+ mtx sync.Mutex
+ start time.Time
+ value int64
+}
+
+func (t *timer) Start() {
+ t.mtx.Lock()
+ defer t.mtx.Unlock()
+ t.start = time.Now()
+}
+
+func (t *timer) Stop() int64 {
+ t.mtx.Lock()
+ defer t.mtx.Unlock()
+ delta := time.Now().Sub(t.start).Nanoseconds()
+ t.value += delta
+ return delta
+}
+
+func (t *timer) Value() interface{} {
+ return t.Int64()
+}
+
+func (t *timer) Int64() int64 {
+ t.mtx.Lock()
+ defer t.mtx.Unlock()
+ return t.value
+}
+
+// Histogram defines the interface for a histogram with hardcoded percentiles.
+type Histogram interface {
+ Value() interface{}
+ Update(int64)
+}
+
+type histogram struct {
+ hist go_metrics.Histogram // is thread-safe because of the underlying ExpDecaySample
+}
+
+func newHistogram() Histogram {
+ // NOTE(tsandall): the reservoir size and alpha factor are taken from
+ // https://github.com/rcrowley/go-metrics. They may need to be tweaked in
+ // the future.
+ sample := go_metrics.NewExpDecaySample(1028, 0.015)
+ hist := go_metrics.NewHistogram(sample)
+ return &histogram{hist}
+}
+
+func (h *histogram) Update(v int64) {
+ h.hist.Update(v)
+}
+
+func (h *histogram) Value() interface{} {
+ values := map[string]interface{}{}
+ snap := h.hist.Snapshot()
+ percentiles := snap.Percentiles([]float64{
+ 0.5,
+ 0.75,
+ 0.9,
+ 0.95,
+ 0.99,
+ 0.999,
+ 0.9999,
+ })
+ values["count"] = snap.Count()
+ values["min"] = snap.Min()
+ values["max"] = snap.Max()
+ values["mean"] = snap.Mean()
+ values["stddev"] = snap.StdDev()
+ values["median"] = percentiles[0]
+ values["75%"] = percentiles[1]
+ values["90%"] = percentiles[2]
+ values["95%"] = percentiles[3]
+ values["99%"] = percentiles[4]
+ values["99.9%"] = percentiles[5]
+ values["99.99%"] = percentiles[6]
+ return values
+}
+
+// Counter defines the interface for a monotonic increasing counter.
+type Counter interface {
+ Value() interface{}
+ Incr()
+ Add(n uint64)
+}
+
+type counter struct {
+ c uint64
+}
+
+func (c *counter) Incr() {
+ atomic.AddUint64(&c.c, 1)
+}
+
+func (c *counter) Add(n uint64) {
+ atomic.AddUint64(&c.c, n)
+}
+
+func (c *counter) Value() interface{} {
+ return atomic.LoadUint64(&c.c)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/rego/rego.go b/vendor/github.com/open-policy-agent/opa/rego/rego.go
new file mode 100644
index 00000000..27a42606
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/rego/rego.go
@@ -0,0 +1,2270 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package rego exposes high level APIs for evaluating Rego policies.
+package rego
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/bundle"
+ bundleUtils "github.com/open-policy-agent/opa/internal/bundle"
+ "github.com/open-policy-agent/opa/internal/compiler/wasm"
+ "github.com/open-policy-agent/opa/internal/ir"
+ "github.com/open-policy-agent/opa/internal/planner"
+ "github.com/open-policy-agent/opa/internal/wasm/encoding"
+ "github.com/open-policy-agent/opa/loader"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/resolver"
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/storage/inmem"
+ "github.com/open-policy-agent/opa/topdown"
+ "github.com/open-policy-agent/opa/topdown/cache"
+ "github.com/open-policy-agent/opa/types"
+ "github.com/open-policy-agent/opa/util"
+)
+
+const defaultPartialNamespace = "partial"
+
+// CompileResult represents the result of compiling a Rego query, zero or more
+// Rego modules, and arbitrary contextual data into an executable.
+type CompileResult struct {
+ Bytes []byte `json:"bytes"`
+}
+
+// PartialQueries contains the queries and support modules produced by partial
+// evaluation.
+type PartialQueries struct {
+ Queries []ast.Body `json:"queries,omitempty"`
+ Support []*ast.Module `json:"modules,omitempty"`
+}
+
+// PartialResult represents the result of partial evaluation. The result can be
+// used to generate a new query that can be run when inputs are known.
+type PartialResult struct {
+ compiler *ast.Compiler
+ store storage.Store
+ body ast.Body
+ builtinDecls map[string]*ast.Builtin
+ builtinFuncs map[string]*topdown.Builtin
+}
+
+// Rego returns an object that can be evaluated to produce a query result.
+func (pr PartialResult) Rego(options ...func(*Rego)) *Rego {
+ options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body))
+ r := New(options...)
+
+ // Propagate any custom builtins.
+ for k, v := range pr.builtinDecls {
+ r.builtinDecls[k] = v
+ }
+ for k, v := range pr.builtinFuncs {
+ r.builtinFuncs[k] = v
+ }
+ return r
+}
+
+// preparedQuery is a wrapper around a Rego object which has pre-processed
+// state stored on it. Once prepared there are a more limited number of actions
+// that can be taken with it. It will, however, be able to evaluate faster since
+// it will not have to re-parse or compile as much.
+type preparedQuery struct {
+ r *Rego
+ cfg *PrepareConfig
+}
+
+// EvalContext defines the set of options allowed to be set at evaluation
+// time. Any other options will need to be set on a new Rego object.
+type EvalContext struct {
+ hasInput bool
+ time time.Time
+ rawInput *interface{}
+ parsedInput ast.Value
+ metrics metrics.Metrics
+ txn storage.Transaction
+ instrument bool
+ instrumentation *topdown.Instrumentation
+ partialNamespace string
+ queryTracers []topdown.QueryTracer
+ compiledQuery compiledQuery
+ unknowns []string
+ disableInlining []ast.Ref
+ parsedUnknowns []*ast.Term
+ indexing bool
+ interQueryBuiltinCache cache.InterQueryCache
+ resolvers []refResolver
+}
+
+// EvalOption defines a function to set an option on an EvalConfig
+type EvalOption func(*EvalContext)
+
+// EvalInput configures the input for a Prepared Query's evaluation
+func EvalInput(input interface{}) EvalOption {
+ return func(e *EvalContext) {
+ e.rawInput = &input
+ e.hasInput = true
+ }
+}
+
+// EvalParsedInput configures the input for a Prepared Query's evaluation
+func EvalParsedInput(input ast.Value) EvalOption {
+ return func(e *EvalContext) {
+ e.parsedInput = input
+ e.hasInput = true
+ }
+}
+
+// EvalMetrics configures the metrics for a Prepared Query's evaluation
+func EvalMetrics(metric metrics.Metrics) EvalOption {
+ return func(e *EvalContext) {
+ e.metrics = metric
+ }
+}
+
+// EvalTransaction configures the Transaction for a Prepared Query's evaluation
+func EvalTransaction(txn storage.Transaction) EvalOption {
+ return func(e *EvalContext) {
+ e.txn = txn
+ }
+}
+
+// EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation
+func EvalInstrument(instrument bool) EvalOption {
+ return func(e *EvalContext) {
+ e.instrument = instrument
+ }
+}
+
+// EvalTracer configures a tracer for a Prepared Query's evaluation
+// Deprecated: Use EvalQueryTracer instead.
+func EvalTracer(tracer topdown.Tracer) EvalOption {
+ return func(e *EvalContext) {
+ if tracer != nil {
+ e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer))
+ }
+ }
+}
+
+// EvalQueryTracer configures a tracer for a Prepared Query's evaluation
+func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption {
+ return func(e *EvalContext) {
+ if tracer != nil {
+ e.queryTracers = append(e.queryTracers, tracer)
+ }
+ }
+}
+
+// EvalPartialNamespace returns an argument that sets the namespace to use for
+// partial evaluation results. The namespace must be a valid package path
+// component.
+func EvalPartialNamespace(ns string) EvalOption {
+ return func(e *EvalContext) {
+ e.partialNamespace = ns
+ }
+}
+
+// EvalUnknowns returns an argument that sets the values to treat as
+// unknown during partial evaluation.
+func EvalUnknowns(unknowns []string) EvalOption {
+ return func(e *EvalContext) {
+ e.unknowns = unknowns
+ }
+}
+
+// EvalDisableInlining returns an argument that adds a set of paths to exclude from
+// partial evaluation inlining.
+func EvalDisableInlining(paths []ast.Ref) EvalOption {
+ return func(e *EvalContext) {
+ e.disableInlining = paths
+ }
+}
+
+// EvalParsedUnknowns returns an argument that sets the values to treat
+// as unknown during partial evaluation.
+func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption {
+ return func(e *EvalContext) {
+ e.parsedUnknowns = unknowns
+ }
+}
+
+// EvalRuleIndexing will disable indexing optimizations for the
+// evaluation. This should only be used when tracing in debug mode.
+func EvalRuleIndexing(enabled bool) EvalOption {
+ return func(e *EvalContext) {
+ e.indexing = enabled
+ }
+}
+
+// EvalTime sets the wall clock time to use during policy evaluation.
+// time.now_ns() calls will return this value.
+func EvalTime(x time.Time) EvalOption {
+ return func(e *EvalContext) {
+ e.time = x
+ }
+}
+
+// EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
+// during evaluation.
+func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption {
+ return func(e *EvalContext) {
+ e.interQueryBuiltinCache = c
+ }
+}
+
+// EvalResolver sets a Resolver for a specified ref path for this evaluation.
+func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption {
+ return func(e *EvalContext) {
+ e.resolvers = append(e.resolvers, refResolver{ref, r})
+ }
+}
+
+func (pq preparedQuery) Modules() map[string]*ast.Module {
+ mods := make(map[string]*ast.Module)
+
+ for name, mod := range pq.r.parsedModules {
+ mods[name] = mod
+ }
+
+ for path, b := range pq.r.bundles {
+ for name, mod := range b.ParsedModules(path) {
+ mods[name] = mod
+ }
+ }
+
+ return mods
+}
+
+// newEvalContext creates a new EvalContext overlaying any EvalOptions over top
+// the Rego object on the preparedQuery. The returned function should be called
+// once the evaluation is complete to close any transactions that might have
+// been opened.
+func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) {
+ ectx := &EvalContext{
+ hasInput: false,
+ rawInput: nil,
+ parsedInput: nil,
+ metrics: nil,
+ txn: nil,
+ instrument: false,
+ instrumentation: nil,
+ partialNamespace: pq.r.partialNamespace,
+ queryTracers: nil,
+ unknowns: pq.r.unknowns,
+ parsedUnknowns: pq.r.parsedUnknowns,
+ compiledQuery: compiledQuery{},
+ indexing: true,
+ resolvers: pq.r.resolvers,
+ }
+
+ for _, o := range options {
+ o(ectx)
+ }
+
+ if ectx.metrics == nil {
+ ectx.metrics = metrics.New()
+ }
+
+ if ectx.instrument {
+ ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics)
+ }
+
+ // Default to an empty "finish" function
+ finishFunc := func(context.Context) {}
+
+ var err error
+ ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining)
+ if err != nil {
+ return nil, finishFunc, err
+ }
+
+ if ectx.txn == nil {
+ ectx.txn, err = pq.r.store.NewTransaction(ctx)
+ if err != nil {
+ return nil, finishFunc, err
+ }
+ finishFunc = func(ctx context.Context) {
+ pq.r.store.Abort(ctx, ectx.txn)
+ }
+ }
+
+ // If we didn't get an input specified in the Eval options
+ // then fall back to the Rego object's input fields.
+ if !ectx.hasInput {
+ ectx.rawInput = pq.r.rawInput
+ ectx.parsedInput = pq.r.parsedInput
+ }
+
+ if ectx.parsedInput == nil {
+ if ectx.rawInput == nil {
+ // Fall back to the original Rego objects input if none was specified
+ // Note that it could still be nil
+ ectx.rawInput = pq.r.rawInput
+ }
+ ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics)
+ if err != nil {
+ return nil, finishFunc, err
+ }
+ }
+
+ return ectx, finishFunc, nil
+}
+
+// PreparedEvalQuery holds the prepared Rego state that has been pre-processed
+// for subsequent evaluations.
+type PreparedEvalQuery struct {
+ preparedQuery
+}
+
+// Eval evaluates this PartialResult's Rego object with additional eval options
+// and returns a ResultSet.
+// If options are provided they will override the original Rego options respective value.
+// The original Rego object transaction will *not* be re-used. A new transaction will be opened
+// if one is not provided with an EvalOption.
+func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) {
+ ectx, finish, err := pq.newEvalContext(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ defer finish(ctx)
+
+ ectx.compiledQuery = pq.r.compiledQueries[evalQueryType]
+
+ return pq.r.eval(ctx, ectx)
+}
+
+// PreparedPartialQuery holds the prepared Rego state that has been pre-processed
+// for partial evaluations.
+type PreparedPartialQuery struct {
+ preparedQuery
+}
+
+// Partial runs partial evaluation on the prepared query and returns the result.
+// The original Rego object transaction will *not* be re-used. A new transaction will be opened
+// if one is not provided with an EvalOption.
+func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) {
+ ectx, finish, err := pq.newEvalContext(ctx, options)
+ if err != nil {
+ return nil, err
+ }
+ defer finish(ctx)
+
+ ectx.compiledQuery = pq.r.compiledQueries[partialQueryType]
+
+ return pq.r.partial(ctx, ectx)
+}
+
+// Result defines the output of Rego evaluation.
+type Result struct {
+ Expressions []*ExpressionValue `json:"expressions"`
+ Bindings Vars `json:"bindings,omitempty"`
+}
+
+func newResult() Result {
+ return Result{
+ Bindings: Vars{},
+ }
+}
+
+// Location defines a position in a Rego query or module.
+type Location struct {
+ Row int `json:"row"`
+ Col int `json:"col"`
+}
+
+// ExpressionValue defines the value of an expression in a Rego query.
+type ExpressionValue struct {
+ Value interface{} `json:"value"`
+ Text string `json:"text"`
+ Location *Location `json:"location"`
+}
+
+func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue {
+ result := &ExpressionValue{
+ Value: value,
+ }
+ if expr.Location != nil {
+ result.Text = string(expr.Location.Text)
+ result.Location = &Location{
+ Row: expr.Location.Row,
+ Col: expr.Location.Col,
+ }
+ }
+ return result
+}
+
+func (ev *ExpressionValue) String() string {
+ return fmt.Sprint(ev.Value)
+}
+
+// ResultSet represents a collection of output from Rego evaluation. An empty
+// result set represents an undefined query.
+type ResultSet []Result
+
+// Vars represents a collection of variable bindings. The keys are the variable
+// names and the values are the binding values.
+type Vars map[string]interface{}
+
+// WithoutWildcards returns a copy of v with wildcard variables removed.
+func (v Vars) WithoutWildcards() Vars {
+ n := Vars{}
+ for k, v := range v {
+ if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() {
+ continue
+ }
+ n[k] = v
+ }
+ return n
+}
+
+// Errors represents a collection of errors returned when evaluating Rego.
+type Errors []error
+
+func (errs Errors) Error() string {
+ if len(errs) == 0 {
+ return "no error"
+ }
+ if len(errs) == 1 {
+ return fmt.Sprintf("1 error occurred: %v", errs[0].Error())
+ }
+ buf := []string{fmt.Sprintf("%v errors occurred", len(errs))}
+ for _, err := range errs {
+ buf = append(buf, err.Error())
+ }
+ return strings.Join(buf, "\n")
+}
+
+var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective")
+
+// IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by
+// this package to indicate that partial evaluation was ineffective.
+func IsPartialEvaluationNotEffectiveErr(err error) bool {
+ errs, ok := err.(Errors)
+ if !ok {
+ return false
+ }
+ return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective
+}
+
+type compiledQuery struct {
+ query ast.Body
+ compiler ast.QueryCompiler
+}
+
+type queryType int
+
+// Define a query type for each of the top level Rego
+// API's that compile queries differently.
+const (
+ evalQueryType queryType = iota
+ partialResultQueryType queryType = iota
+ partialQueryType queryType = iota
+ compileQueryType queryType = iota
+)
+
+type loadPaths struct {
+ paths []string
+ filter loader.Filter
+}
+
+// Rego constructs a query and can be evaluated to obtain results.
+type Rego struct {
+ query string
+ parsedQuery ast.Body
+ compiledQueries map[queryType]compiledQuery
+ pkg string
+ parsedPackage *ast.Package
+ imports []string
+ parsedImports []*ast.Import
+ rawInput *interface{}
+ parsedInput ast.Value
+ unknowns []string
+ parsedUnknowns []*ast.Term
+ disableInlining []string
+ shallowInlining bool
+ skipPartialNamespace bool
+ partialNamespace string
+ modules []rawModule
+ parsedModules map[string]*ast.Module
+ compiler *ast.Compiler
+ store storage.Store
+ ownStore bool
+ txn storage.Transaction
+ metrics metrics.Metrics
+ queryTracers []topdown.QueryTracer
+ tracebuf *topdown.BufferTracer
+ trace bool
+ instrumentation *topdown.Instrumentation
+ instrument bool
+ capture map[*ast.Expr]ast.Var // map exprs to generated capture vars
+ termVarID int
+ dump io.Writer
+ runtime *ast.Term
+ time time.Time
+ builtinDecls map[string]*ast.Builtin
+ builtinFuncs map[string]*topdown.Builtin
+ unsafeBuiltins map[string]struct{}
+ loadPaths loadPaths
+ bundlePaths []string
+ bundles map[string]*bundle.Bundle
+ skipBundleVerification bool
+ interQueryBuiltinCache cache.InterQueryCache
+ strictBuiltinErrors bool
+ resolvers []refResolver
+}
+
+// Function represents a built-in function that is callable in Rego.
+type Function struct {
+ Name string
+ Decl *types.Function
+ Memoize bool
+}
+
+// BuiltinContext contains additional attributes from the evaluator that
+// built-in functions can use, e.g., the request context.Context, caches, etc.
+type BuiltinContext = topdown.BuiltinContext
+
+type (
+ // Builtin1 defines a built-in function that accepts 1 argument.
+ Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error)
+
+ // Builtin2 defines a built-in function that accepts 2 arguments.
+ Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error)
+
+ // Builtin3 defines a built-in function that accepts 3 argument.
+ Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error)
+
+ // Builtin4 defines a built-in function that accepts 4 argument.
+ Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error)
+
+ // BuiltinDyn defines a built-in function that accepts a list of arguments.
+ BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error)
+)
+
+// RegisterBuiltin1 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin1(decl *Function, impl Builtin1) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltin2 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin2(decl *Function, impl Builtin2) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltin3 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin3(decl *Function, impl Builtin3) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltin4 adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltin4(decl *Function, impl Builtin4) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime.
+func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) {
+ ast.RegisterBuiltin(&ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ })
+ topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function1 returns an option that adds a built-in function to the Rego object.
+func Function1(decl *Function, f Builtin1) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function2 returns an option that adds a built-in function to the Rego object.
+func Function2(decl *Function, f Builtin2) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function3 returns an option that adds a built-in function to the Rego object.
+func Function3(decl *Function, f Builtin3) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// Function4 returns an option that adds a built-in function to the Rego object.
+func Function4(decl *Function, f Builtin4) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// FunctionDyn returns an option that adds a built-in function to the Rego object.
+func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) {
+ return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) })
+ return finishFunction(decl.Name, bctx, result, err, iter)
+ })
+}
+
+// FunctionDecl returns an option that adds a custom-built-in function
+// __declaration__. NO implementation is provided. This is used for
+// non-interpreter execution envs (e.g., Wasm).
+func FunctionDecl(decl *Function) func(*Rego) {
+ return newDecl(decl)
+}
+
+func newDecl(decl *Function) func(*Rego) {
+ return func(r *Rego) {
+ r.builtinDecls[decl.Name] = &ast.Builtin{
+ Name: decl.Name,
+ Decl: decl.Decl,
+ }
+ }
+}
+
+type memo struct {
+ term *ast.Term
+ err error
+}
+
+type memokey string
+
+func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) {
+
+ if !decl.Memoize {
+ return ifEmpty()
+ }
+
+ // NOTE(tsandall): we assume memoization is applied to infrequent built-in
+ // calls that do things like fetch data from remote locations. As such,
+ // converting the terms to strings is acceptable for now.
+ var b strings.Builder
+ if _, err := b.WriteString(decl.Name); err != nil {
+ return nil, err
+ }
+
+ // The term slice _may_ include an output term depending on how the caller
+ // referred to the built-in function. Only use the arguments as the cache
+ // key. Unification ensures we don't get false positive matches.
+ for i := 0; i < len(decl.Decl.Args()); i++ {
+ if _, err := b.WriteString(terms[i].String()); err != nil {
+ return nil, err
+ }
+ }
+
+ key := memokey(b.String())
+ hit, ok := bctx.Cache.Get(key)
+ var m memo
+ if ok {
+ m = hit.(memo)
+ } else {
+ m.term, m.err = ifEmpty()
+ bctx.Cache.Put(key, m)
+ }
+
+ return m.term, m.err
+}
+
+// Dump returns an argument that sets the writer to dump debugging information to.
+func Dump(w io.Writer) func(r *Rego) {
+ return func(r *Rego) {
+ r.dump = w
+ }
+}
+
+// Query returns an argument that sets the Rego query.
+func Query(q string) func(r *Rego) {
+ return func(r *Rego) {
+ r.query = q
+ }
+}
+
+// ParsedQuery returns an argument that sets the Rego query.
+func ParsedQuery(q ast.Body) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedQuery = q
+ }
+}
+
+// Package returns an argument that sets the Rego package on the query's
+// context.
+func Package(p string) func(r *Rego) {
+ return func(r *Rego) {
+ r.pkg = p
+ }
+}
+
+// ParsedPackage returns an argument that sets the Rego package on the query's
+// context.
+func ParsedPackage(pkg *ast.Package) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedPackage = pkg
+ }
+}
+
+// Imports returns an argument that adds a Rego import to the query's context.
+func Imports(p []string) func(r *Rego) {
+ return func(r *Rego) {
+ r.imports = append(r.imports, p...)
+ }
+}
+
+// ParsedImports returns an argument that adds Rego imports to the query's
+// context.
+func ParsedImports(imp []*ast.Import) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedImports = append(r.parsedImports, imp...)
+ }
+}
+
+// Input returns an argument that sets the Rego input document. Input should be
+// a native Go value representing the input document.
+func Input(x interface{}) func(r *Rego) {
+ return func(r *Rego) {
+ r.rawInput = &x
+ }
+}
+
+// ParsedInput returns an argument that sets the Rego input document.
+func ParsedInput(x ast.Value) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedInput = x
+ }
+}
+
+// Unknowns returns an argument that sets the values to treat as unknown during
+// partial evaluation.
+func Unknowns(unknowns []string) func(r *Rego) {
+ return func(r *Rego) {
+ r.unknowns = unknowns
+ }
+}
+
+// ParsedUnknowns returns an argument that sets the values to treat as unknown
+// during partial evaluation.
+func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) {
+ return func(r *Rego) {
+ r.parsedUnknowns = unknowns
+ }
+}
+
+// DisableInlining adds a set of paths to exclude from partial evaluation inlining.
+func DisableInlining(paths []string) func(r *Rego) {
+ return func(r *Rego) {
+ r.disableInlining = paths
+ }
+}
+
+// ShallowInlining prevents rules that depend on unknown values from being inlined.
+// Rules that only depend on known values are inlined.
+func ShallowInlining(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.shallowInlining = yes
+ }
+}
+
+// SkipPartialNamespace disables namespacing of partial evalution results for support
+// rules generated from policy. Synthetic support rules are still namespaced.
+func SkipPartialNamespace(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.skipPartialNamespace = true
+ }
+}
+
+// PartialNamespace returns an argument that sets the namespace to use for
+// partial evaluation results. The namespace must be a valid package path
+// component.
+func PartialNamespace(ns string) func(r *Rego) {
+ return func(r *Rego) {
+ r.partialNamespace = ns
+ }
+}
+
+// Module returns an argument that adds a Rego module.
+func Module(filename, input string) func(r *Rego) {
+ return func(r *Rego) {
+ r.modules = append(r.modules, rawModule{
+ filename: filename,
+ module: input,
+ })
+ }
+}
+
+// ParsedModule returns an argument that adds a parsed Rego module. If a string
+// module with the same filename name is added, it will override the parsed
+// module.
+func ParsedModule(module *ast.Module) func(*Rego) {
+ return func(r *Rego) {
+ var filename string
+ if module.Package.Location != nil {
+ filename = module.Package.Location.File
+ } else {
+ filename = fmt.Sprintf("module_%p.rego", module)
+ }
+ r.parsedModules[filename] = module
+ }
+}
+
+// Load returns an argument that adds a filesystem path to load data
+// and Rego modules from. Any file with a *.rego, *.yaml, or *.json
+// extension will be loaded. The path can be either a directory or file,
+// directories are loaded recursively. The optional ignore string patterns
+// can be used to filter which files are used.
+// The Load option can only be used once.
+// Note: Loading files will require a write transaction on the store.
+func Load(paths []string, filter loader.Filter) func(r *Rego) {
+ return func(r *Rego) {
+ r.loadPaths = loadPaths{paths, filter}
+ }
+}
+
+// LoadBundle returns an argument that adds a filesystem path to load
+// a bundle from. The path can be a compressed bundle file or a directory
+// to be loaded as a bundle.
+// Note: Loading bundles will require a write transaction on the store.
+func LoadBundle(path string) func(r *Rego) {
+ return func(r *Rego) {
+ r.bundlePaths = append(r.bundlePaths, path)
+ }
+}
+
+// ParsedBundle returns an argument that adds a bundle to be loaded.
+func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) {
+ return func(r *Rego) {
+ r.bundles[name] = b
+ }
+}
+
+// Compiler returns an argument that sets the Rego compiler.
+func Compiler(c *ast.Compiler) func(r *Rego) {
+ return func(r *Rego) {
+ r.compiler = c
+ }
+}
+
+// Store returns an argument that sets the policy engine's data storage layer.
+//
+// If using the Load, LoadBundle, or ParsedBundle options then a transaction
+// must also be provided via the Transaction() option. After loading files
+// or bundles the transaction should be aborted or committed.
+func Store(s storage.Store) func(r *Rego) {
+ return func(r *Rego) {
+ r.store = s
+ }
+}
+
+// Transaction returns an argument that sets the transaction to use for storage
+// layer operations.
+//
+// Requires the store associated with the transaction to be provided via the
+// Store() option. If using Load(), LoadBundle(), or ParsedBundle() options
+// the transaction will likely require write params.
+func Transaction(txn storage.Transaction) func(r *Rego) {
+ return func(r *Rego) {
+ r.txn = txn
+ }
+}
+
+// Metrics returns an argument that sets the metrics collection.
+func Metrics(m metrics.Metrics) func(r *Rego) {
+ return func(r *Rego) {
+ r.metrics = m
+ }
+}
+
+// Instrument returns an argument that enables instrumentation for diagnosing
+// performance issues.
+func Instrument(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.instrument = yes
+ }
+}
+
+// Trace returns an argument that enables tracing on r.
+func Trace(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.trace = yes
+ }
+}
+
+// Tracer returns an argument that adds a query tracer to r.
+// Deprecated: Use QueryTracer instead.
+func Tracer(t topdown.Tracer) func(r *Rego) {
+ return func(r *Rego) {
+ if t != nil {
+ r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t))
+ }
+ }
+}
+
+// QueryTracer returns an argument that adds a query tracer to r.
+func QueryTracer(t topdown.QueryTracer) func(r *Rego) {
+ return func(r *Rego) {
+ if t != nil {
+ r.queryTracers = append(r.queryTracers, t)
+ }
+ }
+}
+
+// Runtime returns an argument that sets the runtime data to provide to the
+// evaluation engine.
+func Runtime(term *ast.Term) func(r *Rego) {
+ return func(r *Rego) {
+ r.runtime = term
+ }
+}
+
+// Time sets the wall clock time to use during policy evaluation. Prepared queries
+// do not inherit this parameter. Use EvalTime to set the wall clock time when
+// executing a prepared query.
+func Time(x time.Time) func(r *Rego) {
+ return func(r *Rego) {
+ r.time = x
+ }
+}
+
+// PrintTrace is a helper function to write a human-readable version of the
+// trace to the writer w.
+func PrintTrace(w io.Writer, r *Rego) {
+ if r == nil || r.tracebuf == nil {
+ return
+ }
+ topdown.PrettyTrace(w, *r.tracebuf)
+}
+
+// PrintTraceWithLocation is a helper function to write a human-readable version of the
+// trace to the writer w.
+func PrintTraceWithLocation(w io.Writer, r *Rego) {
+ if r == nil || r.tracebuf == nil {
+ return
+ }
+ topdown.PrettyTraceWithLocation(w, *r.tracebuf)
+}
+
+// UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow.
+// This option is ignored for module compilation if the caller supplies the
+// compiler. This option is always honored for query compilation. Provide an
+// empty (non-nil) map to disable checks on queries.
+func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) {
+ return func(r *Rego) {
+ r.unsafeBuiltins = unsafeBuiltins
+ }
+}
+
+// SkipBundleVerification skips verification of a signed bundle.
+func SkipBundleVerification(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.skipBundleVerification = yes
+ }
+}
+
+// InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize
+// during evaluation.
+func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) {
+ return func(r *Rego) {
+ r.interQueryBuiltinCache = c
+ }
+}
+
+// StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
+func StrictBuiltinErrors(yes bool) func(r *Rego) {
+ return func(r *Rego) {
+ r.strictBuiltinErrors = yes
+ }
+}
+
+// Resolver sets a Resolver for a specified ref path.
+func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) {
+ return func(rego *Rego) {
+ rego.resolvers = append(rego.resolvers, refResolver{ref, r})
+ }
+}
+
// New returns a new Rego object configured by the given functional options.
// Options are applied first; defaults (compiler, store, metrics, partial
// namespace) are only filled in for settings the options did not set. The
// order below is significant: the compiler must be constructed after options
// run so it sees caller-supplied unsafe built-ins and declarations.
func New(options ...func(r *Rego)) *Rego {

	// Maps are pre-initialized so options (e.g. ParsedModule, ParsedBundle)
	// can write into them directly.
	r := &Rego{
		parsedModules:   map[string]*ast.Module{},
		capture:         map[*ast.Expr]ast.Var{},
		compiledQueries: map[queryType]compiledQuery{},
		builtinDecls:    map[string]*ast.Builtin{},
		builtinFuncs:    map[string]*topdown.Builtin{},
		bundles:         map[string]*bundle.Bundle{},
	}

	for _, option := range options {
		option(r)
	}

	if r.compiler == nil {
		r.compiler = ast.NewCompiler().
			WithUnsafeBuiltins(r.unsafeBuiltins).
			WithBuiltins(r.builtinDecls)
	}

	// ownStore records whether we created the store; only an owned store may
	// be written to later (see getTxn).
	if r.store == nil {
		r.store = inmem.New()
		r.ownStore = true
	} else {
		r.ownStore = false
	}

	if r.metrics == nil {
		r.metrics = metrics.New()
	}

	if r.instrument {
		r.instrumentation = topdown.NewInstrumentation(r.metrics)
		r.compiler.WithMetrics(r.metrics)
	}

	// Trace implies an internal buffer tracer appended to any user tracers.
	if r.trace {
		r.tracebuf = topdown.NewBufferTracer()
		r.queryTracers = append(r.queryTracers, r.tracebuf)
	}

	if r.partialNamespace == "" {
		r.partialNamespace = defaultPartialNamespace
	}

	return r
}
+
// Eval evaluates this Rego object and returns a ResultSet.
func (r *Rego) Eval(ctx context.Context) (ResultSet, error) {
	var err error
	var txnClose transactionCloser
	// Acquire (or reuse) a storage transaction. txnClose must be invoked on
	// every return path so the transaction is committed or aborted.
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return nil, err
	}

	pq, err := r.PrepareForEval(ctx)
	if err != nil {
		txnClose(ctx, err) // Ignore error
		return nil, err
	}

	// Carry this object's settings over to the prepared query's evaluation.
	evalArgs := []EvalOption{
		EvalTransaction(r.txn),
		EvalMetrics(r.metrics),
		EvalInstrument(r.instrument),
		EvalTime(r.time),
		EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
	}

	for _, qt := range r.queryTracers {
		evalArgs = append(evalArgs, EvalQueryTracer(qt))
	}

	for i := range r.resolvers {
		evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
	}

	rs, err := pq.Eval(ctx, evalArgs...)
	txnErr := txnClose(ctx, err) // Always call closer
	// Prefer the evaluation error; otherwise surface the close error.
	if err == nil {
		err = txnErr
	}
	return rs, err
}
+
// PartialEval has been deprecated and renamed to PartialResult.
//
// Deprecated: Use PartialResult instead.
func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) {
	return r.PartialResult(ctx)
}
+
// PartialResult partially evaluates this Rego object and returns a PartialResult,
// which captures the compiler, store, and residual query produced by partial
// evaluation so callers can prepare cheap follow-up queries.
func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) {
	var err error
	var txnClose transactionCloser
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return PartialResult{}, err
	}

	// Partial evaluation happens inside PrepareForEval via WithPartialEval().
	pq, err := r.PrepareForEval(ctx, WithPartialEval())
	txnErr := txnClose(ctx, err) // Always call closer
	if err != nil {
		return PartialResult{}, err
	}
	if txnErr != nil {
		return PartialResult{}, txnErr
	}

	// Snapshot the state needed to build follow-up Rego objects.
	pr := PartialResult{
		compiler:     pq.r.compiler,
		store:        pq.r.store,
		body:         pq.r.parsedQuery,
		builtinDecls: pq.r.builtinDecls,
		builtinFuncs: pq.r.builtinFuncs,
	}

	return pr, nil
}
+
// Partial runs partial evaluation on r and returns the result: the residual
// queries and any support modules generated from the policy.
func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) {
	var err error
	var txnClose transactionCloser
	// txnClose must be invoked on every return path.
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return nil, err
	}

	pq, err := r.PrepareForPartial(ctx)
	if err != nil {
		txnClose(ctx, err) // Ignore error
		return nil, err
	}

	// Carry this object's settings over to the prepared partial query.
	evalArgs := []EvalOption{
		EvalTransaction(r.txn),
		EvalMetrics(r.metrics),
		EvalInstrument(r.instrument),
		EvalInterQueryBuiltinCache(r.interQueryBuiltinCache),
	}

	for _, t := range r.queryTracers {
		evalArgs = append(evalArgs, EvalQueryTracer(t))
	}

	for i := range r.resolvers {
		evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r))
	}

	pqs, err := pq.Partial(ctx, evalArgs...)
	txnErr := txnClose(ctx, err) // Always call closer
	// Prefer the evaluation error; otherwise surface the close error.
	if err == nil {
		err = txnErr
	}
	return pqs, err
}
+
// CompileOption defines a function to set options on Compile calls.
type CompileOption func(*CompileContext)

// CompileContext contains options for Compile calls.
type CompileContext struct {
	// partial controls whether partial evaluation runs before planning.
	partial bool
}
+
+// CompilePartial defines an option to control whether partial evaluation is run
+// before the query is planned and compiled.
+func CompilePartial(yes bool) CompileOption {
+ return func(cfg *CompileContext) {
+ cfg.partial = yes
+ }
+}
+
+// Compile returns a compiled policy query.
+func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) {
+
+ var cfg CompileContext
+
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+
+ var queries []ast.Body
+ var modules []*ast.Module
+
+ if cfg.partial {
+
+ pq, err := r.Partial(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if r.dump != nil {
+ if len(pq.Queries) != 0 {
+ msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries))
+ fmt.Fprintln(r.dump, msg)
+ fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
+ for i := range pq.Queries {
+ fmt.Println(pq.Queries[i])
+ }
+ fmt.Fprintln(r.dump)
+ }
+ if len(pq.Support) != 0 {
+ msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support))
+ fmt.Fprintln(r.dump, msg)
+ fmt.Fprintln(r.dump, strings.Repeat("-", len(msg)))
+ for i := range pq.Support {
+ fmt.Println(pq.Support[i])
+ }
+ fmt.Fprintln(r.dump)
+ }
+ }
+
+ queries = pq.Queries
+ modules = pq.Support
+
+ for _, module := range r.compiler.Modules {
+ modules = append(modules, module)
+ }
+ } else {
+ var err error
+ // If creating a new transacation it should be closed before calling the
+ // planner to avoid holding open the transaction longer than needed.
+ //
+ // TODO(tsandall): in future, planner could make use of store, in which
+ // case this will need to change.
+ var txnClose transactionCloser
+ r.txn, txnClose, err = r.getTxn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = r.prepare(ctx, compileQueryType, nil)
+ txnErr := txnClose(ctx, err) // Always call closer
+ if err != nil {
+ return nil, err
+ }
+ if txnErr != nil {
+ return nil, err
+ }
+
+ for _, module := range r.compiler.Modules {
+ modules = append(modules, module)
+ }
+
+ queries = []ast.Body{r.compiledQueries[compileQueryType].query}
+ }
+
+ decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap))
+
+ for k, v := range ast.BuiltinMap {
+ decls[k] = v
+ }
+
+ for k, v := range r.builtinDecls {
+ decls[k] = v
+ }
+
+ const queryName = "eval" // NOTE(tsandall): the query name is arbitrary
+
+ policy, err := planner.New().
+ WithQueries([]planner.QuerySet{
+ {
+ Name: queryName,
+ Queries: queries,
+ RewrittenVars: r.compiledQueries[compileQueryType].compiler.RewrittenVars(),
+ },
+ }).
+ WithModules(modules).
+ WithBuiltinDecls(decls).
+ Plan()
+ if err != nil {
+ return nil, err
+ }
+
+ if r.dump != nil {
+ fmt.Fprintln(r.dump, "PLAN:")
+ fmt.Fprintln(r.dump, "-----")
+ ir.Pretty(r.dump, policy)
+ fmt.Fprintln(r.dump)
+ }
+
+ m, err := wasm.New().WithPolicy(policy).Compile()
+ if err != nil {
+ return nil, err
+ }
+
+ var out bytes.Buffer
+
+ if err := encoding.WriteModule(&out, m); err != nil {
+ return nil, err
+ }
+
+ result := &CompileResult{
+ Bytes: out.Bytes(),
+ }
+
+ return result, nil
+}
+
// PrepareOption defines a function to set an option to control
// the behavior of the Prepare call.
type PrepareOption func(*PrepareConfig)

// PrepareConfig holds settings to control the behavior of the
// Prepare call.
type PrepareConfig struct {
	// doPartialEval runs partial evaluation during preparation when true.
	doPartialEval bool
	// disableInlining, when non-nil, overrides the Rego object's
	// disable-inlining paths for this prepare call.
	disableInlining *[]string
}
+
+// WithPartialEval configures an option for PrepareForEval
+// which will have it perform partial evaluation while preparing
+// the query (similar to rego.Rego#PartialResult)
+func WithPartialEval() PrepareOption {
+ return func(p *PrepareConfig) {
+ p.doPartialEval = true
+ }
+}
+
+// WithNoInline adds a set of paths to exclude from partial evaluation inlining.
+func WithNoInline(paths []string) PrepareOption {
+ return func(p *PrepareConfig) {
+ p.disableInlining = &paths
+ }
+}
+
// PrepareForEval will parse inputs, modules, and query arguments in preparation
// of evaluating them. When WithPartialEval is supplied, partial evaluation is
// performed first and the prepared query is built from its result.
func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) {
	if !r.hasQuery() {
		return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query")
	}

	pCfg := &PrepareConfig{}
	for _, o := range opts {
		o(pCfg)
	}

	var err error
	var txnClose transactionCloser
	// txnClose must be invoked on every return path below.
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return PreparedEvalQuery{}, err
	}

	// If the caller wanted to do partial evaluation as part of preparation
	// do it now and use the new Rego object.
	if pCfg.doPartialEval {

		pr, err := r.partialResult(ctx, pCfg)
		if err != nil {
			txnClose(ctx, err) // Ignore error
			return PreparedEvalQuery{}, err
		}

		// Prepare the new query using the result of partial evaluation
		pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx)
		txnErr := txnClose(ctx, err)
		if err != nil {
			return pq, err
		}
		return pq, txnErr
	}

	// Non-partial path: run the standard prepare pipeline with an extra
	// stage that rewrites the query to capture expression values.
	err = r.prepare(ctx, evalQueryType, []extraStage{
		{
			after: "ResolveRefs",
			stage: ast.QueryCompilerStageDefinition{
				Name:       "RewriteToCaptureValue",
				MetricName: "query_compile_stage_rewrite_to_capture_value",
				Stage:      r.rewriteQueryToCaptureValue,
			},
		},
	})
	txnErr := txnClose(ctx, err) // Always call closer
	if err != nil {
		return PreparedEvalQuery{}, err
	}
	if txnErr != nil {
		return PreparedEvalQuery{}, txnErr
	}

	return PreparedEvalQuery{preparedQuery{r, pCfg}}, err
}
+
// PrepareForPartial will parse inputs, modules, and query arguments in preparation
// of partially evaluating them.
func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) {
	if !r.hasQuery() {
		return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query")
	}

	pCfg := &PrepareConfig{}
	for _, o := range opts {
		o(pCfg)
	}

	var err error
	var txnClose transactionCloser
	// txnClose must be invoked on every return path below.
	r.txn, txnClose, err = r.getTxn(ctx)
	if err != nil {
		return PreparedPartialQuery{}, err
	}

	// Extra stage rewrites == to = so partial evaluation can unify and
	// simplify the residual queries.
	err = r.prepare(ctx, partialQueryType, []extraStage{
		{
			after: "CheckSafety",
			stage: ast.QueryCompilerStageDefinition{
				Name:       "RewriteEquals",
				MetricName: "query_compile_stage_rewrite_equals",
				Stage:      r.rewriteEqualsForPartialQueryCompile,
			},
		},
	})
	txnErr := txnClose(ctx, err) // Always call closer
	if err != nil {
		return PreparedPartialQuery{}, err
	}
	if txnErr != nil {
		return PreparedPartialQuery{}, txnErr
	}
	return PreparedPartialQuery{preparedQuery{r, pCfg}}, err
}
+
// prepare runs the shared preparation pipeline for the given query type:
// parse input, load files and bundles into the store, parse and compile
// modules, then parse and compile (and cache) the query. Step order matters;
// see the inline comment on module compilation.
func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error {
	var err error

	r.parsedInput, err = r.parseInput()
	if err != nil {
		return err
	}

	err = r.loadFiles(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	err = r.loadBundles(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	err = r.parseModules(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	// Compile the modules *before* the query, else functions
	// defined in the module won't be found...
	err = r.compileModules(ctx, r.txn, r.metrics)
	if err != nil {
		return err
	}

	r.parsedQuery, err = r.parseQuery(r.metrics)
	if err != nil {
		return err
	}

	err = r.compileAndCacheQuery(qType, r.parsedQuery, r.metrics, extras)
	if err != nil {
		return err
	}

	return nil
}
+
// parseModules parses modules stored in the policy store plus any raw modules
// supplied via the Module option, accumulating results in r.parsedModules.
// Parse errors are collected and returned together rather than failing fast.
func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
	if len(r.modules) == 0 {
		return nil
	}

	m.Timer(metrics.RegoModuleParse).Start()
	defer m.Timer(metrics.RegoModuleParse).Stop()
	var errs Errors

	// Parse any modules that are saved to the store, but only if
	// another compile step is going to occur (ie. we have parsed modules
	// that need to be compiled).
	ids, err := r.store.ListPolicies(ctx, txn)
	if err != nil {
		return err
	}

	for _, id := range ids {
		// if it is already on the compiler we're using
		// then don't bother to re-parse it from source
		if _, haveMod := r.compiler.Modules[id]; haveMod {
			continue
		}

		bs, err := r.store.GetPolicy(ctx, txn, id)
		if err != nil {
			return err
		}

		parsed, err := ast.ParseModule(id, string(bs))
		if err != nil {
			errs = append(errs, err)
		}

		r.parsedModules[id] = parsed
	}

	// Parse any passed in as arguments to the Rego object
	for _, module := range r.modules {
		p, err := module.Parse()
		if err != nil {
			errs = append(errs, err)
		}
		r.parsedModules[module.filename] = p
	}

	if len(errs) > 0 {
		return errs
	}

	return nil
}
+
// loadFiles loads Rego modules and data documents from the paths configured
// via the Load option. Modules are recorded in r.parsedModules; data documents
// are written to the store (requires a write transaction).
func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
	if len(r.loadPaths.paths) == 0 {
		return nil
	}

	m.Timer(metrics.RegoLoadFiles).Start()
	defer m.Timer(metrics.RegoLoadFiles).Stop()

	result, err := loader.NewFileLoader().WithMetrics(m).Filtered(r.loadPaths.paths, r.loadPaths.filter)
	if err != nil {
		return err
	}
	for name, mod := range result.Modules {
		r.parsedModules[name] = mod.Parsed
	}

	if len(result.Documents) > 0 {
		err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents)
		if err != nil {
			return err
		}
	}
	return nil
}
+
+func (r *Rego) loadBundles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {
+ if len(r.bundlePaths) == 0 {
+ return nil
+ }
+
+ m.Timer(metrics.RegoLoadBundles).Start()
+ defer m.Timer(metrics.RegoLoadBundles).Stop()
+
+ for _, path := range r.bundlePaths {
+ bndl, err := loader.NewFileLoader().WithMetrics(m).WithSkipBundleVerification(r.skipBundleVerification).AsBundle(path)
+ if err != nil {
+ return fmt.Errorf("loading error: %s", err)
+ }
+ r.bundles[path] = bndl
+ }
+ return nil
+}
+
+func (r *Rego) parseInput() (ast.Value, error) {
+ if r.parsedInput != nil {
+ return r.parsedInput, nil
+ }
+ return r.parseRawInput(r.rawInput, r.metrics)
+}
+
// parseRawInput converts a raw Go input value into an ast.Value. A nil
// rawInput yields a nil value (no input document). The parse is timed via
// the supplied metrics.
func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) {
	var input ast.Value

	if rawInput == nil {
		return input, nil
	}

	m.Timer(metrics.RegoInputParse).Start()
	defer m.Timer(metrics.RegoInputParse).Stop()

	rawPtr := util.Reference(rawInput)

	// roundtrip through json: this turns slices (e.g. []string, []bool) into
	// []interface{}, the only array type ast.InterfaceToValue can work with
	if err := util.RoundTrip(rawPtr); err != nil {
		return nil, err
	}

	return ast.InterfaceToValue(*rawPtr)
}
+
+func (r *Rego) parseQuery(m metrics.Metrics) (ast.Body, error) {
+ if r.parsedQuery != nil {
+ return r.parsedQuery, nil
+ }
+
+ m.Timer(metrics.RegoQueryParse).Start()
+ defer m.Timer(metrics.RegoQueryParse).Stop()
+
+ return ast.ParseBody(r.query)
+}
+
// compileModules activates any configured bundles and compiles all parsed
// modules in a single pass, then ensures WASM resolvers from the store are
// registered when the caller supplied none.
func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error {

	// Only compile again if there are new modules.
	if len(r.bundles) > 0 || len(r.parsedModules) > 0 {

		// The bundle.Activate call will activate any bundles passed in
		// (ie compile + handle data store changes), and include any of
		// the additional modules passed in. If no bundles are provided
		// it will only compile the passed in modules.
		// Use this as the single-point of compiling everything only a
		// single time.
		opts := &bundle.ActivateOpts{
			Ctx:          ctx,
			Store:        r.store,
			Txn:          txn,
			Compiler:     r.compilerForTxn(ctx, r.store, txn),
			Metrics:      m,
			Bundles:      r.bundles,
			ExtraModules: r.parsedModules,
		}
		err := bundle.Activate(opts)
		if err != nil {
			return err
		}
	}

	// Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided.
	if len(r.resolvers) == 0 {
		resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles)
		if err != nil {
			return err
		}
		// Register every entrypoint of every loaded resolver.
		for _, rslvr := range resolvers {
			for _, ep := range rslvr.Entrypoints() {
				r.resolvers = append(r.resolvers, refResolver{ep, rslvr})
			}
		}
	}
	return nil
}
+
// compileAndCacheQuery compiles the query for the given query type and caches
// the result in r.compiledQueries; a complete cached entry short-circuits
// recompilation. Compilation time is recorded via the supplied metrics.
func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, m metrics.Metrics, extras []extraStage) error {
	m.Timer(metrics.RegoQueryCompile).Start()
	defer m.Timer(metrics.RegoQueryCompile).Stop()

	// Both query and compiler must be present for the cache entry to count.
	cachedQuery, ok := r.compiledQueries[qType]
	if ok && cachedQuery.query != nil && cachedQuery.compiler != nil {
		return nil
	}

	qc, compiled, err := r.compileQuery(query, m, extras)
	if err != nil {
		return err
	}

	// cache the query for future use
	r.compiledQueries[qType] = compiledQuery{
		query:    compiled,
		compiler: qc,
	}
	return nil
}
+
// compileQuery builds a query compiler with the configured package and imports
// (string-form imports are parsed and appended to pre-parsed ones), applies
// any extra compiler stages, and compiles the query body.
func (r *Rego) compileQuery(query ast.Body, m metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) {
	var pkg *ast.Package

	// A string-form package (Package option) takes precedence over a
	// pre-parsed one.
	if r.pkg != "" {
		var err error
		pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg))
		if err != nil {
			return nil, nil, err
		}
	} else {
		pkg = r.parsedPackage
	}

	imports := r.parsedImports

	if len(r.imports) > 0 {
		s := make([]string, len(r.imports))
		for i := range r.imports {
			s[i] = fmt.Sprintf("import %v", r.imports[i])
		}
		parsed, err := ast.ParseImports(strings.Join(s, "\n"))
		if err != nil {
			return nil, nil, err
		}
		imports = append(imports, parsed...)
	}

	qctx := ast.NewQueryContext().
		WithPackage(pkg).
		WithImports(imports)

	qc := r.compiler.QueryCompiler().
		WithContext(qctx).
		WithUnsafeBuiltins(r.unsafeBuiltins)

	for _, extra := range extras {
		qc = qc.WithStageAfter(extra.after, extra.stage)
	}

	compiled, err := qc.Compile(query)

	return qc, compiled, err

}
+
// eval executes the compiled query against the store and builds the ResultSet:
// one Result per query answer, with variable bindings (rewritten back to their
// user-visible names) and per-expression values. Returns (nil, nil) when the
// query produced no results.
func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) {

	q := topdown.NewQuery(ectx.compiledQuery.query).
		WithQueryCompiler(ectx.compiledQuery.compiler).
		WithCompiler(r.compiler).
		WithStore(r.store).
		WithTransaction(ectx.txn).
		WithBuiltins(r.builtinFuncs).
		WithMetrics(ectx.metrics).
		WithInstrumentation(ectx.instrumentation).
		WithRuntime(r.runtime).
		WithIndexing(ectx.indexing).
		WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
		WithStrictBuiltinErrors(r.strictBuiltinErrors)

	// Only override the clock when a wall time was explicitly configured.
	if !ectx.time.IsZero() {
		q = q.WithTime(ectx.time)
	}

	for i := range ectx.queryTracers {
		q = q.WithQueryTracer(ectx.queryTracers[i])
	}

	if ectx.parsedInput != nil {
		q = q.WithInput(ast.NewTerm(ectx.parsedInput))
	}

	for i := range ectx.resolvers {
		q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
	}

	// Cancel query if context is cancelled or deadline is reached.
	c := topdown.NewCancel()
	q = q.WithCancel(c)
	exit := make(chan struct{})
	defer close(exit)
	go waitForDone(ctx, exit, func() {
		c.Cancel()
	})

	rewritten := ectx.compiledQuery.compiler.RewrittenVars()
	var rs ResultSet
	err := q.Iter(ctx, func(qr topdown.QueryResult) error {
		result := newResult()
		for k := range qr {
			v, err := ast.JSON(qr[k].Value)
			if err != nil {
				return err
			}
			// Map compiler-rewritten variables back to their original names
			// and drop internal (term/generated/wildcard) variables.
			if rw, ok := rewritten[k]; ok {
				k = rw
			}
			if isTermVar(k) || k.IsGenerated() || k.IsWildcard() {
				continue
			}
			result.Bindings[string(k)] = v
		}
		// Expressions that were rewritten to capture a value report that
		// value; all others report true (they succeeded to get here).
		for _, expr := range ectx.compiledQuery.query {
			if expr.Generated {
				continue
			}
			if k, ok := r.capture[expr]; ok {
				v, err := ast.JSON(qr[k].Value)
				if err != nil {
					return err
				}
				result.Expressions = append(result.Expressions, newExpressionValue(expr, v))
			} else {
				result.Expressions = append(result.Expressions, newExpressionValue(expr, true))
			}
		}
		rs = append(rs, result)
		return nil
	})

	if err != nil {
		return nil, err
	}

	if len(rs) == 0 {
		return nil, nil
	}

	return rs, nil
}
+
// partialResult runs partial evaluation and folds the residual queries back
// into the compiler as synthetic rules under the partial namespace, returning
// a PartialResult whose body references those rules.
func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) {

	err := r.prepare(ctx, partialResultQueryType, []extraStage{
		{
			after: "ResolveRefs",
			stage: ast.QueryCompilerStageDefinition{
				Name:       "RewriteForPartialEval",
				MetricName: "query_compile_stage_rewrite_for_partial_eval",
				Stage:      r.rewriteQueryForPartialEval,
			},
		},
	})
	if err != nil {
		return PartialResult{}, err
	}

	ectx := &EvalContext{
		parsedInput:      r.parsedInput,
		metrics:          r.metrics,
		txn:              r.txn,
		partialNamespace: r.partialNamespace,
		queryTracers:     r.queryTracers,
		compiledQuery:    r.compiledQueries[partialResultQueryType],
		instrumentation:  r.instrumentation,
		indexing:         true,
		resolvers:        r.resolvers,
	}

	// A per-call disable-inlining override (WithNoInline) wins over the
	// Rego object's setting.
	disableInlining := r.disableInlining

	if pCfg.disableInlining != nil {
		disableInlining = *pCfg.disableInlining
	}

	ectx.disableInlining, err = parseStringsToRefs(disableInlining)
	if err != nil {
		return PartialResult{}, err
	}

	pq, err := r.partial(ctx, ectx)
	if err != nil {
		return PartialResult{}, err
	}

	// Construct module for queries.
	id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace)

	module, err := ast.ParseModule(id, "package "+ectx.partialNamespace)
	if err != nil {
		return PartialResult{}, fmt.Errorf("bad partial namespace")
	}

	// Each residual query becomes a __result__ rule in the synthetic module.
	module.Rules = make([]*ast.Rule, len(pq.Queries))
	for i, body := range pq.Queries {
		rule := &ast.Rule{
			Head:   ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard),
			Body:   body,
			Module: module,
		}
		module.Rules[i] = rule
		if checkPartialResultForRecursiveRefs(body, rule.Path()) {
			return PartialResult{}, Errors{errPartialEvaluationNotEffective}
		}
	}

	// Update compiler with partial evaluation output.
	r.compiler.Modules[id] = module
	for i, module := range pq.Support {
		r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module
	}

	r.metrics.Timer(metrics.RegoModuleCompile).Start()
	r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules)
	r.metrics.Timer(metrics.RegoModuleCompile).Stop()

	if r.compiler.Failed() {
		return PartialResult{}, r.compiler.Errors
	}

	result := PartialResult{
		compiler:     r.compiler,
		store:        r.store,
		body:         ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)),
		builtinDecls: r.builtinDecls,
		builtinFuncs: r.builtinFuncs,
	}

	return result, nil
}
+
// partial runs topdown partial evaluation of the compiled query, treating the
// configured unknowns (defaulting to the input document) as unknown, and
// returns the residual queries and support modules.
func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) {

	var unknowns []*ast.Term

	// Precedence: pre-parsed unknowns, then string-form unknowns, then the
	// input document by default.
	if ectx.parsedUnknowns != nil {
		unknowns = ectx.parsedUnknowns
	} else if ectx.unknowns != nil {
		unknowns = make([]*ast.Term, len(ectx.unknowns))
		for i := range ectx.unknowns {
			var err error
			unknowns[i], err = ast.ParseTerm(ectx.unknowns[i])
			if err != nil {
				return nil, err
			}
		}
	} else {
		// Use input document as unknown if caller has not specified any.
		unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)}
	}

	q := topdown.NewQuery(ectx.compiledQuery.query).
		WithQueryCompiler(ectx.compiledQuery.compiler).
		WithCompiler(r.compiler).
		WithStore(r.store).
		WithTransaction(ectx.txn).
		WithBuiltins(r.builtinFuncs).
		WithMetrics(ectx.metrics).
		WithInstrumentation(ectx.instrumentation).
		WithUnknowns(unknowns).
		WithDisableInlining(ectx.disableInlining).
		WithRuntime(r.runtime).
		WithIndexing(ectx.indexing).
		WithPartialNamespace(ectx.partialNamespace).
		WithSkipPartialNamespace(r.skipPartialNamespace).
		WithShallowInlining(r.shallowInlining).
		WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache).
		WithStrictBuiltinErrors(r.strictBuiltinErrors)

	// Only override the clock when a wall time was explicitly configured.
	if !ectx.time.IsZero() {
		q = q.WithTime(ectx.time)
	}

	for i := range ectx.queryTracers {
		q = q.WithQueryTracer(ectx.queryTracers[i])
	}

	if ectx.parsedInput != nil {
		q = q.WithInput(ast.NewTerm(ectx.parsedInput))
	}

	for i := range ectx.resolvers {
		q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r)
	}

	// Cancel query if context is cancelled or deadline is reached.
	c := topdown.NewCancel()
	q = q.WithCancel(c)
	exit := make(chan struct{})
	defer close(exit)
	go waitForDone(ctx, exit, func() {
		c.Cancel()
	})

	queries, support, err := q.PartialRun(ctx)
	if err != nil {
		return nil, err
	}

	pq := &PartialQueries{
		Queries: queries,
		Support: support,
	}

	return pq, nil
}
+
// rewriteQueryToCaptureValue is a query compiler stage that rewrites each
// non-assignment expression to capture its value in a generated term variable,
// so results can report expression values. The captured variable is recorded
// in r.capture keyed by the expression.
func (r *Rego) rewriteQueryToCaptureValue(qc ast.QueryCompiler, query ast.Body) (ast.Body, error) {

	// When the query iterates or has multiple expressions, an extra generated
	// expression is appended per capture to check the captured value.
	checkCapture := iteration(query) || len(query) > 1

	for _, expr := range query {

		if expr.Negated {
			continue
		}

		if expr.IsAssignment() || expr.IsEquality() {
			continue
		}

		var capture *ast.Term

		// If the expression can be evaluated as a function, rewrite it to
		// capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but
		// plus(1,2,x) does not get rewritten.
		switch terms := expr.Terms.(type) {
		case *ast.Term:
			capture = r.generateTermVar()
			expr.Terms = ast.Equality.Expr(terms, capture).Terms
			r.capture[expr] = capture.Value.(ast.Var)
		case []*ast.Term:
			// Only rewrite when the operator is called without its output
			// argument (arity matches the input count).
			if r.compiler.GetArity(expr.Operator()) == len(terms)-1 {
				capture = r.generateTermVar()
				expr.Terms = append(terms, capture)
				r.capture[expr] = capture.Value.(ast.Var)
			}
		}

		if capture != nil && checkCapture {
			cpy := expr.Copy()
			cpy.Terms = capture
			cpy.Generated = true
			cpy.With = nil
			query.Append(cpy)
		}
	}

	return query, nil
}
+
// rewriteQueryForPartialEval is a query compiler stage that validates the
// query is a single ground reference and rewrites it to unify that reference
// with a wildcard, the form required for partial evaluation.
func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
	if len(query) != 1 {
		return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)")
	}

	term, ok := query[0].Terms.(*ast.Term)
	if !ok {
		return nil, fmt.Errorf("partial evaluation requires ref (not expression)")
	}

	ref, ok := term.Value.(ast.Ref)
	if !ok {
		return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.TypeName(term.Value))
	}

	if !ref.IsGround() {
		return nil, fmt.Errorf("partial evaluation requires ground ref")
	}

	return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil
}
+
// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally
// this wouldn't be done, except for handling queries with the `Partial` API
// where rewriting them can substantially simplify the result, and it is unlikely
// that the caller would need expression values.
func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) {
	doubleEq := ast.Equal.Ref()
	unifyOp := ast.Equality.Ref()
	ast.WalkExprs(query, func(x *ast.Expr) bool {
		// Only binary == calls are rewritten; the operator term is swapped
		// in place.
		if x.IsCall() {
			operator := x.Operator()
			if operator.Equal(doubleEq) && len(x.Operands()) == 2 {
				x.SetOperator(ast.NewTerm(unifyOp))
			}
		}
		// Returning false continues the walk into sub-expressions.
		return false
	})
	return query, nil
}
+
+// generateTermVar returns a fresh wildcard-prefixed variable (e.g. $term1)
+// used to capture expression values; the counter guarantees uniqueness
+// within this Rego instance.
+func (r *Rego) generateTermVar() *ast.Term {
+	r.termVarID++
+	return ast.VarTerm(ast.WildcardPrefix + fmt.Sprintf("term%v", r.termVarID))
+}
+
+// hasQuery reports whether a query was supplied, either as a raw string or
+// in pre-parsed form.
+func (r Rego) hasQuery() bool {
+	return len(r.query) != 0 || len(r.parsedQuery) != 0
+}
+
+// transactionCloser finalizes a transaction created by getTxn; err reports
+// whether the operation using the transaction succeeded.
+type transactionCloser func(ctx context.Context, err error) error
+
+// getTxn will conditionally create a read or write transaction suitable for
+// the configured Rego object. The returned function should be used to close the txn
+// regardless of status.
+func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) {
+
+	noopCloser := func(ctx context.Context, err error) error {
+		return nil // no-op default
+	}
+
+	if r.txn != nil {
+		// Externally provided txn: the caller owns its lifecycle, so the
+		// closer is a no-op.
+		return r.txn, noopCloser, nil
+	}
+
+	// Create a new transaction.
+	params := storage.TransactionParams{}
+
+	// Bundles and data paths may require writing data files or manifests to storage
+	if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 {
+
+		// If we were given a store we will *not* write to it, only do that on one
+		// which was created automatically on behalf of the user.
+		if !r.ownStore {
+			return nil, noopCloser, errors.New("unable to start write transaction when store was provided")
+		}
+
+		params.Write = true
+	}
+
+	txn, err := r.store.NewTransaction(ctx, params)
+	if err != nil {
+		return nil, noopCloser, err
+	}
+
+	// Setup a closer function that will abort or commit as needed.
+	closer := func(ctx context.Context, txnErr error) error {
+		var err error
+
+		// Commit only successful write transactions; everything else aborts.
+		if txnErr == nil && params.Write {
+			err = r.store.Commit(ctx, txn)
+		} else {
+			r.store.Abort(ctx, txn)
+		}
+
+		// Clear the auto created transaction now that it is closed.
+		r.txn = nil
+
+		return err
+	}
+
+	return txn, closer, nil
+}
+
+// compilerForTxn returns the compiler configured with a path-conflict check
+// bound to the given store and transaction.
+func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler {
+	// Update the compiler to have a valid path conflict check
+	// for the current context and transaction.
+	return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn))
+}
+
+// checkPartialResultForRecursiveRefs reports whether body contains a ref for
+// which path is a prefix extension, i.e., the partial result would reference
+// the document it is being stored under (a recursive definition).
+func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool {
+	var stop bool
+	ast.WalkRefs(body, func(x ast.Ref) bool {
+		if !stop {
+			if path.HasPrefix(x) {
+				stop = true
+			}
+		}
+		// Once found, returning true short-circuits the walk.
+		return stop
+	})
+	return stop
+}
+
+// isTermVar reports whether v is a capture variable produced by
+// generateTermVar (wildcard prefix followed by "term").
+func isTermVar(v ast.Var) bool {
+	return strings.HasPrefix(string(v), ast.WildcardPrefix+"term")
+}
+
+// waitForDone blocks until either exit is closed (normal completion) or ctx
+// is done, in which case f is invoked (e.g., to cancel an in-flight
+// evaluation) before returning.
+func waitForDone(ctx context.Context, exit chan struct{}, f func()) {
+	select {
+	case <-exit:
+		return
+	case <-ctx.Done():
+		f()
+		return
+	}
+}
+
+// rawModule pairs an unparsed Rego module with the filename used for error
+// reporting during parsing.
+type rawModule struct {
+	filename string
+	module   string
+}
+
+// Parse parses the raw module text into an *ast.Module.
+func (m rawModule) Parse() (*ast.Module, error) {
+	return ast.ParseModule(m.filename, m.module)
+}
+
+// extraStage describes a query-compiler stage to be inserted after the named
+// built-in stage.
+type extraStage struct {
+	after string
+	stage ast.QueryCompilerStageDefinition
+}
+
+// refResolver associates an external resolver with the ref it serves.
+type refResolver struct {
+	ref ast.Ref
+	r   resolver.Resolver
+}
+
+// iteration reports whether x contains a construct that may iterate: a
+// relation built-in (per ast.BuiltinMap), or a ref with a variable in a
+// non-head position. Comprehensions are not descended into — presumably
+// because they iterate internally without affecting the outer query; TODO
+// confirm against the visitor semantics.
+func iteration(x interface{}) bool {
+
+	var stopped bool
+
+	vis := ast.NewGenericVisitor(func(x interface{}) bool {
+		switch x := x.(type) {
+		case *ast.Term:
+			if ast.IsComprehension(x.Value) {
+				// Returning true here skips the comprehension's children.
+				return true
+			}
+		case ast.Ref:
+			if !stopped {
+				// Relation built-ins (e.g. walk) imply iteration.
+				if bi := ast.BuiltinMap[x.String()]; bi != nil {
+					if bi.Relation {
+						stopped = true
+						return stopped
+					}
+				}
+				// A variable in any operand position after the head implies
+				// iteration over the referenced collection.
+				for i := 1; i < len(x); i++ {
+					if _, ok := x[i].Value.(ast.Var); ok {
+						stopped = true
+						return stopped
+					}
+				}
+			}
+			return stopped
+		}
+		return stopped
+	})
+
+	vis.Walk(x)
+
+	return stopped
+}
+
+// parseStringsToRefs parses each string in s into an ast.Ref, returning the
+// first parse error encountered.
+func parseStringsToRefs(s []string) ([]ast.Ref, error) {
+
+	refs := make([]ast.Ref, len(s))
+	for i := range refs {
+		var err error
+		refs[i], err = ast.ParseRef(s[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return refs, nil
+}
+
+// helper function to finish a built-in function call. If an error occurred,
+// wrap the error as a topdown.BuiltinErr and return it. Otherwise, invoke the
+// iterator if the result was defined (a nil result means undefined).
+func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error {
+	if err != nil {
+		return &topdown.Error{
+			Code:     topdown.BuiltinErr,
+			Message:  fmt.Sprintf("%v: %v", name, err.Error()),
+			Location: bctx.Location,
+		}
+	}
+	if result == nil {
+		return nil
+	}
+	return iter(result)
+}
+
+// helper function to return an option that sets a custom built-in function.
+// It registers both the declaration (for the compiler) and the implementation
+// (for the evaluator) under the function's name.
+func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) {
+	return func(r *Rego) {
+		r.builtinDecls[decl.Name] = &ast.Builtin{
+			Name: decl.Name,
+			Decl: decl.Decl,
+		}
+		r.builtinFuncs[decl.Name] = &topdown.Builtin{
+			Decl: r.builtinDecls[decl.Name],
+			Func: f,
+		}
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/interface.go b/vendor/github.com/open-policy-agent/opa/resolver/interface.go
new file mode 100644
index 00000000..fc02329f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/resolver/interface.go
@@ -0,0 +1,29 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package resolver
+
+import (
+ "context"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/metrics"
+)
+
+// Resolver defines an external value resolver for OPA evaluations.
+type Resolver interface {
+ Eval(context.Context, Input) (Result, error)
+}
+
+// Input as provided to a Resolver instance when evaluating.
+type Input struct {
+ Ref ast.Ref
+ Input *ast.Term
+ Metrics metrics.Metrics
+}
+
+// Result of resolving a ref.
+type Result struct {
+ Value ast.Value
+}
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/wasm/nop.go b/vendor/github.com/open-policy-agent/opa/resolver/wasm/nop.go
new file mode 100644
index 00000000..8da686be
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/resolver/wasm/nop.go
@@ -0,0 +1,55 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// +build !opa_wasm
+
+package wasm
+
+import (
+ "context"
+ "errors"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/resolver"
+)
+
+// Resolver is a stub implementation of a resolver.Resolver.
+type Resolver struct {
+}
+
+// Entrypoints unimplemented.
+func (r *Resolver) Entrypoints() []ast.Ref {
+ panic("unreachable")
+}
+
+// Close unimplemented.
+func (r *Resolver) Close() {
+ panic("unreachable")
+}
+
+// Eval unimplemented.
+func (r *Resolver) Eval(ctx context.Context, input resolver.Input) (resolver.Result, error) {
+
+ panic("unreachable")
+}
+
+// SetData unimplemented.
+func (r *Resolver) SetData(data interface{}) error {
+ panic("unreachable")
+}
+
+// SetDataPath unimplemented.
+func (r *Resolver) SetDataPath(path []string, data interface{}) error {
+ panic("unreachable")
+}
+
+// RemoveDataPath unimplemented.
+func (r *Resolver) RemoveDataPath(path []string) error {
+ panic("unreachable")
+}
+
+// New unimplemented. Will always return an error.
+func New(entrypoints []ast.Ref, policy []byte, data interface{}) (*Resolver, error) {
+ return nil, errors.New("WebAssembly runtime not supported in this build")
+}
diff --git a/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go b/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go
new file mode 100644
index 00000000..67a22ba1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go
@@ -0,0 +1,172 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// +build opa_wasm
+
+package wasm
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "github.com/open-policy-agent/opa/internal/wasm/sdk/opa"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/resolver"
+)
+
+// New creates a new Resolver instance which is using the Wasm module
+// policy for the given entrypoint ref.
+func New(entrypoints []ast.Ref, policy []byte, data interface{}) (*Resolver, error) {
+ o, err := opa.New().
+ WithPolicyBytes(policy).
+ WithDataJSON(data).
+ Init()
+ if err != nil {
+ return nil, err
+ }
+
+ // Construct a quick lookup table of ref -> entrypoint ID
+ // for handling evaluations. Only the entrypoints provided
+ // by the caller will be constructed, this may be a subset
+ // of entrypoints available in the Wasm module, however
+ // only the configured ones will be used when Eval() is
+ // called.
+ entrypointRefToID := ast.NewValueMap()
+ epIDs, err := o.Entrypoints(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ for path, id := range epIDs {
+ for _, ref := range entrypoints {
+ refPtr, err := ref.Ptr()
+ if err != nil {
+ return nil, err
+ }
+ if refPtr == path {
+ entrypointRefToID.Put(ref, ast.Number(strconv.Itoa(int(id))))
+ }
+ }
+ }
+
+ return &Resolver{
+ entrypoints: entrypoints,
+ entrypointIDs: entrypointRefToID,
+ o: o,
+ }, nil
+}
+
+// Resolver implements the resolver.Resolver interface
+// using Wasm modules to perform an evaluation.
+type Resolver struct {
+ entrypoints []ast.Ref
+ entrypointIDs *ast.ValueMap
+ o *opa.OPA
+}
+
+// Entrypoints returns a list of entrypoints this resolver is configured to
+// perform evaluations on.
+func (r *Resolver) Entrypoints() []ast.Ref {
+ return r.entrypoints
+}
+
+// Close shuts down the resolver.
+func (r *Resolver) Close() {
+ r.o.Close()
+}
+
+// Eval performs an evaluation using the provided input and the Wasm module
+// associated with this Resolver instance.
+func (r *Resolver) Eval(ctx context.Context, input resolver.Input) (resolver.Result, error) {
+ v := r.entrypointIDs.Get(input.Ref)
+ if v == nil {
+ return resolver.Result{}, fmt.Errorf("unknown entrypoint %s", input.Ref)
+ }
+
+ numValue, ok := v.(ast.Number)
+ if !ok {
+ return resolver.Result{}, fmt.Errorf("internal error: invalid entrypoint id %s", numValue)
+ }
+
+ epID, ok := numValue.Int()
+ if !ok {
+ return resolver.Result{}, fmt.Errorf("internal error: invalid entrypoint id %s", numValue)
+ }
+
+ var in *interface{}
+ if input.Input != nil {
+ var str interface{} = []byte(input.Input.String())
+ in = &str
+ }
+
+ opts := opa.EvalOpts{
+ Input: in,
+ Entrypoint: int32(epID),
+ Metrics: input.Metrics,
+ }
+ out, err := r.o.Eval(ctx, opts)
+ if err != nil {
+ return resolver.Result{}, err
+ }
+
+ result, err := getResult(out)
+ if err != nil {
+ return resolver.Result{}, err
+ }
+
+ return resolver.Result{Value: result}, nil
+}
+
+// SetData will update the external data for the Wasm instance.
+func (r *Resolver) SetData(data interface{}) error {
+ return r.o.SetData(data)
+}
+
+// SetDataPath will set the provided data on the wasm instance at the specified path.
+func (r *Resolver) SetDataPath(path []string, data interface{}) error {
+ return r.o.SetDataPath(path, data)
+}
+
+// RemoveDataPath will remove any data at the specified path.
+func (r *Resolver) RemoveDataPath(path []string) error {
+ return r.o.RemoveDataPath(path)
+}
+
+func getResult(evalResult *opa.Result) (ast.Value, error) {
+
+ parsed, err := ast.ParseTerm(string(evalResult.Result))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse wasm result: %s", err)
+ }
+
+ resultSet, ok := parsed.Value.(ast.Set)
+ if !ok {
+ return nil, fmt.Errorf("illegal result type")
+ }
+
+ if resultSet.Len() == 0 {
+ return nil, nil
+ }
+
+ if resultSet.Len() > 1 {
+ return nil, fmt.Errorf("illegal result type")
+ }
+
+ var obj ast.Object
+ err = resultSet.Iter(func(term *ast.Term) error {
+ obj, ok = term.Value.(ast.Object)
+ if !ok || obj.Len() != 1 {
+ return fmt.Errorf("illegal result type")
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ result := obj.Get(ast.StringTerm("result"))
+
+ return result.Value, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/doc.go b/vendor/github.com/open-policy-agent/opa/storage/doc.go
new file mode 100644
index 00000000..6fa2f86d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package storage exposes the policy engine's storage layer.
+package storage
diff --git a/vendor/github.com/open-policy-agent/opa/storage/errors.go b/vendor/github.com/open-policy-agent/opa/storage/errors.go
new file mode 100644
index 00000000..6c877958
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/errors.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+ "fmt"
+)
+
+const (
+ // InternalErr indicates an unknown, internal error has occurred.
+ InternalErr = "storage_internal_error"
+
+ // NotFoundErr indicates the path used in the storage operation does not
+ // locate a document.
+ NotFoundErr = "storage_not_found_error"
+
+	// WriteConflictErr indicates a write on the path encountered a conflicting
+ // value inside the transaction.
+ WriteConflictErr = "storage_write_conflict_error"
+
+ // InvalidPatchErr indicates an invalid patch/write was issued. The patch
+ // was rejected.
+ InvalidPatchErr = "storage_invalid_patch_error"
+
+ // InvalidTransactionErr indicates an invalid operation was performed
+ // inside of the transaction.
+ InvalidTransactionErr = "storage_invalid_txn_error"
+
+ // TriggersNotSupportedErr indicates the caller attempted to register a
+ // trigger against a store that does not support them.
+ TriggersNotSupportedErr = "storage_triggers_not_supported_error"
+
+	// WritesNotSupportedErr indicates the caller attempted to perform a write
+ // against a store that does not support them.
+ WritesNotSupportedErr = "storage_writes_not_supported_error"
+
+	// PolicyNotSupportedErr indicates the caller attempted to perform a policy
+ // management operation against a store that does not support them.
+ PolicyNotSupportedErr = "storage_policy_not_supported_error"
+
+	// IndexingNotSupportedErr indicates the caller attempted to perform an
+ // indexing operation against a store that does not support them.
+ IndexingNotSupportedErr = "storage_indexing_not_supported_error"
+)
+
+// Error is the error type returned by the storage layer.
+type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (err *Error) Error() string {
+ if err.Message != "" {
+ return fmt.Sprintf("%v: %v", err.Code, err.Message)
+ }
+ return string(err.Code)
+}
+
+// IsNotFound returns true if this error is a NotFoundErr.
+func IsNotFound(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == NotFoundErr
+ }
+ return false
+}
+
+// IsWriteConflictError returns true if this error a WriteConflictErr.
+func IsWriteConflictError(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == WriteConflictErr
+ }
+ return false
+}
+
+// IsInvalidPatch returns true if this error is a InvalidPatchErr.
+func IsInvalidPatch(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == InvalidPatchErr
+ }
+ return false
+}
+
+// IsInvalidTransaction returns true if this error is a InvalidTransactionErr.
+func IsInvalidTransaction(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == InvalidTransactionErr
+ }
+ return false
+}
+
+// IsIndexingNotSupported returns true if this error is a IndexingNotSupportedErr.
+func IsIndexingNotSupported(err error) bool {
+ switch err := err.(type) {
+ case *Error:
+ return err.Code == IndexingNotSupportedErr
+ }
+ return false
+}
+
+func writeConflictError(path Path) *Error {
+ return &Error{
+ Code: WriteConflictErr,
+ Message: fmt.Sprint(path),
+ }
+}
+
+func triggersNotSupportedError() *Error {
+ return &Error{
+ Code: TriggersNotSupportedErr,
+ }
+}
+
+func writesNotSupportedError() *Error {
+ return &Error{
+ Code: WritesNotSupportedErr,
+ }
+}
+
+func policyNotSupportedError() *Error {
+ return &Error{
+ Code: PolicyNotSupportedErr,
+ }
+}
+
+func indexingNotSupportedError() *Error {
+ return &Error{
+ Code: IndexingNotSupportedErr,
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
new file mode 100644
index 00000000..4c91f416
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go
@@ -0,0 +1,269 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package inmem implements an in-memory version of the policy engine's storage
+// layer.
+//
+// The in-memory store is used as the default storage layer implementation. The
+// in-memory store supports multi-reader/single-writer concurrency with
+// rollback.
+//
+// Callers should assume the in-memory store does not make copies of written
+// data. Once data is written to the in-memory store, it should not be modified
+// (outside of calling Store.Write). Furthermore, data read from the in-memory
+// store should be treated as read-only.
+package inmem
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// New returns an empty in-memory store.
+func New() storage.Store {
+ return &store{
+ data: map[string]interface{}{},
+ triggers: map[*handle]storage.TriggerConfig{},
+ policies: map[string][]byte{},
+ }
+}
+
+// NewFromObject returns a new in-memory store from the supplied data object.
+func NewFromObject(data map[string]interface{}) storage.Store {
+ db := New()
+ ctx := context.Background()
+ txn, err := db.NewTransaction(ctx, storage.WriteParams)
+ if err != nil {
+ panic(err)
+ }
+ if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil {
+ panic(err)
+ }
+ if err := db.Commit(ctx, txn); err != nil {
+ panic(err)
+ }
+ return db
+}
+
+// NewFromReader returns a new in-memory store from a reader that produces a
+// JSON serialized object. This function is for test purposes.
+func NewFromReader(r io.Reader) storage.Store {
+ d := util.NewJSONDecoder(r)
+ var data map[string]interface{}
+ if err := d.Decode(&data); err != nil {
+ panic(err)
+ }
+ return NewFromObject(data)
+}
+
+type store struct {
+ rmu sync.RWMutex // reader-writer lock
+ wmu sync.Mutex // writer lock
+ xid uint64 // last generated transaction id
+ data map[string]interface{} // raw data
+ policies map[string][]byte // raw policies
+ triggers map[*handle]storage.TriggerConfig // registered triggers
+}
+
+type handle struct {
+ db *store
+}
+
+func (db *store) NewTransaction(ctx context.Context, params ...storage.TransactionParams) (storage.Transaction, error) {
+ var write bool
+ var context *storage.Context
+ if len(params) > 0 {
+ write = params[0].Write
+ context = params[0].Context
+ }
+ xid := atomic.AddUint64(&db.xid, uint64(1))
+ if write {
+ db.wmu.Lock()
+ } else {
+ db.rmu.RLock()
+ }
+ return newTransaction(xid, write, context, db), nil
+}
+
+func (db *store) Commit(ctx context.Context, txn storage.Transaction) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ if underlying.write {
+ db.rmu.Lock()
+ event := underlying.Commit()
+ db.runOnCommitTriggers(ctx, txn, event)
+ // Mark the transaction stale after executing triggers so they can
+ // perform store operations if needed.
+ underlying.stale = true
+ db.rmu.Unlock()
+ db.wmu.Unlock()
+ } else {
+ db.rmu.RUnlock()
+ }
+ return nil
+}
+
+func (db *store) Abort(ctx context.Context, txn storage.Transaction) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ panic(err)
+ }
+ underlying.stale = true
+ if underlying.write {
+ db.wmu.Unlock()
+ } else {
+ db.rmu.RUnlock()
+ }
+}
+
+func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ return underlying.ListPolicies(), nil
+}
+
+func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ return underlying.GetPolicy(id)
+}
+
+func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ return underlying.UpsertPolicy(id, bs)
+}
+
+func (db *store) DeletePolicy(_ context.Context, txn storage.Transaction, id string) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ if _, err := underlying.GetPolicy(id); err != nil {
+ return err
+ }
+ return underlying.DeletePolicy(id)
+}
+
+func (db *store) Register(ctx context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ if !underlying.write {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "triggers must be registered with a write transaction",
+ }
+ }
+ h := &handle{db}
+ db.triggers[h] = config
+ return h, nil
+}
+
+func (db *store) Read(ctx context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return nil, err
+ }
+ return underlying.Read(path)
+}
+
+func (db *store) Write(ctx context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value interface{}) error {
+ underlying, err := db.underlying(txn)
+ if err != nil {
+ return err
+ }
+ val := util.Reference(value)
+ if err := util.RoundTrip(val); err != nil {
+ return err
+ }
+ return underlying.Write(op, path, *val)
+}
+
+func (h *handle) Unregister(ctx context.Context, txn storage.Transaction) {
+ underlying, err := h.db.underlying(txn)
+ if err != nil {
+ panic(err)
+ }
+ if !underlying.write {
+ panic(&storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "triggers must be unregistered with a write transaction",
+ })
+ }
+ delete(h.db.triggers, h)
+}
+
+func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) {
+ for _, t := range db.triggers {
+ t.OnCommit(ctx, txn, event)
+ }
+}
+
+func (db *store) underlying(txn storage.Transaction) (*transaction, error) {
+ underlying, ok := txn.(*transaction)
+ if !ok {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: fmt.Sprintf("unexpected transaction type %T", txn),
+ }
+ }
+ if underlying.db != db {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "unknown transaction",
+ }
+ }
+ if underlying.stale {
+ return nil, &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "stale transaction",
+ }
+ }
+ return underlying, nil
+}
+
+var doesNotExistMsg = "document does not exist"
+var rootMustBeObjectMsg = "root must be object"
+var rootCannotBeRemovedMsg = "root cannot be removed"
+var outOfRangeMsg = "array index out of range"
+var arrayIndexTypeMsg = "array index must be integer"
+
+func invalidPatchError(f string, a ...interface{}) *storage.Error {
+ return &storage.Error{
+ Code: storage.InvalidPatchErr,
+ Message: fmt.Sprintf(f, a...),
+ }
+}
+
+func notFoundError(path storage.Path) *storage.Error {
+ return notFoundErrorHint(path, doesNotExistMsg)
+}
+
+func notFoundErrorHint(path storage.Path, hint string) *storage.Error {
+ return notFoundErrorf("%v: %v", path.String(), hint)
+}
+
+func notFoundErrorf(f string, a ...interface{}) *storage.Error {
+ msg := fmt.Sprintf(f, a...)
+ return &storage.Error{
+ Code: storage.NotFoundErr,
+ Message: msg,
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go b/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go
new file mode 100644
index 00000000..f97de6b0
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go
@@ -0,0 +1,425 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package inmem
+
+import (
+ "container/list"
+ "encoding/json"
+ "strconv"
+
+ "github.com/open-policy-agent/opa/internal/deepcopy"
+ "github.com/open-policy-agent/opa/storage"
+)
+
+// transaction implements the low-level read/write operations on the in-memory
+// store and contains the state required for pending transactions.
+//
+// For write transactions, the struct contains a logical set of updates
+// performed by write operations in the transaction. Each write operation
+// compacts the set such that two updates never overlap:
+//
+// - If new update path is a prefix of existing update path, existing update is
+// removed, new update is added.
+//
+// - If existing update path is a prefix of new update path, existing update is
+// modified.
+//
+// - Otherwise, new update is added.
+//
+// Read transactions do not require any special handling and simply passthrough
+// to the underlying store. Read transactions do not support upgrade.
+type transaction struct {
+ xid uint64
+ write bool
+ stale bool
+ db *store
+ updates *list.List
+ policies map[string]policyUpdate
+ context *storage.Context
+}
+
+type policyUpdate struct {
+ value []byte
+ remove bool
+}
+
+func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction {
+ return &transaction{
+ xid: xid,
+ write: write,
+ db: db,
+ policies: map[string]policyUpdate{},
+ updates: list.New(),
+ context: context,
+ }
+}
+
+func (txn *transaction) ID() uint64 {
+ return txn.xid
+}
+
+func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value interface{}) error {
+
+ if !txn.write {
+ return &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "data write during read transaction",
+ }
+ }
+
+ if len(path) == 0 {
+ return txn.updateRoot(op, value)
+ }
+
+ for curr := txn.updates.Front(); curr != nil; {
+ update := curr.Value.(*update)
+
+ // Check if new update masks existing update exactly. In this case, the
+ // existing update can be removed and no other updates have to be
+ // visited (because no two updates overlap.)
+ if update.path.Equal(path) {
+ if update.remove {
+ if op != storage.AddOp {
+ return notFoundError(path)
+ }
+ }
+ txn.updates.Remove(curr)
+ break
+ }
+
+ // Check if new update masks existing update. In this case, the
+ // existing update has to be removed but other updates may overlap, so
+ // we must continue.
+ if update.path.HasPrefix(path) {
+ remove := curr
+ curr = curr.Next()
+ txn.updates.Remove(remove)
+ continue
+ }
+
+ // Check if new update modifies existing update. In this case, the
+ // existing update is mutated.
+ if path.HasPrefix(update.path) {
+ if update.remove {
+ return notFoundError(path)
+ }
+ suffix := path[len(update.path):]
+ newUpdate, err := newUpdate(update.value, op, suffix, 0, value)
+ if err != nil {
+ return err
+ }
+ update.value = newUpdate.Apply(update.value)
+ return nil
+ }
+
+ curr = curr.Next()
+ }
+
+ update, err := newUpdate(txn.db.data, op, path, 0, value)
+ if err != nil {
+ return err
+ }
+
+ txn.updates.PushFront(update)
+ return nil
+}
+
+func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error {
+ if op == storage.RemoveOp {
+ return invalidPatchError(rootCannotBeRemovedMsg)
+ }
+ if _, ok := value.(map[string]interface{}); !ok {
+ return invalidPatchError(rootMustBeObjectMsg)
+ }
+ txn.updates.Init()
+ txn.updates.PushFront(&update{
+ path: storage.Path{},
+ remove: false,
+ value: value,
+ })
+ return nil
+}
+
+func (txn *transaction) Commit() (result storage.TriggerEvent) {
+ result.Context = txn.context
+ for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
+ action := curr.Value.(*update)
+ updated := action.Apply(txn.db.data)
+ txn.db.data = updated.(map[string]interface{})
+
+ result.Data = append(result.Data, storage.DataEvent{
+ Path: action.path,
+ Data: action.value,
+ Removed: action.remove,
+ })
+ }
+ for id, update := range txn.policies {
+ if update.remove {
+ delete(txn.db.policies, id)
+ } else {
+ txn.db.policies[id] = update.value
+ }
+
+ result.Policy = append(result.Policy, storage.PolicyEvent{
+ ID: id,
+ Data: update.value,
+ Removed: update.remove,
+ })
+ }
+ return result
+}
+
+func (txn *transaction) Read(path storage.Path) (interface{}, error) {
+
+ if !txn.write {
+ return ptr(txn.db.data, path)
+ }
+
+ merge := []*update{}
+
+ for curr := txn.updates.Front(); curr != nil; curr = curr.Next() {
+
+ update := curr.Value.(*update)
+
+ if path.HasPrefix(update.path) {
+ if update.remove {
+ return nil, notFoundError(path)
+ }
+ return ptr(update.value, path[len(update.path):])
+ }
+
+ if update.path.HasPrefix(path) {
+ merge = append(merge, update)
+ }
+ }
+
+ data, err := ptr(txn.db.data, path)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(merge) == 0 {
+ return data, nil
+ }
+
+ cpy := deepcopy.DeepCopy(data)
+
+ for _, update := range merge {
+ cpy = update.Relative(path).Apply(cpy)
+ }
+
+ return cpy, nil
+}
+
+func (txn *transaction) ListPolicies() []string {
+ var ids []string
+ for id := range txn.db.policies {
+ if _, ok := txn.policies[id]; !ok {
+ ids = append(ids, id)
+ }
+ }
+ for id, update := range txn.policies {
+ if !update.remove {
+ ids = append(ids, id)
+ }
+ }
+ return ids
+}
+
+func (txn *transaction) GetPolicy(id string) ([]byte, error) {
+ if update, ok := txn.policies[id]; ok {
+ if !update.remove {
+ return update.value, nil
+ }
+ return nil, notFoundErrorf("policy id %q", id)
+ }
+ if exist, ok := txn.db.policies[id]; ok {
+ return exist, nil
+ }
+ return nil, notFoundErrorf("policy id %q", id)
+}
+
+func (txn *transaction) UpsertPolicy(id string, bs []byte) error {
+ if !txn.write {
+ return &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "policy write during read transaction",
+ }
+ }
+ txn.policies[id] = policyUpdate{bs, false}
+ return nil
+}
+
+func (txn *transaction) DeletePolicy(id string) error {
+ if !txn.write {
+ return &storage.Error{
+ Code: storage.InvalidTransactionErr,
+ Message: "policy write during read transaction",
+ }
+ }
+ txn.policies[id] = policyUpdate{nil, true}
+ return nil
+}
+
+// update contains state associated with an update to be applied to the
+// in-memory data store.
+type update struct {
+ path storage.Path // data path modified by update
+ remove bool // indicates whether update removes the value at path
+ value interface{} // value to add/replace at path (ignored if remove is true)
+}
+
+func newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) {
+
+ switch data := data.(type) {
+ case map[string]interface{}:
+ return newUpdateObject(data, op, path, idx, value)
+
+ case []interface{}:
+ return newUpdateArray(data, op, path, idx, value)
+
+ case nil, bool, json.Number, string:
+ return nil, notFoundError(path)
+ }
+
+ return nil, &storage.Error{
+ Code: storage.InternalErr,
+ Message: "invalid data value encountered",
+ }
+}
+
+func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) {
+
+ if idx == len(path)-1 {
+ if path[idx] == "-" {
+ if op != storage.AddOp {
+ return nil, invalidPatchError("%v: invalid patch path", path)
+ }
+ cpy := make([]interface{}, len(data)+1)
+ copy(cpy, data)
+ cpy[len(data)] = value
+ return &update{path[:len(path)-1], false, cpy}, nil
+ }
+
+ pos, err := validateArrayIndex(data, path[idx], path)
+ if err != nil {
+ return nil, err
+ }
+
+ if op == storage.AddOp {
+ cpy := make([]interface{}, len(data)+1)
+ copy(cpy[:pos], data[:pos])
+ copy(cpy[pos+1:], data[pos:])
+ cpy[pos] = value
+ return &update{path[:len(path)-1], false, cpy}, nil
+
+ } else if op == storage.RemoveOp {
+ cpy := make([]interface{}, len(data)-1)
+ copy(cpy[:pos], data[:pos])
+ copy(cpy[pos:], data[pos+1:])
+ return &update{path[:len(path)-1], false, cpy}, nil
+
+ } else {
+ cpy := make([]interface{}, len(data))
+ copy(cpy, data)
+ cpy[pos] = value
+ return &update{path[:len(path)-1], false, cpy}, nil
+ }
+ }
+
+ pos, err := validateArrayIndex(data, path[idx], path)
+ if err != nil {
+ return nil, err
+ }
+
+ return newUpdate(data[pos], op, path, idx+1, value)
+}
+
+func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) {
+
+ if idx == len(path)-1 {
+ switch op {
+ case storage.ReplaceOp, storage.RemoveOp:
+ if _, ok := data[path[idx]]; !ok {
+ return nil, notFoundError(path)
+ }
+ }
+ return &update{path, op == storage.RemoveOp, value}, nil
+ }
+
+ if data, ok := data[path[idx]]; ok {
+ return newUpdate(data, op, path, idx+1, value)
+ }
+
+ return nil, notFoundError(path)
+}
+func (u *update) Apply(data interface{}) interface{} {
+ if len(u.path) == 0 {
+ return u.value
+ }
+ parent, err := ptr(data, u.path[:len(u.path)-1])
+ if err != nil {
+ panic(err)
+ }
+ key := u.path[len(u.path)-1]
+ if u.remove {
+ obj := parent.(map[string]interface{})
+ delete(obj, key)
+ return data
+ }
+ switch parent := parent.(type) {
+ case map[string]interface{}:
+ parent[key] = u.value
+ case []interface{}:
+ idx, err := strconv.Atoi(key)
+ if err != nil {
+ panic(err)
+ }
+ parent[idx] = u.value
+ }
+ return data
+}
+
+func (u *update) Relative(path storage.Path) *update {
+ cpy := *u
+ cpy.path = cpy.path[len(path):]
+ return &cpy
+}
+
+func ptr(data interface{}, path storage.Path) (interface{}, error) {
+ node := data
+ for i := range path {
+ key := path[i]
+ switch curr := node.(type) {
+ case map[string]interface{}:
+ var ok bool
+ if node, ok = curr[key]; !ok {
+ return nil, notFoundError(path)
+ }
+ case []interface{}:
+ pos, err := validateArrayIndex(curr, key, path)
+ if err != nil {
+ return nil, err
+ }
+ node = curr[pos]
+ default:
+ return nil, notFoundError(path)
+ }
+ }
+
+ return node, nil
+}
+
+func validateArrayIndex(arr []interface{}, s string, path storage.Path) (int, error) {
+ idx, err := strconv.Atoi(s)
+ if err != nil {
+ return 0, notFoundErrorHint(path, arrayIndexTypeMsg)
+ }
+ if idx < 0 || idx >= len(arr) {
+ return 0, notFoundErrorHint(path, outOfRangeMsg)
+ }
+ return idx, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/interface.go b/vendor/github.com/open-policy-agent/opa/storage/interface.go
new file mode 100644
index 00000000..f1507161
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/interface.go
@@ -0,0 +1,194 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+	"context"
+)
+
+// Transaction defines the interface that identifies a consistent snapshot over
+// the policy engine's storage layer.
+type Transaction interface {
+	ID() uint64
+}
+
+// Store defines the interface for the storage layer's backend.
+type Store interface {
+	Trigger
+	Policy
+
+	// NewTransaction is called to create a new transaction in the store.
+	NewTransaction(ctx context.Context, params ...TransactionParams) (Transaction, error)
+
+	// Read is called to fetch a document referred to by path.
+	Read(ctx context.Context, txn Transaction, path Path) (interface{}, error)
+
+	// Write is called to modify a document referred to by path.
+	Write(ctx context.Context, txn Transaction, op PatchOp, path Path, value interface{}) error
+
+	// Commit is called to finish the transaction. If Commit returns an error, the
+	// transaction must be automatically aborted by the Store implementation.
+	Commit(ctx context.Context, txn Transaction) error
+
+	// Abort is called to cancel the transaction.
+	Abort(ctx context.Context, txn Transaction)
+}
+
+// TransactionParams describes a new transaction.
+type TransactionParams struct {
+
+	// Write indicates if this transaction will perform any write operations.
+	Write bool
+
+	// Context contains key/value pairs passed to triggers.
+	Context *Context
+}
+
+// Context is a simple container for key/value pairs.
+type Context struct {
+	values map[interface{}]interface{}
+}
+
+// NewContext returns a new context object.
+func NewContext() *Context {
+	return &Context{
+		values: map[interface{}]interface{}{},
+	}
+}
+
+// Get returns the key value in the context. Get tolerates a nil receiver and
+// returns nil in that case.
+func (ctx *Context) Get(key interface{}) interface{} {
+	if ctx == nil {
+		return nil
+	}
+	return ctx.values[key]
+}
+
+// Put adds a key/value pair to the context. Unlike Get, Put does not guard
+// against a nil receiver and will panic if ctx is nil.
+func (ctx *Context) Put(key, value interface{}) {
+	ctx.values[key] = value
+}
+
+// WriteParams specifies the TransactionParams for a write transaction.
+var WriteParams = TransactionParams{
+	Write: true,
+}
+
+// PatchOp is the enumeration of supposed modifications.
+type PatchOp int
+
+// Patch supports add, remove, and replace operations.
+// NOTE(review): RemoveOp and ReplaceOp omit the PatchOp type on their
+// declarations and are therefore untyped integer constants; confirm this
+// matches upstream intent before relying on their static type.
+const (
+	AddOp PatchOp = iota
+	RemoveOp      = iota
+	ReplaceOp     = iota
+)
+
+// WritesNotSupported provides a default implementation of the write
+// interface which may be used if the backend does not support writes.
+type WritesNotSupported struct{}
+
+// Write always returns a writes-not-supported error.
+func (WritesNotSupported) Write(ctx context.Context, txn Transaction, op PatchOp, path Path, value interface{}) error {
+	return writesNotSupportedError()
+}
+
+// Policy defines the interface for policy module storage.
+type Policy interface {
+	ListPolicies(context.Context, Transaction) ([]string, error)
+	GetPolicy(context.Context, Transaction, string) ([]byte, error)
+	UpsertPolicy(context.Context, Transaction, string, []byte) error
+	DeletePolicy(context.Context, Transaction, string) error
+}
+
+// PolicyNotSupported provides a default implementation of the policy interface
+// which may be used if the backend does not support policy storage.
+type PolicyNotSupported struct{}
+
+// ListPolicies always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) {
+	return nil, policyNotSupportedError()
+}
+
+// GetPolicy always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) {
+	return nil, policyNotSupportedError()
+}
+
+// UpsertPolicy always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error {
+	return policyNotSupportedError()
+}
+
+// DeletePolicy always returns a PolicyNotSupportedErr.
+func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error {
+	return policyNotSupportedError()
+}
+
+// PolicyEvent describes a change to a policy.
+type PolicyEvent struct {
+	ID      string
+	Data    []byte
+	Removed bool
+}
+
+// DataEvent describes a change to a base data document.
+type DataEvent struct {
+	Path    Path
+	Data    interface{}
+	Removed bool
+}
+
+// TriggerEvent describes the changes that caused the trigger to be invoked.
+type TriggerEvent struct {
+	Policy  []PolicyEvent
+	Data    []DataEvent
+	Context *Context
+}
+
+// IsZero returns true if the TriggerEvent indicates no changes occurred. This
+// function is primarily for test purposes.
+func (e TriggerEvent) IsZero() bool {
+	return !e.PolicyChanged() && !e.DataChanged()
+}
+
+// PolicyChanged returns true if the trigger was caused by a policy change.
+func (e TriggerEvent) PolicyChanged() bool {
+	return len(e.Policy) > 0
+}
+
+// DataChanged returns true if the trigger was caused by a data change.
+func (e TriggerEvent) DataChanged() bool {
+	return len(e.Data) > 0
+}
+
+// TriggerConfig contains the trigger registration configuration.
+type TriggerConfig struct {
+
+	// OnCommit is invoked when a transaction is successfully committed. The
+	// callback is invoked with a handle to the write transaction that
+	// successfully committed before other clients see the changes.
+	OnCommit func(ctx context.Context, txn Transaction, event TriggerEvent)
+}
+
+// Trigger defines the interface that stores implement to register for change
+// notifications when the store is changed.
+type Trigger interface {
+	Register(ctx context.Context, txn Transaction, config TriggerConfig) (TriggerHandle, error)
+}
+
+// TriggersNotSupported provides default implementations of the Trigger
+// interface which may be used if the backend does not support triggers.
+type TriggersNotSupported struct{}
+
+// Register always returns an error indicating triggers are not supported.
+func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) {
+	return nil, triggersNotSupportedError()
+}
+
+// TriggerHandle defines the interface that can be used to unregister triggers that have
+// been registered on a Store.
+type TriggerHandle interface {
+	Unregister(ctx context.Context, txn Transaction)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/path.go b/vendor/github.com/open-policy-agent/opa/storage/path.go
new file mode 100644
index 00000000..c1b514cc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/path.go
@@ -0,0 +1,154 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/open-policy-agent/opa/ast"
+)
+
+// Path refers to a document in storage.
+type Path []string
+
+// ParsePath returns a new path for the given str. The string must begin with
+// "/"; the root path "/" parses to an empty (non-nil) Path.
+func ParsePath(str string) (path Path, ok bool) {
+	if len(str) == 0 {
+		return nil, false
+	}
+	if str[0] != '/' {
+		return nil, false
+	}
+	if len(str) == 1 {
+		return Path{}, true
+	}
+	parts := strings.Split(str[1:], "/")
+	return parts, true
+}
+
+// ParsePathEscaped returns a new path for the given escaped str.
+func ParsePathEscaped(str string) (path Path, ok bool) {
+	path, ok = ParsePath(str)
+	if !ok {
+		return
+	}
+	for i := range path {
+		segment, err := url.PathUnescape(path[i])
+		// Segments that fail to unescape are deliberately kept verbatim
+		// rather than causing the whole parse to fail.
+		if err == nil {
+			path[i] = segment
+		}
+	}
+	return
+}
+
+// NewPathForRef returns a new path for the given ref. The ref's head term is
+// dropped; string and number operands become path segments, while booleans,
+// nulls, composites, and unresolved terms produce errors.
+func NewPathForRef(ref ast.Ref) (path Path, err error) {
+
+	if len(ref) == 0 {
+		return nil, fmt.Errorf("empty reference (indicates error in caller)")
+	}
+
+	if len(ref) == 1 {
+		return Path{}, nil
+	}
+
+	path = make(Path, 0, len(ref)-1)
+
+	for _, term := range ref[1:] {
+		switch v := term.Value.(type) {
+		case ast.String:
+			path = append(path, string(v))
+		case ast.Number:
+			path = append(path, v.String())
+		case ast.Boolean, ast.Null:
+			return nil, &Error{
+				Code:    NotFoundErr,
+				Message: fmt.Sprintf("%v: does not exist", ref),
+			}
+		case *ast.Array, ast.Object, ast.Set:
+			return nil, fmt.Errorf("composites cannot be base document keys: %v", ref)
+		default:
+			return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref)
+		}
+	}
+
+	return path, nil
+}
+
+// Compare performs lexicographical comparison on p and other and returns -1 if p
+// is less than other, 0 if p is equal to other, or 1 if p is greater than
+// other.
+func (p Path) Compare(other Path) (cmp int) {
+	min := len(p)
+	if len(other) < min {
+		min = len(other)
+	}
+	for i := 0; i < min; i++ {
+		if cmp := strings.Compare(p[i], other[i]); cmp != 0 {
+			return cmp
+		}
+	}
+	if len(p) < len(other) {
+		return -1
+	}
+	if len(p) == len(other) {
+		return 0
+	}
+	return 1
+}
+
+// Equal returns true if p is the same as other.
+func (p Path) Equal(other Path) bool {
+	return p.Compare(other) == 0
+}
+
+// HasPrefix returns true if p starts with other.
+func (p Path) HasPrefix(other Path) bool {
+	if len(other) > len(p) {
+		return false
+	}
+	for i := range other {
+		if p[i] != other[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Ref returns a ref that represents p rooted at head. Segments that parse as
+// decimal integers become number terms; all others become string terms.
+func (p Path) Ref(head *ast.Term) (ref ast.Ref) {
+	ref = make(ast.Ref, len(p)+1)
+	ref[0] = head
+	for i := range p {
+		idx, err := strconv.ParseInt(p[i], 10, 64)
+		if err == nil {
+			ref[i+1] = ast.IntNumberTerm(int(idx))
+		} else {
+			ref[i+1] = ast.StringTerm(p[i])
+		}
+	}
+	return ref
+}
+
+// String returns the path rendered as a slash-separated string with each
+// segment URL path-escaped; the root path renders as "/".
+func (p Path) String() string {
+	buf := make([]string, len(p))
+	for i := range buf {
+		buf[i] = url.PathEscape(p[i])
+	}
+	return "/" + strings.Join(buf, "/")
+}
+
+// MustParsePath returns a new Path for s. If s cannot be parsed, this function
+// will panic. This is mostly for test purposes.
+func MustParsePath(s string) Path {
+	path, ok := ParsePath(s)
+	if !ok {
+		panic(s)
+	}
+	return path
+}
diff --git a/vendor/github.com/open-policy-agent/opa/storage/storage.go b/vendor/github.com/open-policy-agent/opa/storage/storage.go
new file mode 100644
index 00000000..323a0dba
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/storage/storage.go
@@ -0,0 +1,126 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package storage
+
+import (
+	"context"
+)
+
+// NewTransactionOrDie is a helper function to create a new transaction. If the
+// storage layer cannot create a new transaction, this function will panic. This
+// function should only be used for tests.
+func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction {
+	txn, err := store.NewTransaction(ctx, params...)
+	if err != nil {
+		panic(err)
+	}
+	return txn
+}
+
+// ReadOne is a convenience function to read a single value from the provided Store. It
+// will create a new Transaction to perform the read with, and clean up after itself
+// should an error occur.
+func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) {
+	txn, err := store.NewTransaction(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// Reads never need to be committed, so the transaction is always aborted.
+	defer store.Abort(ctx, txn)
+
+	return store.Read(ctx, txn, path)
+}
+
+// WriteOne is a convenience function to write a single value to the provided Store. It
+// will create a new Transaction to perform the write with, and clean up after itself
+// should an error occur.
+func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error {
+	txn, err := store.NewTransaction(ctx, WriteParams)
+	if err != nil {
+		return err
+	}
+
+	if err := store.Write(ctx, txn, op, path, value); err != nil {
+		store.Abort(ctx, txn)
+		return err
+	}
+
+	// No explicit abort on commit failure: per the Store contract, Commit
+	// aborts the transaction itself when it returns an error.
+	return store.Commit(ctx, txn)
+}
+
+// MakeDir inserts an empty object at path. If the parent path does not exist,
+// MakeDir will create it recursively.
+func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) (err error) {
+
+	if len(path) == 0 {
+		return nil
+	}
+
+	node, err := store.Read(ctx, txn, path)
+
+	if err != nil {
+		if !IsNotFound(err) {
+			return err
+		}
+
+		// Path is missing: recursively ensure all ancestors exist, then
+		// insert an empty object at the leaf.
+		if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil {
+			return err
+		} else if err := store.Write(ctx, txn, AddOp, path, map[string]interface{}{}); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// A pre-existing object at path is fine; any other value type is a
+	// conflict because it cannot act as a "directory".
+	if _, ok := node.(map[string]interface{}); ok {
+		return nil
+	}
+
+	return writeConflictError(path)
+
+}
+
+// Txn is a convenience function that executes f inside a new transaction
+// opened on the store. If the function returns an error, the transaction is
+// aborted and the error is returned. Otherwise, the transaction is committed
+// and the result of the commit is returned.
+func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error {
+
+	txn, err := store.NewTransaction(ctx, params)
+	if err != nil {
+		return err
+	}
+
+	if err := f(txn); err != nil {
+		store.Abort(ctx, txn)
+		return err
+	}
+
+	return store.Commit(ctx, txn)
+}
+
+// NonEmpty returns a function that tests if a path is non-empty. A
+// path is non-empty if a Read on the path returns a value or a Read
+// on any of the path prefixes returns a non-object value.
+func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) {
+	return func(path []string) (bool, error) {
+		if _, err := store.Read(ctx, txn, Path(path)); err == nil {
+			return true, nil
+		} else if !IsNotFound(err) {
+			return false, err
+		}
+		// Walk prefixes from longest to shortest; the first one that
+		// exists decides the answer (object => empty, scalar => non-empty).
+		for i := len(path) - 1; i > 0; i-- {
+			val, err := store.Read(ctx, txn, Path(path[:i]))
+			if err != nil && !IsNotFound(err) {
+				return false, err
+			} else if err == nil {
+				if _, ok := val.(map[string]interface{}); ok {
+					return false, nil
+				}
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go b/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go
new file mode 100644
index 00000000..706d8afc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+	"math/big"
+
+	"github.com/open-policy-agent/opa/ast"
+	"github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// builtinCount returns the number of elements in an array/object/set, or the
+// number of runes in a string. NOTE(review): the operand-type error below
+// omits "string" even though strings are handled above; confirm whether the
+// message should mention it.
+func builtinCount(a ast.Value) (ast.Value, error) {
+	switch a := a.(type) {
+	case *ast.Array:
+		return ast.IntNumberTerm(a.Len()).Value, nil
+	case ast.Object:
+		return ast.IntNumberTerm(a.Len()).Value, nil
+	case ast.Set:
+		return ast.IntNumberTerm(a.Len()).Value, nil
+	case ast.String:
+		return ast.IntNumberTerm(len([]rune(a))).Value, nil
+	}
+	return nil, builtins.NewOperandTypeErr(1, a, "array", "object", "set")
+}
+
+// builtinSum returns the arithmetic sum of the numbers in an array or set.
+// Any non-number element yields an operand-element error.
+func builtinSum(a ast.Value) (ast.Value, error) {
+	switch a := a.(type) {
+	case *ast.Array:
+		sum := big.NewFloat(0)
+		err := a.Iter(func(x *ast.Term) error {
+			n, ok := x.Value.(ast.Number)
+			if !ok {
+				return builtins.NewOperandElementErr(1, a, x.Value, "number")
+			}
+			sum = new(big.Float).Add(sum, builtins.NumberToFloat(n))
+			return nil
+		})
+		return builtins.FloatToNumber(sum), err
+	case ast.Set:
+		sum := big.NewFloat(0)
+		err := a.Iter(func(x *ast.Term) error {
+			n, ok := x.Value.(ast.Number)
+			if !ok {
+				return builtins.NewOperandElementErr(1, a, x.Value, "number")
+			}
+			sum = new(big.Float).Add(sum, builtins.NumberToFloat(n))
+			return nil
+		})
+		return builtins.FloatToNumber(sum), err
+	}
+	return nil, builtins.NewOperandTypeErr(1, a, "set", "array")
+}
+
+// builtinProduct returns the product of the numbers in an array or set
+// (1 for an empty collection). Any non-number element yields an error.
+func builtinProduct(a ast.Value) (ast.Value, error) {
+	switch a := a.(type) {
+	case *ast.Array:
+		product := big.NewFloat(1)
+		err := a.Iter(func(x *ast.Term) error {
+			n, ok := x.Value.(ast.Number)
+			if !ok {
+				return builtins.NewOperandElementErr(1, a, x.Value, "number")
+			}
+			product = new(big.Float).Mul(product, builtins.NumberToFloat(n))
+			return nil
+		})
+		return builtins.FloatToNumber(product), err
+	case ast.Set:
+		product := big.NewFloat(1)
+		err := a.Iter(func(x *ast.Term) error {
+			n, ok := x.Value.(ast.Number)
+			if !ok {
+				return builtins.NewOperandElementErr(1, a, x.Value, "number")
+			}
+			product = new(big.Float).Mul(product, builtins.NumberToFloat(n))
+			return nil
+		})
+		return builtins.FloatToNumber(product), err
+	}
+	return nil, builtins.NewOperandTypeErr(1, a, "set", "array")
+}
+
+// builtinMax returns the greatest element of an array or set per ast.Compare
+// ordering. An empty collection yields BuiltinEmpty (undefined result).
+func builtinMax(a ast.Value) (ast.Value, error) {
+	switch a := a.(type) {
+	case *ast.Array:
+		if a.Len() == 0 {
+			return nil, BuiltinEmpty{}
+		}
+		// Null compares less than every other value, so it is a safe seed.
+		var max = ast.Value(ast.Null{})
+		a.Foreach(func(x *ast.Term) {
+			if ast.Compare(max, x.Value) <= 0 {
+				max = x.Value
+			}
+		})
+		return max, nil
+	case ast.Set:
+		if a.Len() == 0 {
+			return nil, BuiltinEmpty{}
+		}
+		max, err := a.Reduce(ast.NullTerm(), func(max *ast.Term, elem *ast.Term) (*ast.Term, error) {
+			if ast.Compare(max, elem) <= 0 {
+				return elem, nil
+			}
+			return max, nil
+		})
+		return max.Value, err
+	}
+
+	return nil, builtins.NewOperandTypeErr(1, a, "set", "array")
+}
+
+// builtinMin returns the least element of an array or set per ast.Compare
+// ordering. An empty collection yields BuiltinEmpty (undefined result).
+func builtinMin(a ast.Value) (ast.Value, error) {
+	switch a := a.(type) {
+	case *ast.Array:
+		if a.Len() == 0 {
+			return nil, BuiltinEmpty{}
+		}
+		// Seed with the first element; a null seed would never be replaced
+		// because null is the minimum of the ordering.
+		min := a.Elem(0).Value
+		a.Foreach(func(x *ast.Term) {
+			if ast.Compare(min, x.Value) >= 0 {
+				min = x.Value
+			}
+		})
+		return min, nil
+	case ast.Set:
+		if a.Len() == 0 {
+			return nil, BuiltinEmpty{}
+		}
+		min, err := a.Reduce(ast.NullTerm(), func(min *ast.Term, elem *ast.Term) (*ast.Term, error) {
+			// The null term is considered to be less than any other term,
+			// so in order for min of a set to make sense, we need to check
+			// for it.
+			if min.Value.Compare(ast.Null{}) == 0 {
+				return elem, nil
+			}
+
+			if ast.Compare(min, elem) >= 0 {
+				return elem, nil
+			}
+			return min, nil
+		})
+		return min.Value, err
+	}
+
+	return nil, builtins.NewOperandTypeErr(1, a, "set", "array")
+}
+
+// builtinSort returns the elements of an array or set in sorted order.
+func builtinSort(a ast.Value) (ast.Value, error) {
+	switch a := a.(type) {
+	case *ast.Array:
+		return a.Sorted(), nil
+	case ast.Set:
+		return a.Sorted(), nil
+	}
+	return nil, builtins.NewOperandTypeErr(1, a, "set", "array")
+}
+
+// builtinAll returns true if every element of the array or set equals the
+// boolean term true (vacuously true for empty collections).
+func builtinAll(a ast.Value) (ast.Value, error) {
+	switch val := a.(type) {
+	case ast.Set:
+		res := true
+		match := ast.BooleanTerm(true)
+		val.Until(func(term *ast.Term) bool {
+			if !match.Equal(term) {
+				res = false
+				return true
+			}
+			return false
+		})
+		return ast.Boolean(res), nil
+	case *ast.Array:
+		res := true
+		match := ast.BooleanTerm(true)
+		val.Until(func(term *ast.Term) bool {
+			if !match.Equal(term) {
+				res = false
+				return true
+			}
+			return false
+		})
+		return ast.Boolean(res), nil
+	default:
+		return nil, builtins.NewOperandTypeErr(1, a, "array", "set")
+	}
+}
+
+// builtinAny returns true if at least one element of the array or set equals
+// the boolean term true (false for empty collections).
+func builtinAny(a ast.Value) (ast.Value, error) {
+	switch val := a.(type) {
+	case ast.Set:
+		// Sets support O(1) membership, so short-circuit via Contains.
+		res := val.Len() > 0 && val.Contains(ast.BooleanTerm(true))
+		return ast.Boolean(res), nil
+	case *ast.Array:
+		res := false
+		match := ast.BooleanTerm(true)
+		val.Until(func(term *ast.Term) bool {
+			if match.Equal(term) {
+				res = true
+				return true
+			}
+			return false
+		})
+		return ast.Boolean(res), nil
+	default:
+		return nil, builtins.NewOperandTypeErr(1, a, "array", "set")
+	}
+}
+
+// init registers the aggregate builtins with the topdown evaluator.
+func init() {
+	RegisterFunctionalBuiltin1(ast.Count.Name, builtinCount)
+	RegisterFunctionalBuiltin1(ast.Sum.Name, builtinSum)
+	RegisterFunctionalBuiltin1(ast.Product.Name, builtinProduct)
+	RegisterFunctionalBuiltin1(ast.Max.Name, builtinMax)
+	RegisterFunctionalBuiltin1(ast.Min.Name, builtinMin)
+	RegisterFunctionalBuiltin1(ast.Sort.Name, builtinSort)
+	RegisterFunctionalBuiltin1(ast.Any.Name, builtinAny)
+	RegisterFunctionalBuiltin1(ast.All.Name, builtinAll)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go b/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go
new file mode 100644
index 00000000..b1acdce6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go
@@ -0,0 +1,180 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+	"math/big"
+
+	"fmt"
+
+	"github.com/open-policy-agent/opa/ast"
+	"github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// arithArity1 and arithArity2 are the shapes of unary and binary arithmetic
+// helpers adapted into builtins by builtinArithArity1/builtinArithArity2.
+type arithArity1 func(a *big.Float) (*big.Float, error)
+type arithArity2 func(a, b *big.Float) (*big.Float, error)
+
+// arithAbs returns the absolute value of a. Note it mutates a in place
+// (big.Float.Abs stores the result in the receiver).
+func arithAbs(a *big.Float) (*big.Float, error) {
+	return a.Abs(a), nil
+}
+
+var halfAwayFromZero = big.NewFloat(0.5)
+
+// arithRound rounds a to the nearest integer, with ties rounded away from
+// zero (hence the 0.5 offset in the direction of a's sign).
+func arithRound(a *big.Float) (*big.Float, error) {
+	var i *big.Int
+	if a.Signbit() {
+		i, _ = new(big.Float).Sub(a, halfAwayFromZero).Int(nil)
+	} else {
+		i, _ = new(big.Float).Add(a, halfAwayFromZero).Int(nil)
+	}
+	return new(big.Float).SetInt(i), nil
+}
+
+// arithCeil returns the smallest integer >= a. Int truncates toward zero, so
+// positive non-integers need one added back.
+func arithCeil(a *big.Float) (*big.Float, error) {
+	i, _ := a.Int(nil)
+	f := new(big.Float).SetInt(i)
+
+	if f.Signbit() || a.Cmp(f) == 0 {
+		return f, nil
+	}
+
+	return new(big.Float).Add(f, big.NewFloat(1.0)), nil
+}
+
+// arithFloor returns the largest integer <= a. Int truncates toward zero, so
+// negative non-integers need one subtracted.
+func arithFloor(a *big.Float) (*big.Float, error) {
+	i, _ := a.Int(nil)
+	f := new(big.Float).SetInt(i)
+
+	if !f.Signbit() || a.Cmp(f) == 0 {
+		return f, nil
+	}
+
+	return new(big.Float).Sub(f, big.NewFloat(1.0)), nil
+}
+
+// arithPlus returns a + b.
+func arithPlus(a, b *big.Float) (*big.Float, error) {
+	return new(big.Float).Add(a, b), nil
+}
+
+// arithMinus returns a - b.
+func arithMinus(a, b *big.Float) (*big.Float, error) {
+	return new(big.Float).Sub(a, b), nil
+}
+
+// arithMultiply returns a * b.
+func arithMultiply(a, b *big.Float) (*big.Float, error) {
+	return new(big.Float).Mul(a, b), nil
+}
+
+// arithDivide returns a / b, rejecting division by zero. The zero check goes
+// through Int64 so only an exactly-representable integer zero divisor trips it.
+func arithDivide(a, b *big.Float) (*big.Float, error) {
+	i, acc := b.Int64()
+	if acc == big.Exact && i == 0 {
+		return nil, fmt.Errorf("divide by zero")
+	}
+	return new(big.Float).Quo(a, b), nil
+}
+
+// arithRem returns the integer remainder a % b, rejecting a zero modulus.
+func arithRem(a, b *big.Int) (*big.Int, error) {
+	if b.Int64() == 0 {
+		return nil, fmt.Errorf("modulo by zero")
+	}
+	return new(big.Int).Rem(a, b), nil
+}
+
+// builtinArithArity1 adapts a unary big.Float helper into a builtin that
+// validates its single operand is a number.
+func builtinArithArity1(fn arithArity1) FunctionalBuiltin1 {
+	return func(a ast.Value) (ast.Value, error) {
+		n, err := builtins.NumberOperand(a, 1)
+		if err != nil {
+			return nil, err
+		}
+		f, err := fn(builtins.NumberToFloat(n))
+		if err != nil {
+			return nil, err
+		}
+		return builtins.FloatToNumber(f), nil
+	}
+}
+
+// builtinArithArity2 adapts a binary big.Float helper into a builtin that
+// validates both operands are numbers.
+func builtinArithArity2(fn arithArity2) FunctionalBuiltin2 {
+	return func(a, b ast.Value) (ast.Value, error) {
+		n1, err := builtins.NumberOperand(a, 1)
+		if err != nil {
+			return nil, err
+		}
+		n2, err := builtins.NumberOperand(b, 2)
+		if err != nil {
+			return nil, err
+		}
+		f, err := fn(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
+		if err != nil {
+			return nil, err
+		}
+		return builtins.FloatToNumber(f), nil
+	}
+}
+
+// builtinMinus is the "-" builtin: numeric subtraction when both operands are
+// numbers, set difference when both are sets; mixed operands are type errors.
+func builtinMinus(a, b ast.Value) (ast.Value, error) {
+
+	n1, ok1 := a.(ast.Number)
+	n2, ok2 := b.(ast.Number)
+
+	if ok1 && ok2 {
+		f, err := arithMinus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2))
+		if err != nil {
+			return nil, err
+		}
+		return builtins.FloatToNumber(f), nil
+	}
+
+	s1, ok3 := a.(ast.Set)
+	s2, ok4 := b.(ast.Set)
+
+	if ok3 && ok4 {
+		return s1.Diff(s2), nil
+	}
+
+	if !ok1 && !ok3 {
+		return nil, builtins.NewOperandTypeErr(1, a, "number", "set")
+	}
+
+	return nil, builtins.NewOperandTypeErr(2, b, "number", "set")
+}
+
+// builtinRem is the "%" builtin: integer remainder of two numbers. Operands
+// that are not representable as integers are rejected.
+func builtinRem(a, b ast.Value) (ast.Value, error) {
+	n1, ok1 := a.(ast.Number)
+	n2, ok2 := b.(ast.Number)
+
+	if ok1 && ok2 {
+
+		op1, err1 := builtins.NumberToInt(n1)
+		op2, err2 := builtins.NumberToInt(n2)
+
+		if err1 != nil || err2 != nil {
+			return nil, fmt.Errorf("modulo on floating-point number")
+		}
+
+		i, err := arithRem(op1, op2)
+		if err != nil {
+			return nil, err
+		}
+		return builtins.IntToNumber(i), nil
+	}
+
+	if !ok1 {
+		return nil, builtins.NewOperandTypeErr(1, a, "number")
+	}
+
+	return nil, builtins.NewOperandTypeErr(2, b, "number")
+}
+
+// init registers the arithmetic builtins with the topdown evaluator.
+func init() {
+	RegisterFunctionalBuiltin1(ast.Abs.Name, builtinArithArity1(arithAbs))
+	RegisterFunctionalBuiltin1(ast.Round.Name, builtinArithArity1(arithRound))
+	RegisterFunctionalBuiltin1(ast.Ceil.Name, builtinArithArity1(arithCeil))
+	RegisterFunctionalBuiltin1(ast.Floor.Name, builtinArithArity1(arithFloor))
+	RegisterFunctionalBuiltin2(ast.Plus.Name, builtinArithArity2(arithPlus))
+	RegisterFunctionalBuiltin2(ast.Minus.Name, builtinMinus)
+	RegisterFunctionalBuiltin2(ast.Multiply.Name, builtinArithArity2(arithMultiply))
+	RegisterFunctionalBuiltin2(ast.Divide.Name, builtinArithArity2(arithDivide))
+	RegisterFunctionalBuiltin2(ast.Rem.Name, builtinRem)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/array.go b/vendor/github.com/open-policy-agent/opa/topdown/array.go
new file mode 100644
index 00000000..ca3e0d9f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/array.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+	"github.com/open-policy-agent/opa/ast"
+	"github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// builtinArrayConcat returns a new array containing the elements of a
+// followed by the elements of b. Both operands must be arrays.
+func builtinArrayConcat(a, b ast.Value) (ast.Value, error) {
+	arrA, err := builtins.ArrayOperand(a, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	arrB, err := builtins.ArrayOperand(b, 2)
+	if err != nil {
+		return nil, err
+	}
+
+	arrC := make([]*ast.Term, arrA.Len()+arrB.Len())
+
+	i := 0
+	arrA.Foreach(func(elemA *ast.Term) {
+		arrC[i] = elemA
+		i++
+	})
+
+	arrB.Foreach(func(elemB *ast.Term) {
+		arrC[i] = elemB
+		i++
+	})
+
+	return ast.NewArray(arrC...), nil
+}
+
+// builtinArraySlice returns the slice of array a from startIndex i (inclusive)
+// to stopIndex j (exclusive). Out-of-range indices are clamped rather than
+// rejected, so the builtin never errors on index values alone.
+func builtinArraySlice(a, i, j ast.Value) (ast.Value, error) {
+	arr, err := builtins.ArrayOperand(a, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	startIndex, err := builtins.IntOperand(i, 2)
+	if err != nil {
+		return nil, err
+	}
+
+	stopIndex, err := builtins.IntOperand(j, 3)
+	if err != nil {
+		return nil, err
+	}
+
+	// Clamp stopIndex to avoid out-of-range errors. If negative, clamp to zero.
+	// Otherwise, clamp to length of array.
+	if stopIndex < 0 {
+		stopIndex = 0
+	} else if stopIndex > arr.Len() {
+		stopIndex = arr.Len()
+	}
+
+	// Clamp startIndex to avoid out-of-range errors. If negative, clamp to zero.
+	// Otherwise, clamp to stopIndex to avoid cases like arr[1:0].
+	if startIndex < 0 {
+		startIndex = 0
+	} else if startIndex > stopIndex {
+		startIndex = stopIndex
+	}
+
+	return arr.Slice(startIndex, stopIndex), nil
+}
+
+// init registers the array builtins with the topdown evaluator.
+func init() {
+	RegisterFunctionalBuiltin2(ast.ArrayConcat.Name, builtinArrayConcat)
+	RegisterFunctionalBuiltin3(ast.ArraySlice.Name, builtinArraySlice)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/binary.go b/vendor/github.com/open-policy-agent/opa/topdown/binary.go
new file mode 100644
index 00000000..3cab5def
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/binary.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+	"github.com/open-policy-agent/opa/ast"
+	"github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// builtinBinaryAnd returns the intersection of two sets. Both operands must
+// be sets; anything else is an operand-type error.
+func builtinBinaryAnd(a ast.Value, b ast.Value) (ast.Value, error) {
+
+	s1, err := builtins.SetOperand(a, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	s2, err := builtins.SetOperand(b, 2)
+	if err != nil {
+		return nil, err
+	}
+
+	return s1.Intersect(s2), nil
+}
+
+// builtinBinaryOr returns the union of two sets. Both operands must be sets;
+// anything else is an operand-type error.
+func builtinBinaryOr(a ast.Value, b ast.Value) (ast.Value, error) {
+
+	s1, err := builtins.SetOperand(a, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	s2, err := builtins.SetOperand(b, 2)
+	if err != nil {
+		return nil, err
+	}
+
+	return s1.Union(s2), nil
+}
+
+// init registers the set "and"/"or" builtins with the topdown evaluator.
+func init() {
+	RegisterFunctionalBuiltin2(ast.And.Name, builtinBinaryAnd)
+	RegisterFunctionalBuiltin2(ast.Or.Name, builtinBinaryOr)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bindings.go b/vendor/github.com/open-policy-agent/opa/topdown/bindings.go
new file mode 100644
index 00000000..e6c4f3c5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/bindings.go
@@ -0,0 +1,386 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+type undo struct {
+ k *ast.Term
+ u *bindings
+}
+
+func (u *undo) Undo() {
+ if u == nil {
+ // Allow call on zero value of Undo for ease-of-use.
+ return
+ }
+ if u.u == nil {
+ // Call on empty unifier undos a no-op unify operation.
+ return
+ }
+ u.u.delete(u.k)
+}
+
+type bindings struct {
+ id uint64
+ values bindingsArrayHashmap
+ instr *Instrumentation
+}
+
+func newBindings(id uint64, instr *Instrumentation) *bindings {
+ values := newBindingsArrayHashmap()
+ return &bindings{id, values, instr}
+}
+
+func (u *bindings) Iter(caller *bindings, iter func(*ast.Term, *ast.Term) error) error {
+
+ var err error
+
+ u.values.Iter(func(k *ast.Term, v value) bool {
+ if err != nil {
+ return true
+ }
+ err = iter(k, u.PlugNamespaced(k, caller))
+
+ return false
+ })
+
+ return err
+}
+
+func (u *bindings) Namespace(x ast.Node, caller *bindings) {
+ vis := namespacingVisitor{
+ b: u,
+ caller: caller,
+ }
+ ast.NewGenericVisitor(vis.Visit).Walk(x)
+}
+
+func (u *bindings) Plug(a *ast.Term) *ast.Term {
+ return u.PlugNamespaced(a, nil)
+}
+
+func (u *bindings) PlugNamespaced(a *ast.Term, caller *bindings) *ast.Term {
+ if u != nil {
+ u.instr.startTimer(evalOpPlug)
+ t := u.plugNamespaced(a, caller)
+ u.instr.stopTimer(evalOpPlug)
+ return t
+ }
+
+ return u.plugNamespaced(a, caller)
+}
+
+func (u *bindings) plugNamespaced(a *ast.Term, caller *bindings) *ast.Term {
+ switch v := a.Value.(type) {
+ case ast.Var:
+ b, next := u.apply(a)
+ if a != b || u != next {
+ return next.plugNamespaced(b, caller)
+ }
+ return u.namespaceVar(b, caller)
+ case *ast.Array:
+ cpy := *a
+ arr := make([]*ast.Term, v.Len())
+ for i := 0; i < len(arr); i++ {
+ arr[i] = u.plugNamespaced(v.Elem(i), caller)
+ }
+ cpy.Value = ast.NewArray(arr...)
+ return &cpy
+ case ast.Object:
+ if a.IsGround() {
+ return a
+ }
+ cpy := *a
+ cpy.Value, _ = v.Map(func(k, v *ast.Term) (*ast.Term, *ast.Term, error) {
+ return u.plugNamespaced(k, caller), u.plugNamespaced(v, caller), nil
+ })
+ return &cpy
+ case ast.Set:
+ cpy := *a
+ cpy.Value, _ = v.Map(func(x *ast.Term) (*ast.Term, error) {
+ return u.plugNamespaced(x, caller), nil
+ })
+ return &cpy
+ case ast.Ref:
+ cpy := *a
+ ref := make(ast.Ref, len(v))
+ for i := 0; i < len(ref); i++ {
+ ref[i] = u.plugNamespaced(v[i], caller)
+ }
+ cpy.Value = ref
+ return &cpy
+ }
+ return a
+}
+
+func (u *bindings) bind(a *ast.Term, b *ast.Term, other *bindings, und *undo) {
+ u.values.Put(a, value{
+ u: other,
+ v: b,
+ })
+ und.k = a
+ und.u = u
+}
+
+func (u *bindings) apply(a *ast.Term) (*ast.Term, *bindings) {
+ // Early exit for non-var terms. Only vars are bound in the binding list,
+ // so the lookup below will always fail for non-var terms. In some cases,
+ // the lookup may be expensive as it has to hash the term (which for large
+ // inputs can be costly.)
+ _, ok := a.Value.(ast.Var)
+ if !ok {
+ return a, u
+ }
+ val, ok := u.get(a)
+ if !ok {
+ return a, u
+ }
+ return val.u.apply(val.v)
+}
+
+func (u *bindings) delete(v *ast.Term) {
+ u.values.Delete(v)
+}
+
+func (u *bindings) get(v *ast.Term) (value, bool) {
+ if u == nil {
+ return value{}, false
+ }
+ return u.values.Get(v)
+}
+
+func (u *bindings) String() string {
+ if u == nil {
+ return "()"
+ }
+ var buf []string
+ u.values.Iter(func(a *ast.Term, b value) bool {
+ buf = append(buf, fmt.Sprintf("%v: %v", a, b))
+ return false
+ })
+ return fmt.Sprintf("({%v}, %v)", strings.Join(buf, ", "), u.id)
+}
+
+func (u *bindings) namespaceVar(v *ast.Term, caller *bindings) *ast.Term {
+ name, ok := v.Value.(ast.Var)
+ if !ok {
+ panic("illegal value")
+ }
+ if caller != nil && caller != u {
+ // Root documents (i.e., data, input) should never be namespaced because they
+ // are globally unique.
+ if !ast.RootDocumentNames.Contains(v) {
+ return ast.NewTerm(ast.Var(string(name) + fmt.Sprint(u.id)))
+ }
+ }
+ return v
+}
+
+type value struct {
+ u *bindings
+ v *ast.Term
+}
+
+func (v value) String() string {
+ return fmt.Sprintf("(%v, %d)", v.v, v.u.id)
+}
+
+func (v value) equal(other *value) bool {
+ if v.u == other.u {
+ return v.v.Equal(other.v)
+ }
+ return false
+}
+
+type namespacingVisitor struct {
+ b *bindings
+ caller *bindings
+}
+
+func (vis namespacingVisitor) Visit(x interface{}) bool {
+ switch x := x.(type) {
+ case *ast.ArrayComprehension:
+ x.Term = vis.namespaceTerm(x.Term)
+ ast.NewGenericVisitor(vis.Visit).Walk(x.Body)
+ return true
+ case *ast.SetComprehension:
+ x.Term = vis.namespaceTerm(x.Term)
+ ast.NewGenericVisitor(vis.Visit).Walk(x.Body)
+ return true
+ case *ast.ObjectComprehension:
+ x.Key = vis.namespaceTerm(x.Key)
+ x.Value = vis.namespaceTerm(x.Value)
+ ast.NewGenericVisitor(vis.Visit).Walk(x.Body)
+ return true
+ case *ast.Expr:
+ switch terms := x.Terms.(type) {
+ case []*ast.Term:
+ for i := 1; i < len(terms); i++ {
+ terms[i] = vis.namespaceTerm(terms[i])
+ }
+ case *ast.Term:
+ x.Terms = vis.namespaceTerm(terms)
+ }
+ for _, w := range x.With {
+ w.Target = vis.namespaceTerm(w.Target)
+ w.Value = vis.namespaceTerm(w.Value)
+ }
+ }
+ return false
+}
+
+func (vis namespacingVisitor) namespaceTerm(a *ast.Term) *ast.Term {
+ switch v := a.Value.(type) {
+ case ast.Var:
+ return vis.b.namespaceVar(a, vis.caller)
+ case *ast.Array:
+ cpy := *a
+ arr := make([]*ast.Term, v.Len())
+ for i := 0; i < len(arr); i++ {
+ arr[i] = vis.namespaceTerm(v.Elem(i))
+ }
+ cpy.Value = ast.NewArray(arr...)
+ return &cpy
+ case ast.Object:
+ if a.IsGround() {
+ return a
+ }
+ cpy := *a
+ cpy.Value, _ = v.Map(func(k, v *ast.Term) (*ast.Term, *ast.Term, error) {
+ return vis.namespaceTerm(k), vis.namespaceTerm(v), nil
+ })
+ return &cpy
+ case ast.Set:
+ cpy := *a
+ cpy.Value, _ = v.Map(func(x *ast.Term) (*ast.Term, error) {
+ return vis.namespaceTerm(x), nil
+ })
+ return &cpy
+ case ast.Ref:
+ cpy := *a
+ ref := make(ast.Ref, len(v))
+ for i := 0; i < len(ref); i++ {
+ ref[i] = vis.namespaceTerm(v[i])
+ }
+ cpy.Value = ref
+ return &cpy
+ }
+ return a
+}
+
+const maxLinearScan = 16
+
+// bindingsArrayHashmap uses an array with linear scan instead of a hash map for smaller numbers of entries. Hash maps start to show a performance advantage only after about 16 keys.
+type bindingsArrayHashmap struct {
+ n int // Entries in the array.
+ a *[maxLinearScan]bindingArrayKeyValue
+ m map[ast.Var]bindingArrayKeyValue
+}
+
+type bindingArrayKeyValue struct {
+ key *ast.Term
+ value value
+}
+
+func newBindingsArrayHashmap() bindingsArrayHashmap {
+ return bindingsArrayHashmap{}
+}
+
+func (b *bindingsArrayHashmap) Put(key *ast.Term, value value) {
+ if b.m == nil {
+ if b.a == nil {
+ b.a = new([maxLinearScan]bindingArrayKeyValue)
+ } else if i := b.find(key); i >= 0 {
+ (*b.a)[i].value = value
+ return
+ }
+
+ if b.n < maxLinearScan {
+ (*b.a)[b.n] = bindingArrayKeyValue{key, value}
+ b.n++
+ return
+ }
+
+ // Array is full, revert to using the hash map instead.
+
+ b.m = make(map[ast.Var]bindingArrayKeyValue, maxLinearScan+1)
+ for _, kv := range *b.a {
+ b.m[kv.key.Value.(ast.Var)] = bindingArrayKeyValue{kv.key, kv.value}
+ }
+ b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value}
+
+ b.n = 0
+ return
+ }
+
+ b.m[key.Value.(ast.Var)] = bindingArrayKeyValue{key, value}
+}
+
+func (b *bindingsArrayHashmap) Get(key *ast.Term) (value, bool) {
+ if b.m == nil {
+ if i := b.find(key); i >= 0 {
+ return (*b.a)[i].value, true
+ }
+
+ return value{}, false
+ }
+
+ v, ok := b.m[key.Value.(ast.Var)]
+ if ok {
+ return v.value, true
+ }
+
+ return value{}, false
+}
+
+func (b *bindingsArrayHashmap) Delete(key *ast.Term) {
+ if b.m == nil {
+ if i := b.find(key); i >= 0 {
+ n := b.n - 1
+ if i < n {
+ (*b.a)[i] = (*b.a)[n]
+ }
+
+ b.n = n
+ }
+ return
+ }
+
+ delete(b.m, key.Value.(ast.Var))
+}
+
+func (b *bindingsArrayHashmap) Iter(f func(k *ast.Term, v value) bool) {
+ if b.m == nil {
+ for i := 0; i < b.n; i++ {
+ if f((*b.a)[i].key, (*b.a)[i].value) {
+ return
+ }
+ }
+ return
+ }
+
+ for _, v := range b.m {
+ if f(v.key, v.value) {
+ return
+ }
+ }
+}
+
+func (b *bindingsArrayHashmap) find(key *ast.Term) int {
+ v := key.Value.(ast.Var)
+ for i := 0; i < b.n; i++ {
+ if (*b.a)[i].key.Value.(ast.Var) == v {
+ return i
+ }
+ }
+
+ return -1
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/bits.go b/vendor/github.com/open-policy-agent/opa/topdown/bits.go
new file mode 100644
index 00000000..7a63c0df
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/bits.go
@@ -0,0 +1,88 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "math/big"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+type bitsArity1 func(a *big.Int) (*big.Int, error)
+type bitsArity2 func(a, b *big.Int) (*big.Int, error)
+
+func bitsOr(a, b *big.Int) (*big.Int, error) {
+ return new(big.Int).Or(a, b), nil
+}
+
+func bitsAnd(a, b *big.Int) (*big.Int, error) {
+ return new(big.Int).And(a, b), nil
+}
+
+func bitsNegate(a *big.Int) (*big.Int, error) {
+ return new(big.Int).Not(a), nil
+}
+
+func bitsXOr(a, b *big.Int) (*big.Int, error) {
+ return new(big.Int).Xor(a, b), nil
+}
+
+func bitsShiftLeft(a, b *big.Int) (*big.Int, error) {
+ if b.Sign() == -1 {
+ return nil, builtins.NewOperandErr(2, "must be an unsigned integer number but got a negative integer")
+ }
+ shift := uint(b.Uint64())
+ return new(big.Int).Lsh(a, shift), nil
+}
+
+func bitsShiftRight(a, b *big.Int) (*big.Int, error) {
+ if b.Sign() == -1 {
+ return nil, builtins.NewOperandErr(2, "must be an unsigned integer number but got a negative integer")
+ }
+ shift := uint(b.Uint64())
+ return new(big.Int).Rsh(a, shift), nil
+}
+
+func builtinBitsArity1(fn bitsArity1) BuiltinFunc {
+ return func(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ i, err := builtins.BigIntOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+ iOut, err := fn(i)
+ if err != nil {
+ return err
+ }
+ return iter(ast.NewTerm(builtins.IntToNumber(iOut)))
+ }
+}
+
+func builtinBitsArity2(fn bitsArity2) BuiltinFunc {
+ return func(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ i1, err := builtins.BigIntOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+ i2, err := builtins.BigIntOperand(operands[1].Value, 2)
+ if err != nil {
+ return err
+ }
+ iOut, err := fn(i1, i2)
+ if err != nil {
+ return err
+ }
+ return iter(ast.NewTerm(builtins.IntToNumber(iOut)))
+ }
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.BitsOr.Name, builtinBitsArity2(bitsOr))
+ RegisterBuiltinFunc(ast.BitsAnd.Name, builtinBitsArity2(bitsAnd))
+ RegisterBuiltinFunc(ast.BitsNegate.Name, builtinBitsArity1(bitsNegate))
+ RegisterBuiltinFunc(ast.BitsXOr.Name, builtinBitsArity2(bitsXOr))
+ RegisterBuiltinFunc(ast.BitsShiftLeft.Name, builtinBitsArity2(bitsShiftLeft))
+ RegisterBuiltinFunc(ast.BitsShiftRight.Name, builtinBitsArity2(bitsShiftRight))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go
new file mode 100644
index 00000000..e2578a56
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/builtins.go
@@ -0,0 +1,170 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/topdown/cache"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+type (
+ // FunctionalBuiltin1 is deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error)
+
+ // FunctionalBuiltin2 is deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error)
+
+ // FunctionalBuiltin3 is deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error)
+
+ // FunctionalBuiltin4 is deprecated. Use BuiltinFunc instead.
+ FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error)
+
+ // BuiltinContext contains context from the evaluator that may be used by
+ // built-in functions.
+ BuiltinContext struct {
+ Context context.Context // request context that was passed when query started
+ Metrics metrics.Metrics // metrics registry for recording built-in specific metrics
+ Seed io.Reader // randomization seed
+ Time *ast.Term // wall clock time
+ Cancel Cancel // atomic value that signals evaluation to halt
+ Runtime *ast.Term // runtime information on the OPA instance
+ Cache builtins.Cache // built-in function state cache
+ InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache
+ Location *ast.Location // location of built-in call
+ Tracers []Tracer // Deprecated: Use QueryTracers instead
+ QueryTracers []QueryTracer // tracer objects for trace() built-in function
+ TraceEnabled bool // indicates whether tracing is enabled for the evaluation
+ QueryID uint64 // identifies query being evaluated
+ ParentID uint64 // identifies parent of query being evaluated
+ }
+
+ // BuiltinFunc defines an interface for implementing built-in functions.
+ // The built-in function is called with the plugged operands from the call
+ // (including the output operands.) The implementation should evaluate the
+ // operands and invoke the iterator for each successful/defined output
+ // value.
+ BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error
+)
+
+// RegisterBuiltinFunc adds a new built-in function to the evaluation engine.
+func RegisterBuiltinFunc(name string, f BuiltinFunc) {
+ builtinFunctions[name] = builtinErrorWrapper(name, f)
+}
+
+// RegisterFunctionalBuiltin1 is deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) {
+ builtinFunctions[name] = functionalWrapper1(name, fun)
+}
+
+// RegisterFunctionalBuiltin2 is deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) {
+ builtinFunctions[name] = functionalWrapper2(name, fun)
+}
+
+// RegisterFunctionalBuiltin3 is deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) {
+ builtinFunctions[name] = functionalWrapper3(name, fun)
+}
+
+// RegisterFunctionalBuiltin4 is deprecated. Use RegisterBuiltinFunc instead.
+func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) {
+ builtinFunctions[name] = functionalWrapper4(name, fun)
+}
+
+// GetBuiltin returns a built-in function implementation, nil if no built-in found.
+func GetBuiltin(name string) BuiltinFunc {
+ return builtinFunctions[name]
+}
+
+// BuiltinEmpty is deprecated.
+type BuiltinEmpty struct{}
+
+func (BuiltinEmpty) Error() string {
+ return ""
+}
+
+var builtinFunctions = map[string]BuiltinFunc{}
+
+func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ err := fn(bctx, args, iter)
+ if err == nil {
+ return nil
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value, args[1].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value, args[1].Value, args[2].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc {
+ return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ if _, empty := err.(BuiltinEmpty); empty {
+ return nil
+ }
+ return handleBuiltinErr(name, bctx.Location, err)
+ }
+}
+
+func handleBuiltinErr(name string, loc *ast.Location, err error) error {
+ switch err := err.(type) {
+ case BuiltinEmpty:
+ return nil
+ case *Error, Halt:
+ return err
+ case builtins.ErrOperand:
+ return &Error{
+ Code: TypeErr,
+ Message: fmt.Sprintf("%v: %v", string(name), err.Error()),
+ Location: loc,
+ }
+ default:
+ return &Error{
+ Code: BuiltinErr,
+ Message: fmt.Sprintf("%v: %v", string(name), err.Error()),
+ Location: loc,
+ }
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go b/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go
new file mode 100644
index 00000000..fa0e0a28
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go
@@ -0,0 +1,237 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package builtins contains utilities for implementing built-in functions.
+package builtins
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// Cache defines the built-in cache used by the top-down evaluation. The keys
+// must be comparable and should not be of type string.
+type Cache map[interface{}]interface{}
+
+// Put updates the cache for the named built-in.
+func (c Cache) Put(k, v interface{}) {
+ c[k] = v
+}
+
+// Get returns the cached value for k.
+func (c Cache) Get(k interface{}) (interface{}, bool) {
+ v, ok := c[k]
+ return v, ok
+}
+
+// ErrOperand represents an invalid operand has been passed to a built-in
+// function. Built-ins should return ErrOperand to indicate a type error has
+// occurred.
+type ErrOperand string
+
+func (err ErrOperand) Error() string {
+ return string(err)
+}
+
+// NewOperandErr returns a generic operand error.
+func NewOperandErr(pos int, f string, a ...interface{}) error {
+ f = fmt.Sprintf("operand %v ", pos) + f
+ return ErrOperand(fmt.Sprintf(f, a...))
+}
+
+// NewOperandTypeErr returns an operand error indicating the operand's type was wrong.
+func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error {
+
+ if len(expected) == 1 {
+ return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.TypeName(got))
+ }
+
+ return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.TypeName(got))
+}
+
+// NewOperandElementErr returns an operand error indicating an element in the
+// composite operand was wrong.
+func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error {
+
+ tpe := ast.TypeName(composite)
+
+ if len(expected) == 1 {
+ return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.TypeName(got))
+ }
+
+ return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.TypeName(got))
+}
+
+// NewOperandEnumErr returns an operand error indicating a value was wrong.
+func NewOperandEnumErr(pos int, expected ...string) error {
+
+ if len(expected) == 1 {
+ return NewOperandErr(pos, "must be %v", expected[0])
+ }
+
+ return NewOperandErr(pos, "must be one of {%v}", strings.Join(expected, ", "))
+}
+
+// IntOperand converts x to an int. If the cast fails, a descriptive error is
+// returned.
+func IntOperand(x ast.Value, pos int) (int, error) {
+ n, ok := x.(ast.Number)
+ if !ok {
+ return 0, NewOperandTypeErr(pos, x, "number")
+ }
+
+ i, ok := n.Int()
+ if !ok {
+ return 0, NewOperandErr(pos, "must be integer number but got floating-point number")
+ }
+
+ return i, nil
+}
+
+// BigIntOperand converts x to a big int. If the cast fails, a descriptive error
+// is returned.
+func BigIntOperand(x ast.Value, pos int) (*big.Int, error) {
+ n, err := NumberOperand(x, 1)
+ if err != nil {
+ return nil, NewOperandTypeErr(pos, x, "integer")
+ }
+ bi, err := NumberToInt(n)
+ if err != nil {
+ return nil, NewOperandErr(pos, "must be integer number but got floating-point number")
+ }
+
+ return bi, nil
+}
+
+// NumberOperand converts x to a number. If the cast fails, a descriptive error is
+// returned.
+func NumberOperand(x ast.Value, pos int) (ast.Number, error) {
+ n, ok := x.(ast.Number)
+ if !ok {
+ return ast.Number(""), NewOperandTypeErr(pos, x, "number")
+ }
+ return n, nil
+}
+
+// SetOperand converts x to a set. If the cast fails, a descriptive error is
+// returned.
+func SetOperand(x ast.Value, pos int) (ast.Set, error) {
+ s, ok := x.(ast.Set)
+ if !ok {
+ return nil, NewOperandTypeErr(pos, x, "set")
+ }
+ return s, nil
+}
+
+// StringOperand converts x to a string. If the cast fails, a descriptive error is
+// returned.
+func StringOperand(x ast.Value, pos int) (ast.String, error) {
+ s, ok := x.(ast.String)
+ if !ok {
+ return ast.String(""), NewOperandTypeErr(pos, x, "string")
+ }
+ return s, nil
+}
+
+// ObjectOperand converts x to an object. If the cast fails, a descriptive
+// error is returned.
+func ObjectOperand(x ast.Value, pos int) (ast.Object, error) {
+ o, ok := x.(ast.Object)
+ if !ok {
+ return nil, NewOperandTypeErr(pos, x, "object")
+ }
+ return o, nil
+}
+
+// ArrayOperand converts x to an array. If the cast fails, a descriptive
+// error is returned.
+func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) {
+ a, ok := x.(*ast.Array)
+ if !ok {
+ return ast.NewArray(), NewOperandTypeErr(pos, x, "array")
+ }
+ return a, nil
+}
+
+// NumberToFloat converts n to a big float.
+func NumberToFloat(n ast.Number) *big.Float {
+ r, ok := new(big.Float).SetString(string(n))
+ if !ok {
+ panic("illegal value")
+ }
+ return r
+}
+
+// FloatToNumber converts f to a number.
+func FloatToNumber(f *big.Float) ast.Number {
+ return ast.Number(f.Text('g', -1))
+}
+
+// NumberToInt converts n to a big int.
+// If n cannot be converted to a big int, an error is returned.
+func NumberToInt(n ast.Number) (*big.Int, error) {
+ f := NumberToFloat(n)
+ r, accuracy := f.Int(nil)
+ if accuracy != big.Exact {
+ return nil, fmt.Errorf("illegal value")
+ }
+ return r, nil
+}
+
+// IntToNumber converts i to a number.
+func IntToNumber(i *big.Int) ast.Number {
+ return ast.Number(i.String())
+}
+
+// StringSliceOperand converts x to a []string. If the cast fails, a descriptive error is
+// returned.
+func StringSliceOperand(x ast.Value, pos int) ([]string, error) {
+ a, err := ArrayOperand(x, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ var f = make([]string, a.Len())
+ for k := 0; k < a.Len(); k++ {
+ b := a.Elem(k)
+ c, ok := b.Value.(ast.String)
+ if !ok {
+ return nil, NewOperandElementErr(pos, x, b.Value, "[]string")
+ }
+
+ f[k] = string(c)
+ }
+
+ return f, nil
+}
+
+// RuneSliceOperand converts x to a []rune. If the cast fails, a descriptive error is
+// returned.
+func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) {
+ a, err := ArrayOperand(x, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ var f = make([]rune, a.Len())
+ for k := 0; k < a.Len(); k++ {
+ b := a.Elem(k)
+ c, ok := b.Value.(ast.String)
+ if !ok {
+ return nil, NewOperandElementErr(pos, x, b.Value, "string")
+ }
+
+ d := []rune(string(c))
+ if len(d) != 1 {
+ return nil, NewOperandElementErr(pos, x, b.Value, "rune")
+ }
+
+ f[k] = d[0]
+ }
+
+ return f, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache.go
new file mode 100644
index 00000000..96c250e1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/cache.go
@@ -0,0 +1,237 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/util"
+)
+
+type virtualCache struct {
+ stack []*virtualCacheElem
+}
+
+type virtualCacheElem struct {
+ value *ast.Term
+ children *util.HashMap
+}
+
+func newVirtualCache() *virtualCache {
+ cache := &virtualCache{}
+ cache.Push()
+ return cache
+}
+
+func (c *virtualCache) Push() {
+ c.stack = append(c.stack, newVirtualCacheElem())
+}
+
+func (c *virtualCache) Pop() {
+ c.stack = c.stack[:len(c.stack)-1]
+}
+
+func (c *virtualCache) Get(ref ast.Ref) *ast.Term {
+ node := c.stack[len(c.stack)-1]
+ for i := 0; i < len(ref); i++ {
+ x, ok := node.children.Get(ref[i])
+ if !ok {
+ return nil
+ }
+ node = x.(*virtualCacheElem)
+ }
+ return node.value
+}
+
+func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) {
+ node := c.stack[len(c.stack)-1]
+ for i := 0; i < len(ref); i++ {
+ x, ok := node.children.Get(ref[i])
+ if ok {
+ node = x.(*virtualCacheElem)
+ } else {
+ next := newVirtualCacheElem()
+ node.children.Put(ref[i], next)
+ node = next
+ }
+ }
+ node.value = value
+}
+
+func newVirtualCacheElem() *virtualCacheElem {
+ return &virtualCacheElem{children: newVirtualCacheHashMap()}
+}
+
+func newVirtualCacheHashMap() *util.HashMap {
+ return util.NewHashMap(func(a, b util.T) bool {
+ return a.(*ast.Term).Equal(b.(*ast.Term))
+ }, func(x util.T) int {
+ return x.(*ast.Term).Hash()
+ })
+}
+
+// baseCache implements a trie structure to cache base documents read out of
+// storage. Values inserted into the cache may contain other values that were
+// previously inserted. In this case, the previous values are erased from the
+// structure.
+type baseCache struct {
+ root *baseCacheElem
+}
+
+func newBaseCache() *baseCache {
+ return &baseCache{
+ root: newBaseCacheElem(),
+ }
+}
+
+func (c *baseCache) Get(ref ast.Ref) ast.Value {
+ node := c.root
+ for i := 0; i < len(ref); i++ {
+ node = node.children[ref[i].Value]
+ if node == nil {
+ return nil
+ } else if node.value != nil {
+ result, err := node.value.Find(ref[i+1:])
+ if err != nil {
+ return nil
+ }
+ return result
+ }
+ }
+ return nil
+}
+
+func (c *baseCache) Put(ref ast.Ref, value ast.Value) {
+ node := c.root
+ for i := 0; i < len(ref); i++ {
+ if child, ok := node.children[ref[i].Value]; ok {
+ node = child
+ } else {
+ child := newBaseCacheElem()
+ node.children[ref[i].Value] = child
+ node = child
+ }
+ }
+ node.set(value)
+}
+
+type baseCacheElem struct {
+ value ast.Value
+ children map[ast.Value]*baseCacheElem
+}
+
+func newBaseCacheElem() *baseCacheElem {
+ return &baseCacheElem{
+ children: map[ast.Value]*baseCacheElem{},
+ }
+}
+
+func (e *baseCacheElem) set(value ast.Value) {
+ e.value = value
+ e.children = map[ast.Value]*baseCacheElem{}
+}
+
+type refStack struct {
+ sl []refStackElem
+}
+
+type refStackElem struct {
+ refs []ast.Ref
+}
+
+func newRefStack() *refStack {
+ return &refStack{}
+}
+
+func (s *refStack) Push(refs []ast.Ref) {
+ s.sl = append(s.sl, refStackElem{refs: refs})
+}
+
+func (s *refStack) Pop() {
+ s.sl = s.sl[:len(s.sl)-1]
+}
+
+func (s *refStack) Prefixed(ref ast.Ref) bool {
+ if s != nil {
+ for i := len(s.sl) - 1; i >= 0; i-- {
+ for j := range s.sl[i].refs {
+ if ref.HasPrefix(s.sl[i].refs[j]) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+type comprehensionCache struct {
+ stack []map[*ast.Term]*comprehensionCacheElem
+}
+
+type comprehensionCacheElem struct {
+ value *ast.Term
+ children *util.HashMap
+}
+
+func newComprehensionCache() *comprehensionCache {
+ cache := &comprehensionCache{}
+ cache.Push()
+ return cache
+}
+
+func (c *comprehensionCache) Push() {
+ c.stack = append(c.stack, map[*ast.Term]*comprehensionCacheElem{})
+}
+
+func (c *comprehensionCache) Pop() {
+ c.stack = c.stack[:len(c.stack)-1]
+}
+
+func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) {
+ elem, ok := c.stack[len(c.stack)-1][t]
+ return elem, ok
+}
+
+func (c *comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) {
+ c.stack[len(c.stack)-1][t] = elem
+}
+
+func newComprehensionCacheElem() *comprehensionCacheElem {
+ return &comprehensionCacheElem{children: newComprehensionCacheHashMap()}
+}
+
+func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term {
+ node := c
+ for i := 0; i < len(key); i++ {
+ x, ok := node.children.Get(key[i])
+ if !ok {
+ return nil
+ }
+ node = x.(*comprehensionCacheElem)
+ }
+ return node.value
+}
+
+func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) {
+ node := c
+ for i := 0; i < len(key); i++ {
+ x, ok := node.children.Get(key[i])
+ if ok {
+ node = x.(*comprehensionCacheElem)
+ } else {
+ next := newComprehensionCacheElem()
+ node.children.Put(key[i], next)
+ node = next
+ }
+ }
+ node.value = value
+}
+
+func newComprehensionCacheHashMap() *util.HashMap {
+ return util.NewHashMap(func(a, b util.T) bool {
+ return a.(*ast.Term).Equal(b.(*ast.Term))
+ }, func(x util.T) int {
+ return x.(*ast.Term).Hash()
+ })
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go b/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go
new file mode 100644
index 00000000..00608409
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go
@@ -0,0 +1,161 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package cache defines the inter-query cache interface that can cache data across queries
+package cache
+
+import (
+ "container/list"
+
+ "github.com/open-policy-agent/opa/ast"
+
+ "sync"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+const (
+ defaultMaxSizeBytes = int64(0) // unlimited
+)
+
+// Config represents the configuration of the inter-query cache.
+type Config struct {
+ InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"`
+}
+
+// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize.
+type InterQueryBuiltinCacheConfig struct {
+ MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"`
+}
+
+// ParseCachingConfig returns the config for the inter-query cache.
+func ParseCachingConfig(raw []byte) (*Config, error) {
+ if raw == nil {
+ maxSize := new(int64)
+ *maxSize = defaultMaxSizeBytes
+ return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize}}, nil
+ }
+
+ var config Config
+
+ if err := util.Unmarshal(raw, &config); err == nil {
+ if err = config.validateAndInjectDefaults(); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+
+ return &config, nil
+}
+
+func (c *Config) validateAndInjectDefaults() error {
+ if c.InterQueryBuiltinCache.MaxSizeBytes == nil {
+ maxSize := new(int64)
+ *maxSize = defaultMaxSizeBytes
+ c.InterQueryBuiltinCache.MaxSizeBytes = maxSize
+ }
+ return nil
+}
+
+// InterQueryCacheValue defines the interface for the data that the inter-query cache holds.
+type InterQueryCacheValue interface {
+ SizeInBytes() int64
+}
+
+// InterQueryCache defines the interface for the inter-query cache.
+type InterQueryCache interface {
+ Get(key ast.Value) (value InterQueryCacheValue, found bool)
+ Insert(key ast.Value, value InterQueryCacheValue) int
+ Delete(key ast.Value)
+ UpdateConfig(config *Config)
+}
+
+// NewInterQueryCache returns a new inter-query cache.
+func NewInterQueryCache(config *Config) InterQueryCache {
+ return &cache{
+ items: map[string]InterQueryCacheValue{},
+ usage: 0,
+ config: config,
+ l: list.New(),
+ }
+}
+
+type cache struct {
+ items map[string]InterQueryCacheValue
+ usage int64
+ config *Config
+ l *list.List
+ mtx sync.Mutex
+}
+
+// Insert inserts a key k into the cache with value v.
+func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ return c.unsafeInsert(k, v)
+}
+
+// Get returns the value in the cache for k.
+func (c *cache) Get(k ast.Value) (InterQueryCacheValue, bool) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ return c.unsafeGet(k)
+}
+
+// Delete deletes the value in the cache for k.
+func (c *cache) Delete(k ast.Value) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ c.unsafeDelete(k)
+}
+
+func (c *cache) UpdateConfig(config *Config) {
+ if config == nil {
+ return
+ }
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ c.config = config
+}
+
+func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue) (dropped int) {
+ size := v.SizeInBytes()
+ limit := c.maxSizeBytes()
+ if limit > 0 {
+ for key := c.l.Front(); key != nil && (c.usage+size > limit); key = c.l.Front() {
+ dropKey := key.Value.(ast.Value)
+ c.unsafeDelete(dropKey)
+ c.l.Remove(key)
+ dropped++
+ }
+ }
+
+ c.items[k.String()] = v
+ c.l.PushBack(k)
+ c.usage += size
+ return dropped
+}
+
+func (c *cache) unsafeGet(k ast.Value) (InterQueryCacheValue, bool) {
+ value, ok := c.items[k.String()]
+ return value, ok
+}
+
+func (c *cache) unsafeDelete(k ast.Value) {
+ value, ok := c.unsafeGet(k)
+ if !ok {
+ return
+ }
+
+ c.usage -= int64(value.SizeInBytes())
+ delete(c.items, k.String())
+}
+
+func (c *cache) maxSizeBytes() int64 {
+ if c.config == nil {
+ return defaultMaxSizeBytes
+ }
+ return *c.config.InterQueryBuiltinCache.MaxSizeBytes
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cancel.go b/vendor/github.com/open-policy-agent/opa/topdown/cancel.go
new file mode 100644
index 00000000..534e0799
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/cancel.go
@@ -0,0 +1,33 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "sync/atomic"
+)
+
+// Cancel defines the interface for cancelling topdown queries. Cancel
+// operations are thread-safe and idempotent.
+type Cancel interface {
+ Cancel()
+ Cancelled() bool
+}
+
+type cancel struct {
+ flag int32
+}
+
+// NewCancel returns a new Cancel object.
+func NewCancel() Cancel {
+ return &cancel{}
+}
+
+func (c *cancel) Cancel() {
+ atomic.StoreInt32(&c.flag, 1)
+}
+
+func (c *cancel) Cancelled() bool {
+ return atomic.LoadInt32(&c.flag) != 0
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/casts.go b/vendor/github.com/open-policy-agent/opa/topdown/casts.go
new file mode 100644
index 00000000..c207bfd1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/casts.go
@@ -0,0 +1,117 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "strconv"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+func builtinToNumber(a ast.Value) (ast.Value, error) {
+ switch a := a.(type) {
+ case ast.Null:
+ return ast.Number("0"), nil
+ case ast.Boolean:
+ if a {
+ return ast.Number("1"), nil
+ }
+ return ast.Number("0"), nil
+ case ast.Number:
+ return a, nil
+ case ast.String:
+ _, err := strconv.ParseFloat(string(a), 64)
+ if err != nil {
+ return nil, err
+ }
+ return ast.Number(a), nil
+ }
+ return nil, builtins.NewOperandTypeErr(1, a, "null", "boolean", "number", "string")
+}
+
+// Deprecated in v0.13.0.
+func builtinToArray(a ast.Value) (ast.Value, error) {
+ switch val := a.(type) {
+ case *ast.Array:
+ return val, nil
+ case ast.Set:
+ arr := make([]*ast.Term, val.Len())
+ i := 0
+ val.Foreach(func(term *ast.Term) {
+ arr[i] = term
+ i++
+ })
+ return ast.NewArray(arr...), nil
+ default:
+ return nil, builtins.NewOperandTypeErr(1, a, "array", "set")
+ }
+}
+
+// Deprecated in v0.13.0.
+func builtinToSet(a ast.Value) (ast.Value, error) {
+ switch val := a.(type) {
+ case *ast.Array:
+ s := ast.NewSet()
+ val.Foreach(func(v *ast.Term) {
+ s.Add(v)
+ })
+ return s, nil
+ case ast.Set:
+ return val, nil
+ default:
+ return nil, builtins.NewOperandTypeErr(1, a, "array", "set")
+ }
+}
+
+// Deprecated in v0.13.0.
+func builtinToString(a ast.Value) (ast.Value, error) {
+ switch val := a.(type) {
+ case ast.String:
+ return val, nil
+ default:
+ return nil, builtins.NewOperandTypeErr(1, a, "string")
+ }
+}
+
+// Deprecated in v0.13.0.
+func builtinToBoolean(a ast.Value) (ast.Value, error) {
+ switch val := a.(type) {
+ case ast.Boolean:
+ return val, nil
+ default:
+ return nil, builtins.NewOperandTypeErr(1, a, "boolean")
+ }
+}
+
+// Deprecated in v0.13.0.
+func builtinToNull(a ast.Value) (ast.Value, error) {
+ switch val := a.(type) {
+ case ast.Null:
+ return val, nil
+ default:
+ return nil, builtins.NewOperandTypeErr(1, a, "null")
+ }
+}
+
+// Deprecated in v0.13.0.
+func builtinToObject(a ast.Value) (ast.Value, error) {
+ switch val := a.(type) {
+ case ast.Object:
+ return val, nil
+ default:
+ return nil, builtins.NewOperandTypeErr(1, a, "object")
+ }
+}
+
+func init() {
+ RegisterFunctionalBuiltin1(ast.ToNumber.Name, builtinToNumber)
+ RegisterFunctionalBuiltin1(ast.CastArray.Name, builtinToArray)
+ RegisterFunctionalBuiltin1(ast.CastSet.Name, builtinToSet)
+ RegisterFunctionalBuiltin1(ast.CastString.Name, builtinToString)
+ RegisterFunctionalBuiltin1(ast.CastBoolean.Name, builtinToBoolean)
+ RegisterFunctionalBuiltin1(ast.CastNull.Name, builtinToNull)
+ RegisterFunctionalBuiltin1(ast.CastObject.Name, builtinToObject)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/cidr.go b/vendor/github.com/open-policy-agent/opa/topdown/cidr.go
new file mode 100644
index 00000000..7c0e72f6
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/cidr.go
@@ -0,0 +1,404 @@
+package topdown
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "sort"
+
+ "github.com/open-policy-agent/opa/ast"
+ cidrMerge "github.com/open-policy-agent/opa/internal/cidr/merge"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+func getNetFromOperand(v ast.Value) (*net.IPNet, error) {
+ subnetStringA, err := builtins.StringOperand(v, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ _, cidrnet, err := net.ParseCIDR(string(subnetStringA))
+ if err != nil {
+ return nil, err
+ }
+
+ return cidrnet, nil
+}
+
+func getLastIP(cidr *net.IPNet) (net.IP, error) {
+ prefixLen, bits := cidr.Mask.Size()
+ if prefixLen == 0 && bits == 0 {
+ // non-standard mask, see https://golang.org/pkg/net/#IPMask.Size
+ return nil, fmt.Errorf("CIDR mask is in non-standard format")
+ }
+ var lastIP []byte
+ if prefixLen == bits {
+ // Special case for single ip address ranges ex: 192.168.1.1/32
+ // We can just use the starting IP as the last IP
+ lastIP = cidr.IP
+ } else {
+ // Use big.Int's so we can handle ipv6 addresses
+ firstIPInt := new(big.Int)
+ firstIPInt.SetBytes(cidr.IP)
+ hostLen := uint(bits) - uint(prefixLen)
+ lastIPInt := big.NewInt(1)
+ lastIPInt.Lsh(lastIPInt, hostLen)
+ lastIPInt.Sub(lastIPInt, big.NewInt(1))
+ lastIPInt.Or(lastIPInt, firstIPInt)
+
+ ipBytes := lastIPInt.Bytes()
+ lastIP = make([]byte, bits/8)
+
+ // Pack our IP bytes into the end of the return array,
+ // since big.Int.Bytes() removes front zero padding.
+ for i := 1; i <= len(lastIPInt.Bytes()); i++ {
+ lastIP[len(lastIP)-i] = ipBytes[len(ipBytes)-i]
+ }
+ }
+
+ return lastIP, nil
+}
+
+func builtinNetCIDRIntersects(a, b ast.Value) (ast.Value, error) {
+ cidrnetA, err := getNetFromOperand(a)
+ if err != nil {
+ return nil, err
+ }
+
+ cidrnetB, err := getNetFromOperand(b)
+ if err != nil {
+ return nil, err
+ }
+
+ // If either net contains the others starting IP they are overlapping
+ cidrsOverlap := (cidrnetA.Contains(cidrnetB.IP) || cidrnetB.Contains(cidrnetA.IP))
+
+ return ast.Boolean(cidrsOverlap), nil
+}
+
+func builtinNetCIDRContains(a, b ast.Value) (ast.Value, error) {
+ cidrnetA, err := getNetFromOperand(a)
+ if err != nil {
+ return nil, err
+ }
+
+ // b could be either an IP addressor CIDR string, try to parse it as an IP first, fall back to CIDR
+ bStr, err := builtins.StringOperand(b, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ ip := net.ParseIP(string(bStr))
+ if ip != nil {
+ return ast.Boolean(cidrnetA.Contains(ip)), nil
+ }
+
+ // It wasn't an IP, try and parse it as a CIDR
+ cidrnetB, err := getNetFromOperand(b)
+ if err != nil {
+ return nil, fmt.Errorf("not a valid textual representation of an IP address or CIDR: %s", string(bStr))
+ }
+
+ // We can determine if cidr A contains cidr B iff A contains the starting address of B and the last address in B.
+ cidrContained := false
+ if cidrnetA.Contains(cidrnetB.IP) {
+ // Only spend time calculating the last IP if the starting IP is already verified to be in cidr A
+ lastIP, err := getLastIP(cidrnetB)
+ if err != nil {
+ return nil, err
+ }
+ cidrContained = cidrnetA.Contains(lastIP)
+ }
+
+ return ast.Boolean(cidrContained), nil
+}
+
+var errNetCIDRContainsMatchElementType = errors.New("element must be string or non-empty array")
+
+func getCIDRMatchTerm(a *ast.Term) (*ast.Term, error) {
+ switch v := a.Value.(type) {
+ case ast.String:
+ return a, nil
+ case *ast.Array:
+ if v.Len() == 0 {
+ return nil, errNetCIDRContainsMatchElementType
+ }
+ return v.Elem(0), nil
+ default:
+ return nil, errNetCIDRContainsMatchElementType
+ }
+}
+
+func evalNetCIDRContainsMatchesOperand(operand int, a *ast.Term, iter func(cidr, index *ast.Term) error) error {
+ switch v := a.Value.(type) {
+ case ast.String:
+ return iter(a, a)
+ case *ast.Array:
+ for i := 0; i < v.Len(); i++ {
+ cidr, err := getCIDRMatchTerm(v.Elem(i))
+ if err != nil {
+ return fmt.Errorf("operand %v: %v", operand, err)
+ }
+ if err := iter(cidr, ast.IntNumberTerm(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+ case ast.Set:
+ return v.Iter(func(x *ast.Term) error {
+ cidr, err := getCIDRMatchTerm(x)
+ if err != nil {
+ return fmt.Errorf("operand %v: %v", operand, err)
+ }
+ return iter(cidr, x)
+ })
+ case ast.Object:
+ return v.Iter(func(k, v *ast.Term) error {
+ cidr, err := getCIDRMatchTerm(v)
+ if err != nil {
+ return fmt.Errorf("operand %v: %v", operand, err)
+ }
+ return iter(cidr, k)
+ })
+ }
+ return nil
+}
+
+func builtinNetCIDRContainsMatches(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result := ast.NewSet()
+ err := evalNetCIDRContainsMatchesOperand(1, args[0], func(cidr1 *ast.Term, index1 *ast.Term) error {
+ return evalNetCIDRContainsMatchesOperand(2, args[1], func(cidr2 *ast.Term, index2 *ast.Term) error {
+ if v, err := builtinNetCIDRContains(cidr1.Value, cidr2.Value); err != nil {
+ return err
+ } else if vb, ok := v.(ast.Boolean); ok && bool(vb) {
+ result.Add(ast.ArrayTerm(index1, index2))
+ }
+ return nil
+ })
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+func builtinNetCIDRExpand(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ s, err := builtins.StringOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ ip, ipNet, err := net.ParseCIDR(string(s))
+ if err != nil {
+ return err
+ }
+
+ result := ast.NewSet()
+
+ for ip := ip.Mask(ipNet.Mask); ipNet.Contains(ip); incIP(ip) {
+
+ if bctx.Cancel != nil && bctx.Cancel.Cancelled() {
+ return Halt{
+ Err: &Error{
+ Code: CancelErr,
+ Message: "net.cidr_expand: timed out before generating all IP addresses",
+ },
+ }
+ }
+
+ result.Add(ast.StringTerm(ip.String()))
+ }
+
+ return iter(ast.NewTerm(result))
+}
+
+type cidrBlockRange struct {
+ First *net.IP
+ Last *net.IP
+ Network *net.IPNet
+}
+
+type cidrBlockRanges []*cidrBlockRange
+
+// Implement Sort interface
+func (c cidrBlockRanges) Len() int {
+ return len(c)
+}
+
+func (c cidrBlockRanges) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+func (c cidrBlockRanges) Less(i, j int) bool {
+ // Compare last IP.
+ cmp := bytes.Compare(*c[i].Last, *c[j].Last)
+ if cmp < 0 {
+ return true
+ } else if cmp > 0 {
+ return false
+ }
+
+ // Then compare first IP.
+ cmp = bytes.Compare(*c[i].First, *c[j].First)
+ if cmp < 0 {
+ return true
+ } else if cmp > 0 {
+ return false
+ }
+
+ // Ranges are Equal.
+ return false
+}
+
+// builtinNetCIDRMerge merges the provided list of IP addresses and subnets into the smallest possible list of CIDRs.
+// It merges adjacent subnets where possible, those contained within others and also removes any duplicates.
+// Original Algorithm: https://github.com/netaddr/netaddr.
+func builtinNetCIDRMerge(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ networks := []*net.IPNet{}
+
+ switch v := operands[0].Value.(type) {
+ case *ast.Array:
+ for i := 0; i < v.Len(); i++ {
+ network, err := generateIPNet(v.Elem(i))
+ if err != nil {
+ return err
+ }
+ networks = append(networks, network)
+ }
+ case ast.Set:
+ err := v.Iter(func(x *ast.Term) error {
+ network, err := generateIPNet(x)
+ if err != nil {
+ return err
+ }
+ networks = append(networks, network)
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ default:
+ return errors.New("operand must be an array")
+ }
+
+ merged := evalNetCIDRMerge(networks)
+
+ result := ast.NewSet()
+ for _, network := range merged {
+ result.Add(ast.StringTerm(network.String()))
+ }
+
+ return iter(ast.NewTerm(result))
+}
+
+func evalNetCIDRMerge(networks []*net.IPNet) []*net.IPNet {
+ if len(networks) == 0 {
+ return nil
+ }
+
+ var ranges cidrBlockRanges
+
+ // For each CIDR, create an IP range. Sort them and merge when possible.
+ for _, network := range networks {
+ firstIP, lastIP := cidrMerge.GetAddressRange(*network)
+ ranges = append(ranges, &cidrBlockRange{
+ First: &firstIP,
+ Last: &lastIP,
+ Network: network,
+ })
+ }
+
+ // merge CIDRs.
+ merged := mergeCIDRs(ranges)
+
+ // convert ranges into an equivalent list of net.IPNet.
+ result := []*net.IPNet{}
+
+ for _, r := range merged {
+ // Not merged with any other CIDR.
+ if r.Network != nil {
+ result = append(result, r.Network)
+ } else {
+ // Find new network that represents the merged range.
+ rangeCIDRs := cidrMerge.RangeToCIDRs(*r.First, *r.Last)
+ result = append(result, rangeCIDRs...)
+ }
+ }
+ return result
+}
+
+func generateIPNet(term *ast.Term) (*net.IPNet, error) {
+ switch e := term.Value.(type) {
+ case ast.String:
+ network := &net.IPNet{}
+ // try to parse element as an IP first, fall back to CIDR
+ ip := net.ParseIP(string(e))
+ if ip != nil {
+ network.IP = ip
+ network.Mask = ip.DefaultMask()
+ } else {
+ var err error
+ _, network, err = net.ParseCIDR(string(e))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return network, nil
+ default:
+ return nil, errors.New("element must be string")
+ }
+}
+
+func mergeCIDRs(ranges cidrBlockRanges) cidrBlockRanges {
+ sort.Sort(ranges)
+
+ // Merge adjacent CIDRs if possible.
+ for i := len(ranges) - 1; i > 0; i-- {
+ previousIP := cidrMerge.GetPreviousIP(*ranges[i].First)
+
+ // If the previous IP of the current network overlaps
+ // with the last IP of the previous network in the
+ // list, then merge the two ranges together.
+ if bytes.Compare(previousIP, *ranges[i-1].Last) <= 0 {
+ var firstIP *net.IP
+ if bytes.Compare(*ranges[i-1].First, *ranges[i].First) < 0 {
+ firstIP = ranges[i-1].First
+ } else {
+ firstIP = ranges[i].First
+ }
+
+ lastIPRange := make(net.IP, len(*ranges[i].Last))
+ copy(lastIPRange, *ranges[i].Last)
+
+ firstIPRange := make(net.IP, len(*firstIP))
+ copy(firstIPRange, *firstIP)
+
+ ranges[i-1] = &cidrBlockRange{First: &firstIPRange, Last: &lastIPRange, Network: nil}
+
+ // Delete ranges[i] since merged with the previous.
+ ranges = append(ranges[:i], ranges[i+1:]...)
+ }
+ }
+ return ranges
+}
+
+func incIP(ip net.IP) {
+ for j := len(ip) - 1; j >= 0; j-- {
+ ip[j]++
+ if ip[j] > 0 {
+ break
+ }
+ }
+}
+
+func init() {
+ RegisterFunctionalBuiltin2(ast.NetCIDROverlap.Name, builtinNetCIDRContains)
+ RegisterFunctionalBuiltin2(ast.NetCIDRIntersects.Name, builtinNetCIDRIntersects)
+ RegisterFunctionalBuiltin2(ast.NetCIDRContains.Name, builtinNetCIDRContains)
+ RegisterBuiltinFunc(ast.NetCIDRContainsMatches.Name, builtinNetCIDRContainsMatches)
+ RegisterBuiltinFunc(ast.NetCIDRExpand.Name, builtinNetCIDRExpand)
+ RegisterBuiltinFunc(ast.NetCIDRMerge.Name, builtinNetCIDRMerge)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/comparison.go b/vendor/github.com/open-policy-agent/opa/topdown/comparison.go
new file mode 100644
index 00000000..96be984a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/comparison.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import "github.com/open-policy-agent/opa/ast"
+
+type compareFunc func(a, b ast.Value) bool
+
+func compareGreaterThan(a, b ast.Value) bool {
+ return ast.Compare(a, b) > 0
+}
+
+func compareGreaterThanEq(a, b ast.Value) bool {
+ return ast.Compare(a, b) >= 0
+}
+
+func compareLessThan(a, b ast.Value) bool {
+ return ast.Compare(a, b) < 0
+}
+
+func compareLessThanEq(a, b ast.Value) bool {
+ return ast.Compare(a, b) <= 0
+}
+
+func compareNotEq(a, b ast.Value) bool {
+ return ast.Compare(a, b) != 0
+}
+
+func compareEq(a, b ast.Value) bool {
+ return ast.Compare(a, b) == 0
+}
+
+func builtinCompare(cmp compareFunc) FunctionalBuiltin2 {
+ return func(a, b ast.Value) (ast.Value, error) {
+ return ast.Boolean(cmp(a, b)), nil
+ }
+}
+
+func init() {
+ RegisterFunctionalBuiltin2(ast.GreaterThan.Name, builtinCompare(compareGreaterThan))
+ RegisterFunctionalBuiltin2(ast.GreaterThanEq.Name, builtinCompare(compareGreaterThanEq))
+ RegisterFunctionalBuiltin2(ast.LessThan.Name, builtinCompare(compareLessThan))
+ RegisterFunctionalBuiltin2(ast.LessThanEq.Name, builtinCompare(compareLessThanEq))
+ RegisterFunctionalBuiltin2(ast.NotEqual.Name, builtinCompare(compareNotEq))
+ RegisterFunctionalBuiltin2(ast.Equal.Name, builtinCompare(compareEq))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go b/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go
new file mode 100644
index 00000000..20aac0f7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go
@@ -0,0 +1,428 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package copypropagation
+
+import (
+ "sort"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// CopyPropagator implements a simple copy propagation optimization to remove
+// intermediate variables in partial evaluation results.
+//
+// For example, given the query: input.x > 1 where 'input' is unknown, the
+// compiled query would become input.x = a; a > 1 which would remain in the
+// partial evaluation result. The CopyPropagator will remove the variable
+// assignment so that partial evaluation simply outputs input.x > 1.
+//
+// In many cases, copy propagation can remove all variables from the result of
+// partial evaluation which simplifies evaluation for non-OPA consumers.
+//
+// In some cases, copy propagation cannot remove all variables. If the output of
+// a built-in call is subsequently used as a ref head, the output variable must
+// be kept. For example. sort(input, x); x[0] == 1. In this case, copy
+// propagation cannot replace x[0] == 1 with sort(input, x)[0] == 1 as this is
+// not legal.
+type CopyPropagator struct {
+ livevars ast.VarSet // vars that must be preserved in the resulting query
+ sorted []ast.Var // sorted copy of vars to ensure deterministic result
+ ensureNonEmptyBody bool
+ compiler *ast.Compiler
+}
+
+// New returns a new CopyPropagator that optimizes queries while preserving vars
+// in the livevars set.
+func New(livevars ast.VarSet) *CopyPropagator {
+
+ sorted := make([]ast.Var, 0, len(livevars))
+ for v := range livevars {
+ sorted = append(sorted, v)
+ }
+
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].Compare(sorted[j]) < 0
+ })
+
+ return &CopyPropagator{livevars: livevars, sorted: sorted}
+}
+
+// WithEnsureNonEmptyBody configures p to ensure that results are always non-empty.
+func (p *CopyPropagator) WithEnsureNonEmptyBody(yes bool) *CopyPropagator {
+ p.ensureNonEmptyBody = yes
+ return p
+}
+
+// WithCompiler configures the compiler to read from while processing the query. This
+// should be the same compiler used to compile the original policy.
+func (p *CopyPropagator) WithCompiler(c *ast.Compiler) *CopyPropagator {
+ p.compiler = c
+ return p
+}
+
+// Apply executes the copy propagation optimization and returns a new query.
+func (p *CopyPropagator) Apply(query ast.Body) ast.Body {
+
+ result := ast.NewBody()
+
+ uf, ok := makeDisjointSets(p.livevars, query)
+ if !ok {
+ return query
+ }
+
+ // Compute set of vars that appear in the head of refs in the query. If a var
+ // is dereferenced, we can plug it with a constant value, but it is not always
+ // optimal to do so.
+ // TODO: Improve the algorithm for when we should plug constants/calls/etc
+ headvars := ast.NewVarSet()
+ ast.WalkRefs(query, func(x ast.Ref) bool {
+ if v, ok := x[0].Value.(ast.Var); ok {
+ if root, ok := uf.Find(v); ok {
+ root.constant = nil
+ headvars.Add(root.key.(ast.Var))
+ } else {
+ headvars.Add(v)
+ }
+ }
+ return false
+ })
+
+ removedEqs := ast.NewValueMap()
+
+ for _, expr := range query {
+
+ pctx := &plugContext{
+ removedEqs: removedEqs,
+ uf: uf,
+ negated: expr.Negated,
+ headvars: headvars,
+ }
+
+ expr = p.plugBindings(pctx, expr)
+
+ if p.updateBindings(pctx, expr) {
+ result.Append(expr)
+ }
+ }
+
+ // Run post-processing step on the query to ensure that all live vars are bound
+ // in the result. The plugging that happens above substitutes all vars in the
+ // same set with the root.
+ //
+ // This step should run before the next step to prevent unnecessary bindings
+ // from being added to the result. For example:
+ //
+ // - Given the following result:
+ // - Given the following removed equalities: "x = input.x" and "y = input"
+ // - Given the following liveset: {x}
+ //
+ // If this step were to run AFTER the following step, the output would be:
+ //
+ // x = input.x; y = input
+ //
+ // Even though y = input is not required.
+ for _, v := range p.sorted {
+ if root, ok := uf.Find(v); ok {
+ if root.constant != nil {
+ result.Append(ast.Equality.Expr(ast.NewTerm(v), root.constant))
+ } else if b := removedEqs.Get(root.key); b != nil {
+ result.Append(ast.Equality.Expr(ast.NewTerm(v), ast.NewTerm(b)))
+ } else if root.key != v {
+ result.Append(ast.Equality.Expr(ast.NewTerm(v), ast.NewTerm(root.key)))
+ }
+ }
+ }
+
+ // Run post-processing step on query to ensure that all killed exprs are
+ // accounted for. There are several cases we look for:
+ //
+ // * If an expr is killed but the binding is never used, the query
+ // must still include the expr. For example, given the query 'input.x = a' and
+ // an empty livevar set, the result must include the ref input.x otherwise the
+ // query could be satisfied without input.x being defined.
+ //
+ // * If an expr is killed that provided safety to vars which are not
+ // otherwise being made safe by the current result.
+ //
+ // For any of these cases we re-add the removed equality expression
+ // to the current result.
+
+ // Invariant: Live vars are bound (above) and reserved vars are implicitly ground.
+ safe := ast.ReservedVars.Copy()
+ safe.Update(p.livevars)
+ safe.Update(ast.OutputVarsFromBody(p.compiler, result, safe))
+ unsafe := result.Vars(ast.SafetyCheckVisitorParams).Diff(safe)
+
+ for _, b := range sortbindings(removedEqs) {
+ removedEq := ast.Equality.Expr(ast.NewTerm(b.k), ast.NewTerm(b.v))
+
+ providesSafety := false
+ outputVars := ast.OutputVarsFromExpr(p.compiler, removedEq, safe)
+ diff := unsafe.Diff(outputVars)
+ if len(diff) < len(unsafe) {
+ unsafe = diff
+ providesSafety = true
+ }
+
+ if providesSafety || !containedIn(b.v, result) {
+ result.Append(removedEq)
+ safe.Update(outputVars)
+ }
+ }
+
+ if len(unsafe) > 0 {
+ // NOTE(tsandall): This should be impossible but if it does occur, throw
+ // away the result rather than generating unsafe output.
+ return query
+ }
+
+ if p.ensureNonEmptyBody && len(result) == 0 {
+ result = append(result, ast.NewExpr(ast.BooleanTerm(true)))
+ }
+
+ return result
+}
+
+// plugBindings applies the binding list and union-find to x. This process
+// removes as many variables as possible.
+func (p *CopyPropagator) plugBindings(pctx *plugContext, expr *ast.Expr) *ast.Expr {
+
+ xform := bindingPlugTransform{
+ pctx: pctx,
+ }
+
+ // Deep copy the expression as it may be mutated during the transform and
+ // the caller running copy propagation may have references to the
+ // expression. Note, the transform does not contain any error paths and
+ // should never return a non-expression value for the root so consider
+ // errors unreachable.
+ x, err := ast.Transform(xform, expr.Copy())
+
+ if expr, ok := x.(*ast.Expr); !ok || err != nil {
+ panic("unreachable")
+ } else {
+ return expr
+ }
+}
+
+type bindingPlugTransform struct {
+ pctx *plugContext
+}
+
+func (t bindingPlugTransform) Transform(x interface{}) (interface{}, error) {
+ switch x := x.(type) {
+ case ast.Var:
+ return t.plugBindingsVar(t.pctx, x), nil
+ case ast.Ref:
+ return t.plugBindingsRef(t.pctx, x), nil
+ default:
+ return x, nil
+ }
+}
+
+func (t bindingPlugTransform) plugBindingsVar(pctx *plugContext, v ast.Var) (result ast.Value) {
+
+ result = v
+
+ // Apply union-find to remove redundant variables from input.
+ if root, ok := pctx.uf.Find(v); ok {
+ result = root.Value()
+ }
+
+ // Apply binding list to substitute remaining vars.
+ if v, ok := result.(ast.Var); ok {
+ if b := pctx.removedEqs.Get(v); b != nil {
+ if !pctx.negated || b.IsGround() {
+ result = b
+ }
+ }
+ }
+
+ return result
+}
+
+func (t bindingPlugTransform) plugBindingsRef(pctx *plugContext, v ast.Ref) ast.Ref {
+
+ // Apply union-find to remove redundant variables from input.
+ if root, ok := pctx.uf.Find(v[0].Value); ok {
+ v[0].Value = root.Value()
+ }
+
+ result := v
+
+ // Refs require special handling. If the head of the ref was killed, then
+ // the rest of the ref must be concatenated with the new base.
+ if b := pctx.removedEqs.Get(v[0].Value); b != nil {
+ if !pctx.negated || b.IsGround() {
+ var base ast.Ref
+ switch x := b.(type) {
+ case ast.Ref:
+ base = x
+ default:
+ base = ast.Ref{ast.NewTerm(x)}
+ }
+ result = base.Concat(v[1:])
+ }
+ }
+
+ return result
+}
+
+// updateBindings returns false if the expression can be killed. If the
+// expression is killed, the binding list is updated to map a var to value.
+func (p *CopyPropagator) updateBindings(pctx *plugContext, expr *ast.Expr) bool {
+ if pctx.negated || len(expr.With) > 0 {
+ return true
+ }
+ if expr.IsEquality() {
+ a, b := expr.Operand(0), expr.Operand(1)
+ if a.Equal(b) {
+ return false
+ }
+ k, v, keep := p.updateBindingsEq(a, b)
+ if !keep {
+ if v != nil {
+ pctx.removedEqs.Put(k, v)
+ }
+ return false
+ }
+ } else if expr.IsCall() {
+ terms := expr.Terms.([]*ast.Term)
+ output := terms[len(terms)-1]
+ if k, ok := output.Value.(ast.Var); ok && !p.livevars.Contains(k) && !pctx.headvars.Contains(k) {
+ pctx.removedEqs.Put(k, ast.CallTerm(terms[:len(terms)-1]...).Value)
+ return false
+ }
+ }
+ return !isNoop(expr)
+}
+
+func (p *CopyPropagator) updateBindingsEq(a, b *ast.Term) (ast.Var, ast.Value, bool) {
+ k, v, keep := p.updateBindingsEqAsymmetric(a, b)
+ if !keep {
+ return k, v, keep
+ }
+ return p.updateBindingsEqAsymmetric(b, a)
+}
+
+func (p *CopyPropagator) updateBindingsEqAsymmetric(a, b *ast.Term) (ast.Var, ast.Value, bool) {
+ k, ok := a.Value.(ast.Var)
+ if !ok || p.livevars.Contains(k) {
+ return "", nil, true
+ }
+
+ switch b.Value.(type) {
+ case ast.Ref, ast.Call:
+ return k, b.Value, false
+ }
+
+ return "", nil, true
+}
+
+type plugContext struct {
+ removedEqs *ast.ValueMap
+ uf *unionFind
+ headvars ast.VarSet
+ negated bool
+}
+
+type binding struct {
+ k ast.Value
+ v ast.Value
+}
+
+func containedIn(value ast.Value, x interface{}) bool {
+ var stop bool
+ switch v := value.(type) {
+ case ast.Ref:
+ ast.WalkRefs(x, func(other ast.Ref) bool {
+ if stop || other.HasPrefix(v) {
+ stop = true
+ return stop
+ }
+ return false
+ })
+ default:
+ ast.WalkTerms(x, func(other *ast.Term) bool {
+ if stop || other.Value.Compare(v) == 0 {
+ stop = true
+ return stop
+ }
+ return false
+ })
+ }
+ return stop
+}
+
+func sortbindings(bindings *ast.ValueMap) []*binding {
+ sorted := make([]*binding, 0, bindings.Len())
+ bindings.Iter(func(k ast.Value, v ast.Value) bool {
+ sorted = append(sorted, &binding{k, v})
+ return false
+ })
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].k.Compare(sorted[j].k) < 0
+ })
+ return sorted
+}
+
+// makeDisjointSets builds the union-find structure for the query. The structure
+// is built by processing all of the equality exprs in the query. Sets represent
+// vars that must be equal to each other. In addition to vars, each set can have
+// at most one constant. If the query contains expressions that cannot be
+// satisfied (e.g., because a set has multiple constants) this function returns
+// false.
+func makeDisjointSets(livevars ast.VarSet, query ast.Body) (*unionFind, bool) {
+ uf := newUnionFind(func(r1, r2 *unionFindRoot) (*unionFindRoot, *unionFindRoot) {
+ if v, ok := r1.key.(ast.Var); ok && livevars.Contains(v) {
+ return r1, r2
+ }
+ return r2, r1
+ })
+ for _, expr := range query {
+ if expr.IsEquality() && !expr.Negated && len(expr.With) == 0 {
+ a, b := expr.Operand(0), expr.Operand(1)
+ varA, ok1 := a.Value.(ast.Var)
+ varB, ok2 := b.Value.(ast.Var)
+ if ok1 && ok2 {
+ if _, ok := uf.Merge(varA, varB); !ok {
+ return nil, false
+ }
+ } else if ok1 && ast.IsConstant(b.Value) {
+ root := uf.MakeSet(varA)
+ if root.constant != nil && !root.constant.Equal(b) {
+ return nil, false
+ }
+ root.constant = b
+ } else if ok2 && ast.IsConstant(a.Value) {
+ root := uf.MakeSet(varB)
+ if root.constant != nil && !root.constant.Equal(a) {
+ return nil, false
+ }
+ root.constant = a
+ }
+ }
+ }
+
+ return uf, true
+}
+
+func isNoop(expr *ast.Expr) bool {
+
+ if !expr.IsCall() {
+ term := expr.Terms.(*ast.Term)
+ if !ast.IsConstant(term.Value) {
+ return false
+ }
+ return !ast.Boolean(false).Equal(term.Value)
+ }
+
+ // A==A can be ignored
+ if expr.Operator().Equal(ast.Equal.Ref()) {
+ return expr.Operand(0).Equal(expr.Operand(1))
+ }
+
+ return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go b/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go
new file mode 100644
index 00000000..38ec56f3
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go
@@ -0,0 +1,135 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package copypropagation
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/util"
+)
+
+type rankFunc func(*unionFindRoot, *unionFindRoot) (*unionFindRoot, *unionFindRoot)
+
+type unionFind struct {
+ roots *util.HashMap
+ parents *ast.ValueMap
+ rank rankFunc
+}
+
+func newUnionFind(rank rankFunc) *unionFind {
+ return &unionFind{
+ roots: util.NewHashMap(func(a util.T, b util.T) bool {
+ return a.(ast.Value).Compare(b.(ast.Value)) == 0
+ }, func(v util.T) int {
+ return v.(ast.Value).Hash()
+ }),
+ parents: ast.NewValueMap(),
+ rank: rank,
+ }
+}
+
+func (uf *unionFind) MakeSet(v ast.Value) *unionFindRoot {
+
+ root, ok := uf.Find(v)
+ if ok {
+ return root
+ }
+
+ root = newUnionFindRoot(v)
+ uf.parents.Put(v, v)
+ uf.roots.Put(v, root)
+ return root
+}
+
+func (uf *unionFind) Find(v ast.Value) (*unionFindRoot, bool) {
+
+ parent := uf.parents.Get(v)
+ if parent == nil {
+ return nil, false
+ }
+
+ if parent.Compare(v) == 0 {
+ r, ok := uf.roots.Get(v)
+ return r.(*unionFindRoot), ok
+ }
+
+ return uf.Find(parent)
+}
+
+func (uf *unionFind) Merge(a, b ast.Value) (*unionFindRoot, bool) {
+
+ r1 := uf.MakeSet(a)
+ r2 := uf.MakeSet(b)
+
+ if r1 != r2 {
+
+ r1, r2 = uf.rank(r1, r2)
+
+ uf.parents.Put(r2.key, r1.key)
+ uf.roots.Delete(r2.key)
+
+ // Sets can have at most one constant value associated with them. When
+ // unioning, we must preserve this invariant. If a set has two constants,
+ // there will be no way to prove the query.
+ if r1.constant != nil && r2.constant != nil && !r1.constant.Equal(r2.constant) {
+ return nil, false
+ } else if r1.constant == nil {
+ r1.constant = r2.constant
+ }
+ }
+
+ return r1, true
+}
+
+func (uf *unionFind) String() string {
+ o := struct {
+ Roots map[string]interface{}
+ Parents map[string]ast.Value
+ }{
+ map[string]interface{}{},
+ map[string]ast.Value{},
+ }
+
+ uf.roots.Iter(func(k util.T, v util.T) bool {
+ o.Roots[k.(ast.Value).String()] = struct {
+ Constant *ast.Term
+ Key ast.Value
+ }{
+ v.(*unionFindRoot).constant,
+ v.(*unionFindRoot).key,
+ }
+ return true
+ })
+
+ uf.parents.Iter(func(k ast.Value, v ast.Value) bool {
+ o.Parents[k.String()] = v
+ return true
+ })
+
+ return string(util.MustMarshalJSON(o))
+}
+
+type unionFindRoot struct {
+ key ast.Value
+ constant *ast.Term
+}
+
+func newUnionFindRoot(key ast.Value) *unionFindRoot {
+ return &unionFindRoot{
+ key: key,
+ }
+}
+
+func (r *unionFindRoot) Value() ast.Value {
+ if r.constant != nil {
+ return r.constant.Value
+ }
+ return r.key
+}
+
+func (r *unionFindRoot) String() string {
+ return fmt.Sprintf("{key: %s, constant: %s", r.key, r.constant)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go
new file mode 100644
index 00000000..2551ef05
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/crypto.go
@@ -0,0 +1,223 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/util"
+)
+
+func builtinCryptoX509ParseCertificates(a ast.Value) (ast.Value, error) {
+
+ input, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ // data to be passed to x509.ParseCertificates
+ bytes := []byte(input)
+
+ // if the input is not a PEM string, attempt to decode b64
+ if str := string(input); !strings.HasPrefix(str, "-----BEGIN CERTIFICATE-----") {
+ bytes, err = base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // attempt to decode input as PEM data
+ p, rest := pem.Decode(bytes)
+ if p != nil && p.Type != "CERTIFICATE" {
+ return nil, fmt.Errorf("PEM data contains '%s', expected CERTIFICATE", p.Type)
+ }
+ if p != nil {
+ // if PEM decoded as a valid certificate, use its data as the DER input
+ bytes = p.Bytes
+ }
+
+ // check for more certificates in the chain
+ if p != nil && len(rest) > 0 {
+ var p *pem.Block
+ for {
+ p, rest = pem.Decode(rest)
+ if p == nil {
+ // finish when no more PEM data is read
+ break
+ }
+ // reject any data that isn't exclusively certificates
+ if p != nil && p.Type != "CERTIFICATE" {
+ return nil, fmt.Errorf("PEM data contains '%s', expected CERTIFICATE", p.Type)
+ }
+ bytes = append(bytes, p.Bytes...)
+ }
+ }
+
+ certs, err := x509.ParseCertificates(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := json.Marshal(certs)
+ if err != nil {
+ return nil, err
+ }
+
+ var x interface{}
+
+ if err := util.UnmarshalJSON(bs, &x); err != nil {
+ return nil, err
+ }
+
+ return ast.InterfaceToValue(x)
+}
+
+func builtinCryptoX509ParseCertificateRequest(a ast.Value) (ast.Value, error) {
+
+ input, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ // data to be passed to x509.ParseCertificateRequest
+ bytes := []byte(input)
+
+ // if the input is not a PEM string, attempt to decode b64
+ if str := string(input); !strings.HasPrefix(str, "-----BEGIN CERTIFICATE REQUEST-----") {
+ bytes, err = base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ p, _ := pem.Decode(bytes)
+ if p != nil && p.Type != "CERTIFICATE REQUEST" {
+ return nil, fmt.Errorf("invalid PEM-encoded certificate signing request")
+ }
+ if p != nil {
+ bytes = p.Bytes
+ }
+
+ csr, err := x509.ParseCertificateRequest(bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := json.Marshal(csr)
+ if err != nil {
+ return nil, err
+ }
+
+ var x interface{}
+ if err := util.UnmarshalJSON(bs, &x); err != nil {
+ return nil, err
+ }
+ return ast.InterfaceToValue(x)
+}
+
+func hashHelper(a ast.Value, h func(ast.String) string) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(h(s)), nil
+}
+
+func builtinCryptoMd5(a ast.Value) (ast.Value, error) {
+ return hashHelper(a, func(s ast.String) string { return fmt.Sprintf("%x", md5.Sum([]byte(s))) })
+}
+
+func builtinCryptoSha1(a ast.Value) (ast.Value, error) {
+ return hashHelper(a, func(s ast.String) string { return fmt.Sprintf("%x", sha1.Sum([]byte(s))) })
+}
+
+func builtinCryptoSha256(a ast.Value) (ast.Value, error) {
+ return hashHelper(a, func(s ast.String) string { return fmt.Sprintf("%x", sha256.Sum256([]byte(s))) })
+}
+
+func init() {
+ RegisterFunctionalBuiltin1(ast.CryptoX509ParseCertificates.Name, builtinCryptoX509ParseCertificates)
+ RegisterFunctionalBuiltin1(ast.CryptoMd5.Name, builtinCryptoMd5)
+ RegisterFunctionalBuiltin1(ast.CryptoSha1.Name, builtinCryptoSha1)
+ RegisterFunctionalBuiltin1(ast.CryptoSha256.Name, builtinCryptoSha256)
+ RegisterFunctionalBuiltin1(ast.CryptoX509ParseCertificateRequest.Name, builtinCryptoX509ParseCertificateRequest)
+}
+
+// addCACertsFromFile adds CA certificates from filePath into the given pool.
+// If pool is nil, it creates a new x509.CertPool. pool is returned.
+func addCACertsFromFile(pool *x509.CertPool, filePath string) (*x509.CertPool, error) {
+ if pool == nil {
+ pool = x509.NewCertPool()
+ }
+
+ caCert, err := readCertFromFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ if ok := pool.AppendCertsFromPEM(caCert); !ok {
+ return nil, fmt.Errorf("could not append CA certificates from %q", filePath)
+ }
+
+ return pool, nil
+}
+
+// addCACertsFromBytes adds CA certificates from pemBytes into the given pool.
+// If pool is nil, it creates a new x509.CertPool. pool is returned.
+func addCACertsFromBytes(pool *x509.CertPool, pemBytes []byte) (*x509.CertPool, error) {
+ if pool == nil {
+ pool = x509.NewCertPool()
+ }
+
+ if ok := pool.AppendCertsFromPEM(pemBytes); !ok {
+ return nil, fmt.Errorf("could not append certificates")
+ }
+
+ return pool, nil
+}
+
+// addCACertsFromBytes adds CA certificates from the environment variable named
+// by envName into the given pool. If pool is nil, it creates a new x509.CertPool.
+// pool is returned.
+func addCACertsFromEnv(pool *x509.CertPool, envName string) (*x509.CertPool, error) {
+ pool, err := addCACertsFromBytes(pool, []byte(os.Getenv(envName)))
+ if err != nil {
+ return nil, fmt.Errorf("could not add CA certificates from envvar %q: %w", envName, err)
+ }
+
+ return pool, err
+}
+
+// ReadCertFromFile reads a cert from file
+func readCertFromFile(localCertFile string) ([]byte, error) {
+ // Read in the cert file
+ certPEM, err := ioutil.ReadFile(localCertFile)
+ if err != nil {
+ return nil, err
+ }
+ return certPEM, nil
+}
+
+// ReadKeyFromFile reads a key from file
+func readKeyFromFile(localKeyFile string) ([]byte, error) {
+ // Read in the cert file
+ key, err := ioutil.ReadFile(localKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/doc.go b/vendor/github.com/open-policy-agent/opa/topdown/doc.go
new file mode 100644
index 00000000..9aa7aa45
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package topdown provides low-level query evaluation support.
+//
+// The topdown implementation is a modified version of the standard top-down
+// evaluation algorithm used in Datalog. References and comprehensions are
+// evaluated eagerly while all other terms are evaluated lazily.
+package topdown
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/encoding.go b/vendor/github.com/open-policy-agent/opa/topdown/encoding.go
new file mode 100644
index 00000000..fa1cfc88
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/encoding.go
@@ -0,0 +1,309 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ ghodss "github.com/ghodss/yaml"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/util"
+)
+
+func builtinJSONMarshal(a ast.Value) (ast.Value, error) {
+
+ asJSON, err := ast.JSON(a)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := json.Marshal(asJSON)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(string(bs)), nil
+}
+
+func builtinJSONUnmarshal(a ast.Value) (ast.Value, error) {
+
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ var x interface{}
+
+ if err := util.UnmarshalJSON([]byte(str), &x); err != nil {
+ return nil, err
+ }
+
+ return ast.InterfaceToValue(x)
+}
+
+func builtinJSONIsValid(a ast.Value) (ast.Value, error) {
+
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ var x interface{}
+ err = util.UnmarshalJSON([]byte(str), &x)
+ return ast.Boolean(err == nil), nil
+}
+
+func builtinBase64Encode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(base64.StdEncoding.EncodeToString([]byte(str))), nil
+}
+
+func builtinBase64Decode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err := base64.StdEncoding.DecodeString(string(str))
+ return ast.String(result), err
+}
+
+func builtinBase64IsValid(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = base64.StdEncoding.DecodeString(string(str))
+ return ast.Boolean(err == nil), nil
+}
+
+func builtinBase64UrlEncode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(base64.URLEncoding.EncodeToString([]byte(str))), nil
+}
+
+func builtinBase64UrlEncodeNoPad(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(base64.RawURLEncoding.EncodeToString([]byte(str))), nil
+}
+
+func builtinBase64UrlDecode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s := string(str)
+
+ // Some base64url encoders omit the padding at the end, so this case
+ // corrects such representations using the method given in RFC 7515
+ // Appendix C: https://tools.ietf.org/html/rfc7515#appendix-C
+ if !strings.HasSuffix(s, "=") {
+ switch len(s) % 4 {
+ case 0:
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ default:
+ return nil, fmt.Errorf("illegal base64url string: %s", s)
+ }
+ }
+ result, err := base64.URLEncoding.DecodeString(s)
+ return ast.String(result), err
+}
+
+func builtinURLQueryEncode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(url.QueryEscape(string(str))), nil
+}
+
+func builtinURLQueryDecode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s, err := url.QueryUnescape(string(str))
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(s), nil
+}
+
+var encodeObjectErr = builtins.NewOperandErr(1, "values must be string, array[string], or set[string]")
+
+func builtinURLQueryEncodeObject(a ast.Value) (ast.Value, error) {
+ asJSON, err := ast.JSON(a)
+ if err != nil {
+ return nil, err
+ }
+
+ inputs, ok := asJSON.(map[string]interface{})
+ if !ok {
+ return nil, builtins.NewOperandTypeErr(1, a, "object")
+ }
+
+ query := url.Values{}
+
+ for k, v := range inputs {
+ switch vv := v.(type) {
+ case string:
+ query.Set(k, vv)
+ case []interface{}:
+ for _, val := range vv {
+ strVal, ok := val.(string)
+ if !ok {
+ return nil, encodeObjectErr
+ }
+ query.Add(k, strVal)
+ }
+ default:
+ return nil, encodeObjectErr
+ }
+ }
+
+ return ast.String(query.Encode()), nil
+}
+
+func builtinURLQueryDecodeObject(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ query, err := builtins.StringOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ queryParams, err := url.ParseQuery(string(query))
+ if err != nil {
+ return err
+ }
+
+ queryObject := ast.NewObject()
+ for k, v := range queryParams {
+ paramsArray := make([]*ast.Term, len(v))
+ for i, param := range v {
+ paramsArray[i] = ast.StringTerm(param)
+ }
+ queryObject.Insert(ast.StringTerm(k), ast.ArrayTerm(paramsArray...))
+ }
+
+ return iter(ast.NewTerm(queryObject))
+}
+
+func builtinYAMLMarshal(a ast.Value) (ast.Value, error) {
+
+ asJSON, err := ast.JSON(a)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ encoder := json.NewEncoder(&buf)
+ if err := encoder.Encode(asJSON); err != nil {
+ return nil, err
+ }
+
+ bs, err := ghodss.JSONToYAML(buf.Bytes())
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(string(bs)), nil
+}
+
+func builtinYAMLUnmarshal(a ast.Value) (ast.Value, error) {
+
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := ghodss.YAMLToJSON([]byte(str))
+ if err != nil {
+ return nil, err
+ }
+
+ buf := bytes.NewBuffer(bs)
+ decoder := util.NewJSONDecoder(buf)
+ var val interface{}
+ err = decoder.Decode(&val)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.InterfaceToValue(val)
+}
+
+func builtinYAMLIsValid(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ var x interface{}
+ err = ghodss.Unmarshal([]byte(str), &x)
+ return ast.Boolean(err == nil), nil
+}
+
+func builtinHexEncode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(hex.EncodeToString([]byte(str))), nil
+}
+
+func builtinHexDecode(a ast.Value) (ast.Value, error) {
+ str, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ val, err := hex.DecodeString(string(str))
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(val), nil
+}
+
+func init() {
+ RegisterFunctionalBuiltin1(ast.JSONMarshal.Name, builtinJSONMarshal)
+ RegisterFunctionalBuiltin1(ast.JSONUnmarshal.Name, builtinJSONUnmarshal)
+ RegisterFunctionalBuiltin1(ast.JSONIsValid.Name, builtinJSONIsValid)
+ RegisterFunctionalBuiltin1(ast.Base64Encode.Name, builtinBase64Encode)
+ RegisterFunctionalBuiltin1(ast.Base64Decode.Name, builtinBase64Decode)
+ RegisterFunctionalBuiltin1(ast.Base64IsValid.Name, builtinBase64IsValid)
+ RegisterFunctionalBuiltin1(ast.Base64UrlEncode.Name, builtinBase64UrlEncode)
+ RegisterFunctionalBuiltin1(ast.Base64UrlEncodeNoPad.Name, builtinBase64UrlEncodeNoPad)
+ RegisterFunctionalBuiltin1(ast.Base64UrlDecode.Name, builtinBase64UrlDecode)
+ RegisterFunctionalBuiltin1(ast.URLQueryDecode.Name, builtinURLQueryDecode)
+ RegisterFunctionalBuiltin1(ast.URLQueryEncode.Name, builtinURLQueryEncode)
+ RegisterFunctionalBuiltin1(ast.URLQueryEncodeObject.Name, builtinURLQueryEncodeObject)
+ RegisterBuiltinFunc(ast.URLQueryDecodeObject.Name, builtinURLQueryDecodeObject)
+ RegisterFunctionalBuiltin1(ast.YAMLMarshal.Name, builtinYAMLMarshal)
+ RegisterFunctionalBuiltin1(ast.YAMLUnmarshal.Name, builtinYAMLUnmarshal)
+ RegisterFunctionalBuiltin1(ast.YAMLIsValid.Name, builtinYAMLIsValid)
+ RegisterFunctionalBuiltin1(ast.HexEncode.Name, builtinHexEncode)
+ RegisterFunctionalBuiltin1(ast.HexDecode.Name, builtinHexDecode)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/errors.go b/vendor/github.com/open-policy-agent/opa/topdown/errors.go
new file mode 100644
index 00000000..53f4e5ba
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/errors.go
@@ -0,0 +1,137 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// Halt is a special error type that built-in function implementations return to indicate
+// that policy evaluation should stop immediately.
+type Halt struct {
+ Err error
+}
+
+func (h Halt) Error() string {
+ return h.Err.Error()
+}
+
+// Error is the error type returned by the Eval and Query functions when
+// an evaluation error occurs.
+type Error struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Location *ast.Location `json:"location,omitempty"`
+}
+
+const (
+
+ // InternalErr represents an unknown evaluation error.
+ InternalErr string = "eval_internal_error"
+
+ // CancelErr indicates the evaluation process was cancelled.
+ CancelErr string = "eval_cancel_error"
+
+ // ConflictErr indicates a conflict was encountered during evaluation. For
+ // instance, a conflict occurs if a rule produces multiple, differing values
+ // for the same key in an object. Conflict errors indicate the policy does
+ // not account for the data loaded into the policy engine.
+ ConflictErr string = "eval_conflict_error"
+
+ // TypeErr indicates evaluation stopped because an expression was applied to
+ // a value of an inappropriate type.
+ TypeErr string = "eval_type_error"
+
+ // BuiltinErr indicates a built-in function received a semantically invalid
+ // input or encountered some kind of runtime error, e.g., connection
+ // timeout, connection refused, etc.
+ BuiltinErr string = "eval_builtin_error"
+
+ // WithMergeErr indicates that the real and replacement data could not be merged.
+ WithMergeErr string = "eval_with_merge_error"
+)
+
+// IsError returns true if the err is an Error.
+func IsError(err error) bool {
+ _, ok := err.(*Error)
+ return ok
+}
+
+// IsCancel returns true if err was caused by cancellation.
+func IsCancel(err error) bool {
+ if e, ok := err.(*Error); ok {
+ return e.Code == CancelErr
+ }
+ return false
+}
+
+func (e *Error) Error() string {
+
+ msg := fmt.Sprintf("%v: %v", e.Code, e.Message)
+
+ if e.Location != nil {
+ msg = e.Location.String() + ": " + msg
+ }
+
+ return msg
+}
+
+func functionConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "functions must not produce multiple outputs for same inputs",
+ }
+}
+
+func completeDocConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "complete rules must not produce multiple outputs",
+ }
+}
+
+func objectDocKeyConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "object keys must be unique",
+ }
+}
+
+func documentConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: ConflictErr,
+ Location: loc,
+ Message: "base and virtual document keys must be disjoint",
+ }
+}
+
+func unsupportedBuiltinErr(loc *ast.Location) error {
+ return &Error{
+ Code: InternalErr,
+ Location: loc,
+ Message: "unsupported built-in",
+ }
+}
+
+func mergeConflictErr(loc *ast.Location) error {
+ return &Error{
+ Code: WithMergeErr,
+ Location: loc,
+ Message: "real and replacement data could not be merged",
+ }
+}
+
+func internalErr(loc *ast.Location, msg string) error {
+ return &Error{
+ Code: InternalErr,
+ Location: loc,
+ Message: msg,
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/vendor/github.com/open-policy-agent/opa/topdown/eval.go
new file mode 100644
index 00000000..4c508a90
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/eval.go
@@ -0,0 +1,2869 @@
+package topdown
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/topdown/cache"
+ "github.com/open-policy-agent/opa/topdown/copypropagation"
+)
+
+type evalIterator func(*eval) error
+
+type unifyIterator func() error
+
+type queryIDFactory struct {
+ curr uint64
+}
+
+// Note: The first call to Next() returns 0.
+func (f *queryIDFactory) Next() uint64 {
+ curr := f.curr
+ f.curr++
+ return curr
+}
+
+type builtinErrors struct {
+ errs []error
+}
+
+type eval struct {
+ ctx context.Context
+ metrics metrics.Metrics
+ seed io.Reader
+ time *ast.Term
+ queryID uint64
+ queryIDFact *queryIDFactory
+ parent *eval
+ caller *eval
+ cancel Cancel
+ query ast.Body
+ queryCompiler ast.QueryCompiler
+ index int
+ indexing bool
+ bindings *bindings
+ store storage.Store
+ baseCache *baseCache
+ txn storage.Transaction
+ compiler *ast.Compiler
+ input *ast.Term
+ data *ast.Term
+ external *resolverTrie
+ targetStack *refStack
+ tracers []QueryTracer
+ traceEnabled bool
+ plugTraceVars bool
+ instr *Instrumentation
+ builtins map[string]*Builtin
+ builtinCache builtins.Cache
+ virtualCache *virtualCache
+ comprehensionCache *comprehensionCache
+ interQueryBuiltinCache cache.InterQueryCache
+ saveSet *saveSet
+ saveStack *saveStack
+ saveSupport *saveSupport
+ saveNamespace *ast.Term
+ skipSaveNamespace bool
+ inliningControl *inliningControl
+ genvarprefix string
+ genvarid int
+ runtime *ast.Term
+ builtinErrors *builtinErrors
+}
+
+func (e *eval) Run(iter evalIterator) error {
+ e.traceEnter(e.query)
+ return e.eval(func(e *eval) error {
+ e.traceExit(e.query)
+ err := iter(e)
+ e.traceRedo(e.query)
+ return err
+ })
+}
+
+func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) {
+ decl, ok := ast.BuiltinMap[name]
+ if !ok {
+ bi, ok := e.builtins[name]
+ if ok {
+ return bi.Decl, bi.Func, true
+ }
+ } else {
+ f, ok := builtinFunctions[name]
+ if ok {
+ return decl, f, true
+ }
+ }
+ return nil, nil, false
+}
+
+func (e *eval) closure(query ast.Body) *eval {
+ cpy := *e
+ cpy.index = 0
+ cpy.query = query
+ cpy.queryID = cpy.queryIDFact.Next()
+ cpy.parent = e
+ return &cpy
+}
+
+func (e *eval) child(query ast.Body) *eval {
+ cpy := *e
+ cpy.index = 0
+ cpy.query = query
+ cpy.queryID = cpy.queryIDFact.Next()
+ cpy.bindings = newBindings(cpy.queryID, e.instr)
+ cpy.parent = e
+ return &cpy
+}
+
+func (e *eval) next(iter evalIterator) error {
+ e.index++
+ err := e.evalExpr(iter)
+ e.index--
+ return err
+}
+
+func (e *eval) partial() bool {
+ return e.saveSet != nil
+}
+
+func (e *eval) unknown(x interface{}, b *bindings) bool {
+ if !e.partial() {
+ return false
+ }
+
+ // If the caller provided an ast.Value directly (e.g., an ast.Ref) wrap
+ // it as an ast.Term because the saveSet Contains() function expects
+ // ast.Term.
+ if v, ok := x.(ast.Value); ok {
+ x = ast.NewTerm(v)
+ }
+
+ return saveRequired(e.compiler, e.inliningControl, true, e.saveSet, b, x, false)
+}
+
+func (e *eval) traceEnter(x ast.Node) {
+ e.traceEvent(EnterOp, x, "", nil)
+}
+
+func (e *eval) traceExit(x ast.Node) {
+ e.traceEvent(ExitOp, x, "", nil)
+}
+
+func (e *eval) traceEval(x ast.Node) {
+ e.traceEvent(EvalOp, x, "", nil)
+}
+
+func (e *eval) traceDuplicate(x ast.Node) {
+ e.traceEvent(DuplicateOp, x, "", nil)
+}
+
+func (e *eval) traceFail(x ast.Node) {
+ e.traceEvent(FailOp, x, "", nil)
+}
+
+func (e *eval) traceRedo(x ast.Node) {
+ e.traceEvent(RedoOp, x, "", nil)
+}
+
+func (e *eval) traceSave(x ast.Node) {
+ e.traceEvent(SaveOp, x, "", nil)
+}
+
+func (e *eval) traceIndex(x ast.Node, msg string, target *ast.Ref) {
+ e.traceEvent(IndexOp, x, msg, target)
+}
+
+func (e *eval) traceWasm(x ast.Node, target *ast.Ref) {
+ e.traceEvent(WasmOp, x, "", target)
+}
+
+func (e *eval) traceEvent(op Op, x ast.Node, msg string, target *ast.Ref) {
+
+ if !e.traceEnabled {
+ return
+ }
+
+ var parentID uint64
+ if e.parent != nil {
+ parentID = e.parent.queryID
+ }
+
+ evt := Event{
+ QueryID: e.queryID,
+ ParentID: parentID,
+ Op: op,
+ Node: x,
+ Location: x.Loc(),
+ Message: msg,
+ Ref: target,
+ }
+
+ // Skip plugging the local variables, unless any of the tracers
+ // had required it via their configuration. If any required the
+ // variable bindings then we will plug and give values for all
+ // tracers.
+ if e.plugTraceVars {
+
+ evt.Locals = ast.NewValueMap()
+ evt.LocalMetadata = map[ast.Var]VarMetadata{}
+
+ e.bindings.Iter(nil, func(k, v *ast.Term) error {
+ original := k.Value.(ast.Var)
+ rewritten, _ := e.rewrittenVar(original)
+ evt.LocalMetadata[original] = VarMetadata{
+ Name: rewritten,
+ Location: k.Loc(),
+ }
+
+ // For backwards compatibility save a copy of the values too..
+ evt.Locals.Put(k.Value, v.Value)
+ return nil
+ })
+
+ ast.WalkTerms(x, func(term *ast.Term) bool {
+ if v, ok := term.Value.(ast.Var); ok {
+ if _, ok := evt.LocalMetadata[v]; !ok {
+ if rewritten, ok := e.rewrittenVar(v); ok {
+ evt.LocalMetadata[v] = VarMetadata{
+ Name: rewritten,
+ Location: term.Loc(),
+ }
+ }
+ }
+ }
+ return false
+ })
+ }
+
+ for i := range e.tracers {
+ e.tracers[i].TraceEvent(evt)
+ }
+}
+
+func (e *eval) eval(iter evalIterator) error {
+ return e.evalExpr(iter)
+}
+
+func (e *eval) evalExpr(iter evalIterator) error {
+
+ if e.cancel != nil && e.cancel.Cancelled() {
+ return &Error{
+ Code: CancelErr,
+ Message: "caller cancelled query execution",
+ }
+ }
+
+ if e.index >= len(e.query) {
+ return iter(e)
+ }
+
+ expr := e.query[e.index]
+
+ e.traceEval(expr)
+
+ if len(expr.With) > 0 {
+ return e.evalWith(iter)
+ }
+
+ return e.evalStep(func(e *eval) error {
+ return e.next(iter)
+ })
+}
+
+func (e *eval) evalStep(iter evalIterator) error {
+
+ expr := e.query[e.index]
+
+ if expr.Negated {
+ return e.evalNot(iter)
+ }
+
+ var defined bool
+ var err error
+
+ switch terms := expr.Terms.(type) {
+ case []*ast.Term:
+ if expr.IsEquality() {
+ err = e.unify(terms[1], terms[2], func() error {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ })
+ } else {
+ err = e.evalCall(terms, func() error {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ })
+ }
+ case *ast.Term:
+ rterm := e.generateVar(fmt.Sprintf("term_%d_%d", e.queryID, e.index))
+ err = e.unify(terms, rterm, func() error {
+ if e.saveSet.Contains(rterm, e.bindings) {
+ return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error {
+ return iter(e)
+ })
+ }
+ if !e.bindings.Plug(rterm).Equal(ast.BooleanTerm(false)) {
+ defined = true
+ err := iter(e)
+ e.traceRedo(expr)
+ return err
+ }
+ return nil
+ })
+ }
+
+ if err != nil {
+ return err
+ }
+
+ if !defined {
+ e.traceFail(expr)
+ }
+
+ return nil
+}
+
+func (e *eval) evalNot(iter evalIterator) error {
+
+ expr := e.query[e.index]
+
+ if e.unknown(expr, e.bindings) {
+ return e.evalNotPartial(iter)
+ }
+
+ negation := ast.NewBody(expr.Complement().NoWith())
+ child := e.closure(negation)
+
+ var defined bool
+ child.traceEnter(negation)
+
+ err := child.eval(func(*eval) error {
+ child.traceExit(negation)
+ defined = true
+ child.traceRedo(negation)
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if !defined {
+ return iter(e)
+ }
+
+ e.traceFail(expr)
+ return nil
+}
+
+func (e *eval) evalWith(iter evalIterator) error {
+
+ expr := e.query[e.index]
+ var disable []ast.Ref
+
+ if e.partial() {
+
+ // If the value is unknown the with statement cannot be evaluated and so
+ // the entire expression should be saved to be safe. In the future this
+ // could be relaxed in certain cases (e.g., if the with statement would
+ // have no affect.)
+ for _, with := range expr.With {
+ if e.saveSet.ContainsRecursive(with.Value, e.bindings) {
+ return e.saveExprMarkUnknowns(expr, e.bindings, func() error {
+ return e.next(iter)
+ })
+ }
+ }
+
+ // Disable inlining on all references in the expression so the result of
+ // partial evaluation has the same semamntics w/ the with statements
+ // preserved.
+ ast.WalkRefs(expr, func(x ast.Ref) bool {
+ disable = append(disable, x.GroundPrefix())
+ return false
+ })
+ }
+
+ pairsInput := [][2]*ast.Term{}
+ pairsData := [][2]*ast.Term{}
+ targets := []ast.Ref{}
+
+ for i := range expr.With {
+ plugged := e.bindings.Plug(expr.With[i].Value)
+ if isInputRef(expr.With[i].Target) {
+ pairsInput = append(pairsInput, [...]*ast.Term{expr.With[i].Target, plugged})
+ } else if isDataRef(expr.With[i].Target) {
+ pairsData = append(pairsData, [...]*ast.Term{expr.With[i].Target, plugged})
+ }
+ targets = append(targets, expr.With[i].Target.Value.(ast.Ref))
+ }
+
+ input, err := mergeTermWithValues(e.input, pairsInput)
+ if err != nil {
+ return &Error{
+ Code: ConflictErr,
+ Location: expr.Location,
+ Message: err.Error(),
+ }
+ }
+
+ data, err := mergeTermWithValues(e.data, pairsData)
+ if err != nil {
+ return &Error{
+ Code: ConflictErr,
+ Location: expr.Location,
+ Message: err.Error(),
+ }
+ }
+
+ oldInput, oldData := e.evalWithPush(input, data, targets, disable)
+
+ err = e.evalStep(func(e *eval) error {
+ e.evalWithPop(oldInput, oldData)
+ err := e.next(iter)
+ oldInput, oldData = e.evalWithPush(input, data, targets, disable)
+ return err
+ })
+
+ e.evalWithPop(oldInput, oldData)
+
+ return err
+}
+
+func (e *eval) evalWithPush(input *ast.Term, data *ast.Term, targets []ast.Ref, disable []ast.Ref) (*ast.Term, *ast.Term) {
+
+ var oldInput *ast.Term
+
+ if input != nil {
+ oldInput = e.input
+ e.input = input
+ }
+
+ var oldData *ast.Term
+
+ if data != nil {
+ oldData = e.data
+ e.data = data
+ }
+
+ e.comprehensionCache.Push()
+ e.virtualCache.Push()
+ e.targetStack.Push(targets)
+ e.inliningControl.PushDisable(disable, true)
+
+ return oldInput, oldData
+}
+
+// evalWithPop undoes evalWithPush: it pops the pushed scopes (in reverse
+// order) and restores the saved input and data documents.
+func (e *eval) evalWithPop(input *ast.Term, data *ast.Term) {
+	e.inliningControl.PopDisable()
+	e.targetStack.Pop()
+	e.virtualCache.Pop()
+	e.comprehensionCache.Pop()
+	e.data = data
+	e.input = input
+}
+
+// evalNotPartial partially evaluates a negated expression. The complement of
+// the expression is evaluated as a child query and the resulting (partially
+// evaluated) bodies are either negated and inlined into the current query as
+// a cross product, or compiled into support rules when inlining is not
+// possible. If partial evaluation yields no bodies, the negation is always
+// undefined and nothing needs to be saved.
+func (e *eval) evalNotPartial(iter evalIterator) error {
+
+	// Prepare query normally.
+	expr := e.query[e.index]
+	negation := expr.Complement().NoWith()
+	child := e.closure(ast.NewBody(negation))
+
+	// Unknowns is the set of variables that are marked as unknown. The variables
+	// are namespaced with the query ID that they originate in. This ensures that
+	// variables across two or more queries are identified uniquely.
+	//
+	// NOTE(tsandall): this is greedy in the sense that we only need variable
+	// dependencies of the negation.
+	unknowns := e.saveSet.Vars(e.caller.bindings)
+
+	// Run partial evaluation. Since the result may require support, push a new
+	// query onto the save stack to avoid mutating the current save query. If
+	// shallow inlining is not enabled, run copy propagation to further simplify
+	// the result.
+	var cp *copypropagation.CopyPropagator
+
+	if !e.inliningControl.shallow {
+		cp = copypropagation.New(unknowns).WithEnsureNonEmptyBody(true).WithCompiler(e.compiler)
+	}
+
+	var savedQueries []ast.Body
+	e.saveStack.PushQuery(nil)
+
+	child.eval(func(*eval) error {
+		query := e.saveStack.Peek()
+		plugged := query.Plug(e.caller.bindings)
+		// Skip this rule body if it fails to type-check.
+		// Type-checking failure means the rule body will never succeed.
+		if !e.compiler.PassesTypeCheck(plugged) {
+			return nil
+		}
+		if cp != nil {
+			plugged = applyCopyPropagation(cp, e.instr, plugged)
+		}
+		savedQueries = append(savedQueries, plugged)
+		return nil
+	})
+
+	e.saveStack.PopQuery()
+
+	// If partial evaluation produced no results, the expression is always undefined
+	// so it does not have to be saved.
+	if len(savedQueries) == 0 {
+		return iter(e)
+	}
+
+	// Check if the partial evaluation result can be inlined in this query. If not,
+	// generate support rules for the result. Depending on the size of the partial
+	// evaluation result and the contents, it may or may not be inlinable. We treat
+	// the unknowns as safe because vars in the save set will either be known to
+	// the caller or made safe by an expression on the save stack.
+	if !canInlineNegation(unknowns, savedQueries) {
+		return e.evalNotPartialSupport(child.queryID, expr, unknowns, savedQueries, iter)
+	}
+
+	// If we can inline the result, we have to generate the cross product of the
+	// queries. For example:
+	//
+	//	(A && B) || (C && D)
+	//
+	// Becomes:
+	//
+	//	(!A && !C) || (!A && !D) || (!B && !C) || (!B && !D)
+	return complementedCartesianProduct(savedQueries, 0, nil, func(q ast.Body) error {
+		return e.saveInlinedNegatedExprs(q, func() error {
+			return iter(e)
+		})
+	})
+}
+
+// evalNotPartialSupport compiles the partial evaluation result of a negation
+// into a generated support rule set (one rule per query body) and saves a
+// negated expression that refers to the rule set. The rule arguments are the
+// unknown variables actually used by the bodies, sorted for determinism.
+func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns ast.VarSet, queries []ast.Body, iter evalIterator) error {
+
+	// Prepare support rule head. The name encodes the query ID, expression
+	// index, and negation query ID so generated names are unique.
+	supportName := fmt.Sprintf("__not%d_%d_%d__", e.queryID, e.index, negationID)
+	term := ast.RefTerm(ast.DefaultRootDocument, e.saveNamespace, ast.StringTerm(supportName))
+	path := term.Value.(ast.Ref)
+	head := ast.NewHead(ast.Var(supportName), nil, ast.BooleanTerm(true))
+
+	bodyVars := ast.NewVarSet()
+
+	for _, q := range queries {
+		bodyVars.Update(q.Vars(ast.VarVisitorParams{}))
+	}
+
+	// Only pass through unknowns that the bodies actually reference.
+	unknowns = unknowns.Intersect(bodyVars)
+
+	// Make rule args. Sort them to ensure order is deterministic.
+	args := make([]*ast.Term, 0, len(unknowns))
+
+	for v := range unknowns {
+		args = append(args, ast.NewTerm(v))
+	}
+
+	sort.Slice(args, func(i, j int) bool {
+		return args[i].Value.Compare(args[j].Value) < 0
+	})
+
+	if len(args) > 0 {
+		head.Args = ast.Args(args)
+	}
+
+	// Save support rules. NOTE: all rules share the same head.
+	for _, query := range queries {
+		e.saveSupport.Insert(path, &ast.Rule{
+			Head: head,
+			Body: query,
+		})
+	}
+
+	// Save expression that refers to support rule set.
+
+	terms := expr.Terms
+	expr.Terms = nil // Prevent unnecessary copying the terms.
+	cpy := expr.Copy()
+	expr.Terms = terms
+
+	if len(args) > 0 {
+		terms := make([]*ast.Term, len(args)+1)
+		terms[0] = term
+		for i := 0; i < len(args); i++ {
+			terms[i+1] = args[i]
+		}
+		cpy.Terms = terms
+	} else {
+		cpy.Terms = term
+	}
+
+	return e.saveInlinedNegatedExprs([]*ast.Expr{cpy}, func() error {
+		return e.next(iter)
+	})
+}
+
+// evalCall evaluates a call expression. Calls rooted at the data document are
+// dispatched to user-defined functions; all other operators must resolve to
+// built-in functions. During partial evaluation, a call over unknowns is
+// saved rather than evaluated.
+func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error {
+
+	ref := terms[0].Value.(ast.Ref)
+
+	if ref[0].Equal(ast.DefaultRootDocument) {
+		eval := evalFunc{
+			e:     e,
+			ref:   ref,
+			terms: terms,
+		}
+		return eval.eval(iter)
+	}
+
+	bi, f, ok := e.builtinFunc(ref.String())
+	if !ok {
+		return unsupportedBuiltinErr(e.query[e.index].Location)
+	}
+
+	if e.unknown(e.query[e.index], e.bindings) {
+		return e.saveCall(len(bi.Decl.Args()), terms, iter)
+	}
+
+	var parentID uint64
+	if e.parent != nil {
+		parentID = e.parent.queryID
+	}
+
+	bctx := BuiltinContext{
+		Context:                e.ctx,
+		Metrics:                e.metrics,
+		Seed:                   e.seed,
+		Time:                   e.time,
+		Cancel:                 e.cancel,
+		Runtime:                e.runtime,
+		Cache:                  e.builtinCache,
+		InterQueryBuiltinCache: e.interQueryBuiltinCache,
+		Location:               e.query[e.index].Location,
+		QueryTracers:           e.tracers,
+		TraceEnabled:           e.traceEnabled,
+		QueryID:                e.queryID,
+		ParentID:               parentID,
+	}
+
+	eval := evalBuiltin{
+		e:     e,
+		bi:    bi,
+		bctx:  bctx,
+		f:     f,
+		terms: terms[1:], // operands only; terms[0] is the operator
+	}
+	return eval.eval(iter)
+}
+
+// unify biunifies a and b under the evaluator's current bindings.
+func (e *eval) unify(a, b *ast.Term, iter unifyIterator) error {
+	return e.biunify(a, b, e.bindings, e.bindings, iter)
+}
+
+// biunify unifies terms a and b, each carrying its own bindings (b1 and b2
+// respectively), invoking iter for every successful unification. Both terms
+// are first resolved through their bindings. Scalar/scalar combinations of
+// differing types fall through and return nil, i.e., the unification is
+// simply undefined (not an error).
+func (e *eval) biunify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+	a, b1 = b1.apply(a)
+	b, b2 = b2.apply(b)
+	switch vA := a.Value.(type) {
+	case ast.Var, ast.Ref, *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension:
+		return e.biunifyValues(a, b, b1, b2, iter)
+	case ast.Null:
+		switch b.Value.(type) {
+		case ast.Var, ast.Null, ast.Ref:
+			return e.biunifyValues(a, b, b1, b2, iter)
+		}
+	case ast.Boolean:
+		switch b.Value.(type) {
+		case ast.Var, ast.Boolean, ast.Ref:
+			return e.biunifyValues(a, b, b1, b2, iter)
+		}
+	case ast.Number:
+		switch b.Value.(type) {
+		case ast.Var, ast.Number, ast.Ref:
+			return e.biunifyValues(a, b, b1, b2, iter)
+		}
+	case ast.String:
+		switch b.Value.(type) {
+		case ast.Var, ast.String, ast.Ref:
+			return e.biunifyValues(a, b, b1, b2, iter)
+		}
+	case *ast.Array:
+		switch vB := b.Value.(type) {
+		case ast.Var, ast.Ref, *ast.ArrayComprehension:
+			return e.biunifyValues(a, b, b1, b2, iter)
+		case *ast.Array:
+			// Element-wise unification.
+			return e.biunifyArrays(vA, vB, b1, b2, iter)
+		}
+	case ast.Object:
+		switch vB := b.Value.(type) {
+		case ast.Var, ast.Ref, *ast.ObjectComprehension:
+			return e.biunifyValues(a, b, b1, b2, iter)
+		case ast.Object:
+			// Key-wise unification of values.
+			return e.biunifyObjects(vA, vB, b1, b2, iter)
+		}
+	case ast.Set:
+		return e.biunifyValues(a, b, b1, b2, iter)
+	}
+	return nil
+}
+
+// biunifyArrays unifies two arrays element-wise. Arrays of different lengths
+// never unify (the unification is undefined, not an error).
+func (e *eval) biunifyArrays(a, b *ast.Array, b1, b2 *bindings, iter unifyIterator) error {
+	if a.Len() != b.Len() {
+		return nil
+	}
+	return e.biunifyArraysRec(a, b, b1, b2, iter, 0)
+}
+
+// biunifyArraysRec recursively unifies the elements of a and b starting at
+// idx; iter fires once all elements have unified.
+func (e *eval) biunifyArraysRec(a, b *ast.Array, b1, b2 *bindings, iter unifyIterator, idx int) error {
+	if idx == a.Len() {
+		return iter()
+	}
+	return e.biunify(a.Elem(idx), b.Elem(idx), b1, b2, func() error {
+		return e.biunifyArraysRec(a, b, b1, b2, iter, idx+1)
+	})
+}
+
+// biunifyObjects unifies two objects key-wise. Objects of different sizes
+// never unify. Keys containing unbound variables are plugged first because
+// keys themselves cannot be unified.
+func (e *eval) biunifyObjects(a, b ast.Object, b1, b2 *bindings, iter unifyIterator) error {
+	if a.Len() != b.Len() {
+		return nil
+	}
+
+	// Objects must not contain unbound variables as keys at this point as we
+	// cannot unify them. Similar to sets, plug both sides before comparing the
+	// keys and unifying the values.
+	if nonGroundKeys(a) {
+		a = plugKeys(a, b1)
+	}
+
+	if nonGroundKeys(b) {
+		b = plugKeys(b, b2)
+	}
+
+	return e.biunifyObjectsRec(a, b, b1, b2, iter, a, 0)
+}
+
+// biunifyObjectsRec walks the keys of `keys` (a's keys) starting at idx,
+// unifying a's value with b's value for each key. If b lacks a key the
+// unification is undefined (nil).
+func (e *eval) biunifyObjectsRec(a, b ast.Object, b1, b2 *bindings, iter unifyIterator, keys ast.Object, idx int) error {
+	if idx == keys.Len() {
+		return iter()
+	}
+	key, _ := keys.Elem(idx)
+	v2 := b.Get(key)
+	if v2 == nil {
+		return nil
+	}
+	return e.biunify(a.Get(key), v2, b1, b2, func() error {
+		return e.biunifyObjectsRec(a, b, b1, b2, iter, keys, idx+1)
+	})
+}
+
+// biunifyValues handles the mixed cases of unification: refs are evaluated,
+// comprehensions are materialized, var/var and var/value pairs are bound (and
+// unbound again after iter returns), and ground values are compared for
+// equality. Terms covered by the save set are saved instead of evaluated.
+func (e *eval) biunifyValues(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+	// Try to evaluate refs and comprehensions. If partial evaluation is
+	// enabled, then skip evaluation (and save the expression) if the term is
+	// in the save set. Currently, comprehensions are not evaluated during
+	// partial eval. This could be improved in the future.
+
+	var saveA, saveB bool
+
+	if _, ok := a.Value.(ast.Set); ok {
+		saveA = e.saveSet.ContainsRecursive(a, b1)
+	} else {
+		saveA = e.saveSet.Contains(a, b1)
+		if !saveA {
+			if _, refA := a.Value.(ast.Ref); refA {
+				return e.biunifyRef(a, b, b1, b2, iter)
+			}
+		}
+	}
+
+	if _, ok := b.Value.(ast.Set); ok {
+		saveB = e.saveSet.ContainsRecursive(b, b2)
+	} else {
+		saveB = e.saveSet.Contains(b, b2)
+		if !saveB {
+			if _, refB := b.Value.(ast.Ref); refB {
+				// Note the swapped argument order: b is the ref being evaluated.
+				return e.biunifyRef(b, a, b2, b1, iter)
+			}
+		}
+	}
+
+	if saveA || saveB {
+		return e.saveUnify(a, b, b1, b2, iter)
+	}
+
+	if ast.IsComprehension(a.Value) {
+		return e.biunifyComprehension(a, b, b1, b2, false, iter)
+	} else if ast.IsComprehension(b.Value) {
+		return e.biunifyComprehension(b, a, b2, b1, true, iter)
+	}
+
+	// Perform standard unification.
+	_, varA := a.Value.(ast.Var)
+	_, varB := b.Value.(ast.Var)
+
+	var undo undo
+
+	if varA && varB {
+		if b1 == b2 && a.Equal(b) {
+			// Same variable under the same bindings: trivially unified.
+			return iter()
+		}
+		b1.bind(a, b, b2, &undo)
+		err := iter()
+		undo.Undo()
+		return err
+	} else if varA && !varB {
+		b1.bind(a, b, b2, &undo)
+		err := iter()
+		undo.Undo()
+		return err
+	} else if varB && !varA {
+		b2.bind(b, a, b1, &undo)
+		err := iter()
+		undo.Undo()
+		return err
+	}
+
+	// Sets must not contain unbound variables at this point as we cannot unify
+	// them. So simply plug both sides (to substitute any bound variables with
+	// values) and then check for equality.
+	switch a.Value.(type) {
+	case ast.Set:
+		a = b1.Plug(a)
+		b = b2.Plug(b)
+	}
+
+	if a.Equal(b) {
+		return iter()
+	}
+
+	return nil
+}
+
+// biunifyRef evaluates reference a and unifies the resulting value with b.
+// References rooted at data are evaluated against the rule tree (virtual
+// documents) while input-rooted and variable-rooted references are evaluated
+// against a concrete term.
+func (e *eval) biunifyRef(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+
+	ref := a.Value.(ast.Ref)
+
+	if ref[0].Equal(ast.DefaultRootDocument) {
+		node := e.compiler.RuleTree.Child(ref[0].Value)
+		eval := evalTree{
+			e:         e,
+			ref:       ref,
+			pos:       1,
+			plugged:   ref.Copy(),
+			bindings:  b1,
+			rterm:     b,
+			rbindings: b2,
+			node:      node,
+		}
+		return eval.eval(iter)
+	}
+
+	var term *ast.Term
+	var termbindings *bindings
+
+	if ref[0].Equal(ast.InputRootDocument) {
+		term = e.input
+		termbindings = b1
+	} else {
+		// Variable-rooted ref: resolve the head through the bindings. If the
+		// head is unbound (apply returned the same term), the ref is undefined.
+		term, termbindings = b1.apply(ref[0])
+		if term == ref[0] {
+			term = nil
+		}
+	}
+
+	if term == nil {
+		return nil
+	}
+
+	eval := evalTerm{
+		e:            e,
+		ref:          ref,
+		pos:          1,
+		bindings:     b1,
+		term:         term,
+		termbindings: termbindings,
+		rterm:        b,
+		rbindings:    b2,
+	}
+
+	return eval.eval(iter)
+}
+
+// biunifyComprehension evaluates comprehension term a and unifies the result
+// with b. If the comprehension depends on unknowns it is saved for partial
+// evaluation; otherwise the comprehension-index cache is consulted before
+// falling back to direct evaluation by type.
+func (e *eval) biunifyComprehension(a, b *ast.Term, b1, b2 *bindings, swap bool, iter unifyIterator) error {
+
+	if e.unknown(a, b1) {
+		return e.biunifyComprehensionPartial(a, b, b1, b2, swap, iter)
+	}
+
+	value, err := e.buildComprehensionCache(a)
+
+	if err != nil {
+		return err
+	} else if value != nil {
+		// Cache hit: unify the cached result directly.
+		return e.biunify(value, b, b1, b2, iter)
+	} else {
+		e.instr.counterIncr(evalOpComprehensionCacheMiss)
+	}
+
+	switch a := a.Value.(type) {
+	case *ast.ArrayComprehension:
+		return e.biunifyComprehensionArray(a, b, b1, b2, iter)
+	case *ast.SetComprehension:
+		return e.biunifyComprehensionSet(a, b, b1, b2, iter)
+	case *ast.ObjectComprehension:
+		return e.biunifyComprehensionObject(a, b, b1, b2, iter)
+	}
+
+	return internalErr(e.query[e.index].Location, "illegal comprehension type")
+}
+
+// buildComprehensionCache returns the cached result of comprehension a keyed
+// on the values of its index keys, building the cache on first use. It
+// returns (nil, nil) when the comprehension is not indexed (cache skipped) or
+// when no entry exists for the current key values.
+func (e *eval) buildComprehensionCache(a *ast.Term) (*ast.Term, error) {
+
+	index := e.comprehensionIndex(a)
+	if index == nil {
+		e.instr.counterIncr(evalOpComprehensionCacheSkip)
+		return nil, nil
+	}
+
+	cache, ok := e.comprehensionCache.Elem(a)
+	if !ok {
+		// First use in this scope: evaluate the comprehension once for all
+		// key values and memoize the grouped results.
+		var err error
+		switch x := a.Value.(type) {
+		case *ast.ArrayComprehension:
+			cache, err = e.buildComprehensionCacheArray(x, index.Keys)
+		case *ast.SetComprehension:
+			cache, err = e.buildComprehensionCacheSet(x, index.Keys)
+		case *ast.ObjectComprehension:
+			cache, err = e.buildComprehensionCacheObject(x, index.Keys)
+		default:
+			err = internalErr(e.query[e.index].Location, "illegal comprehension type")
+		}
+		if err != nil {
+			return nil, err
+		}
+		e.comprehensionCache.Set(a, cache)
+		e.instr.counterIncr(evalOpComprehensionCacheBuild)
+	} else {
+		e.instr.counterIncr(evalOpComprehensionCacheHit)
+	}
+
+	// Look up the entry for the current (plugged) key values.
+	values := make([]*ast.Term, len(index.Keys))
+
+	for i := range index.Keys {
+		values[i] = e.bindings.Plug(index.Keys[i])
+	}
+
+	return cache.Get(values), nil
+}
+
+// buildComprehensionCacheArray evaluates the comprehension body once and
+// groups the head terms into arrays keyed by the plugged index-key values.
+func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) {
+	child := e.child(x.Body)
+	node := newComprehensionCacheElem()
+	return node, child.Run(func(child *eval) error {
+		values := make([]*ast.Term, len(keys))
+		for i := range keys {
+			values[i] = child.bindings.Plug(keys[i])
+		}
+		head := child.bindings.Plug(x.Term)
+		cached := node.Get(values)
+		if cached != nil {
+			// Append to the existing array for this key combination.
+			cached.Value = cached.Value.(*ast.Array).Append(head)
+		} else {
+			node.Put(values, ast.ArrayTerm(head))
+		}
+		return nil
+	})
+}
+
+// buildComprehensionCacheSet evaluates the comprehension body once and groups
+// the head terms into sets keyed by the plugged index-key values.
+func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) {
+	child := e.child(x.Body)
+	node := newComprehensionCacheElem()
+	return node, child.Run(func(child *eval) error {
+		values := make([]*ast.Term, len(keys))
+		for i := range keys {
+			values[i] = child.bindings.Plug(keys[i])
+		}
+		head := child.bindings.Plug(x.Term)
+		cached := node.Get(values)
+		if cached != nil {
+			// Sets are mutable, so add in place.
+			set := cached.Value.(ast.Set)
+			set.Add(head)
+		} else {
+			node.Put(values, ast.SetTerm(head))
+		}
+		return nil
+	})
+}
+
+// buildComprehensionCacheObject evaluates the comprehension body once and
+// groups the key/value pairs into objects keyed by the plugged index-key
+// values. NOTE: later inserts for the same object key overwrite earlier ones.
+func (e *eval) buildComprehensionCacheObject(x *ast.ObjectComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) {
+	child := e.child(x.Body)
+	node := newComprehensionCacheElem()
+	return node, child.Run(func(child *eval) error {
+		values := make([]*ast.Term, len(keys))
+		for i := range keys {
+			values[i] = child.bindings.Plug(keys[i])
+		}
+		headKey := child.bindings.Plug(x.Key)
+		headValue := child.bindings.Plug(x.Value)
+		cached := node.Get(values)
+		if cached != nil {
+			obj := cached.Value.(ast.Object)
+			obj.Insert(headKey, headValue)
+		} else {
+			node.Put(values, ast.ObjectTerm(ast.Item(headKey, headValue)))
+		}
+		return nil
+	})
+}
+
+// biunifyComprehensionPartial saves a comprehension that depends on unknowns.
+// The comprehension is copied, equality expressions for all visible bindings
+// are appended to its body to keep it safe, the body is namespaced, and the
+// resulting unification is pushed onto the save stack.
+func (e *eval) biunifyComprehensionPartial(a, b *ast.Term, b1, b2 *bindings, swap bool, iter unifyIterator) error {
+	cpyA := a.Copy()
+
+	// Capture bindings available to the comprehension. We will add expressions
+	// to the comprehension body that ensure the comprehension body is safe.
+	// Currently this process adds _all_ bindings (even if they are not
+	// needed.) Eventually we may want to make the logic a bit smarter.
+	var extras []*ast.Expr
+
+	err := b1.Iter(e.caller.bindings, func(k, v *ast.Term) error {
+		extras = append(extras, ast.Equality.Expr(k, v))
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Namespace the variables in the body to avoid collision when the final
+	// queries returned by partial evaluation.
+	var body *ast.Body
+
+	switch a := cpyA.Value.(type) {
+	case *ast.ArrayComprehension:
+		body = &a.Body
+	case *ast.SetComprehension:
+		body = &a.Body
+	case *ast.ObjectComprehension:
+		body = &a.Body
+	default:
+		return fmt.Errorf("illegal comprehension %T", a)
+	}
+
+	// NOTE: the loop variable shadows the receiver here; only body.Append is
+	// called inside the loop.
+	for _, e := range extras {
+		body.Append(e)
+	}
+
+	b1.Namespace(cpyA, e.caller.bindings)
+
+	// The other term might need to be plugged so include the bindings. The
+	// bindings for the comprehension term are saved (for compatibility) but
+	// the eventual plug operation on the comprehension will be a no-op.
+	if !swap {
+		return e.saveUnify(cpyA, b, b1, b2, iter)
+	}
+
+	return e.saveUnify(b, cpyA, b2, b1, iter)
+}
+
+// biunifyComprehensionArray evaluates an array comprehension to completion,
+// collecting one element per body result, then unifies the array with b.
+func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+	result := ast.NewArray()
+	child := e.closure(x.Body)
+	err := child.Run(func(child *eval) error {
+		result = result.Append(child.bindings.Plug(x.Term))
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	return e.biunify(ast.NewTerm(result), b, b1, b2, iter)
+}
+
+// biunifyComprehensionSet evaluates a set comprehension to completion,
+// deduplicating elements via the set, then unifies the set with b.
+func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+	result := ast.NewSet()
+	child := e.closure(x.Body)
+	err := child.Run(func(child *eval) error {
+		result.Add(child.bindings.Plug(x.Term))
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	return e.biunify(ast.NewTerm(result), b, b1, b2, iter)
+}
+
+// biunifyComprehensionObject evaluates an object comprehension to completion
+// and unifies the object with b. Producing two different values for the same
+// key is a conflict error.
+func (e *eval) biunifyComprehensionObject(x *ast.ObjectComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+	result := ast.NewObject()
+	child := e.closure(x.Body)
+	err := child.Run(func(child *eval) error {
+		key := child.bindings.Plug(x.Key)
+		value := child.bindings.Plug(x.Value)
+		exist := result.Get(key)
+		if exist != nil && !exist.Equal(value) {
+			return objectDocKeyConflictErr(x.Key.Location)
+		}
+		result.Insert(key, value)
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	return e.biunify(ast.NewTerm(result), b, b1, b2, iter)
+}
+
+// saveExpr pushes expr (stamped with the current expression's with/location)
+// onto the save stack for the duration of iter, then pops it.
+func (e *eval) saveExpr(expr *ast.Expr, b *bindings, iter unifyIterator) error {
+	expr.With = e.query[e.index].With
+	expr.Location = e.query[e.index].Location
+	e.saveStack.Push(expr, b, b)
+	e.traceSave(expr)
+	err := iter()
+	e.saveStack.Pop()
+	return err
+}
+
+// saveExprMarkUnknowns saves expr like saveExpr but additionally marks any
+// output terms of the expression as unknown (pushing them onto the save set)
+// for the duration of iter.
+func (e *eval) saveExprMarkUnknowns(expr *ast.Expr, b *bindings, iter unifyIterator) error {
+	expr.With = e.query[e.index].With
+	expr.Location = e.query[e.index].Location
+	declArgsLen, err := e.getDeclArgsLen(expr)
+	if err != nil {
+		return err
+	}
+	var pops int
+	if pairs := getSavePairsFromExpr(declArgsLen, expr, b, nil); len(pairs) > 0 {
+		pops += len(pairs)
+		for _, p := range pairs {
+			e.saveSet.Push([]*ast.Term{p.term}, p.b)
+		}
+	}
+	e.saveStack.Push(expr, b, b)
+	e.traceSave(expr)
+	err = iter()
+	e.saveStack.Pop()
+	// Unwind the save-set entries pushed above.
+	for i := 0; i < pops; i++ {
+		e.saveSet.Pop()
+	}
+	return err
+}
+
+// saveUnify saves an equality expression between a and b instead of
+// evaluating it, marking variables on either side as unknown for the
+// duration of iter. The save stack/set entries are popped afterwards.
+func (e *eval) saveUnify(a, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error {
+	e.instr.startTimer(partialOpSaveUnify)
+	expr := ast.Equality.Expr(a, b)
+	expr.With = e.query[e.index].With
+	expr.Location = e.query[e.index].Location
+	pops := 0
+	if pairs := getSavePairsFromTerm(a, b1, nil); len(pairs) > 0 {
+		pops += len(pairs)
+		for _, p := range pairs {
+			e.saveSet.Push([]*ast.Term{p.term}, p.b)
+		}
+
+	}
+	if pairs := getSavePairsFromTerm(b, b2, nil); len(pairs) > 0 {
+		pops += len(pairs)
+		for _, p := range pairs {
+			e.saveSet.Push([]*ast.Term{p.term}, p.b)
+		}
+	}
+	e.saveStack.Push(expr, b1, b2)
+	e.traceSave(expr)
+	e.instr.stopTimer(partialOpSaveUnify)
+	err := iter()
+
+	e.saveStack.Pop()
+	for i := 0; i < pops; i++ {
+		e.saveSet.Pop()
+	}
+
+	return err
+}
+
+// saveCall saves a call expression instead of evaluating it. If the call site
+// captures an output value (one more operand than declared arguments), vars
+// in the output position are marked unknown for the duration of iter.
+func (e *eval) saveCall(declArgsLen int, terms []*ast.Term, iter unifyIterator) error {
+	expr := ast.NewExpr(terms)
+	expr.With = e.query[e.index].With
+	expr.Location = e.query[e.index].Location
+
+	// If call-site includes output value then partial eval must add vars in output
+	// position to the save set.
+	pops := 0
+	if declArgsLen == len(terms)-2 {
+		if pairs := getSavePairsFromTerm(terms[len(terms)-1], e.bindings, nil); len(pairs) > 0 {
+			pops += len(pairs)
+			for _, p := range pairs {
+				e.saveSet.Push([]*ast.Term{p.term}, p.b)
+			}
+		}
+	}
+	e.saveStack.Push(expr, e.bindings, nil)
+	e.traceSave(expr)
+	err := iter()
+
+	e.saveStack.Pop()
+	for i := 0; i < pops; i++ {
+		e.saveSet.Pop()
+	}
+	return err
+}
+
+// saveInlinedNegatedExprs pushes the given (already negated/inlined)
+// expressions onto the save stack for the duration of iter. The current
+// expression's with statements are copied and plugged (namespaced against the
+// caller's bindings) onto each saved expression.
+func (e *eval) saveInlinedNegatedExprs(exprs []*ast.Expr, iter unifyIterator) error {
+
+	with := make([]*ast.With, len(e.query[e.index].With))
+
+	for i := range e.query[e.index].With {
+		cpy := e.query[e.index].With[i].Copy()
+		cpy.Value = e.bindings.PlugNamespaced(cpy.Value, e.caller.bindings)
+		with[i] = cpy
+	}
+
+	for _, expr := range exprs {
+		expr.With = with
+		e.saveStack.Push(expr, nil, nil)
+		e.traceSave(expr)
+	}
+	err := iter()
+	// Pop one stack entry per pushed expression.
+	for i := 0; i < len(exprs); i++ {
+		e.saveStack.Pop()
+	}
+	return err
+}
+
+// getRules returns the rules that may apply to ref, using the rule index when
+// indexing is enabled (or all rules otherwise), and emits a trace event
+// describing how many rules matched. Returns (nil, nil) when no index exists
+// for ref.
+func (e *eval) getRules(ref ast.Ref) (*ast.IndexResult, error) {
+	e.instr.startTimer(evalOpRuleIndex)
+	defer e.instr.stopTimer(evalOpRuleIndex)
+
+	index := e.compiler.RuleIndex(ref)
+	if index == nil {
+		return nil, nil
+	}
+
+	var result *ast.IndexResult
+	var err error
+	if e.indexing {
+		result, err = index.Lookup(e)
+	} else {
+		result, err = index.AllRules(e)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the trace message without fmt to avoid boxing on this hot path.
+	var msg string
+	if len(result.Rules) == 1 {
+		msg = "(matched 1 rule)"
+	} else {
+		var b strings.Builder
+		b.Grow(len("(matched NNNN rules)"))
+		// BUGFIX: include the opening parenthesis so the multi-rule message
+		// matches the single-rule form above.
+		b.WriteString("(matched ")
+		b.WriteString(strconv.FormatInt(int64(len(result.Rules)), 10))
+		b.WriteString(" rules)")
+		msg = b.String()
+	}
+	e.traceIndex(e.query[e.index], msg, &ref)
+	return result, err
+}
+
+// Resolve resolves a ground reference to a base document value, merging the
+// replacement value (from e.input/e.data, e.g. installed by with statements)
+// with the real value from the base-document cache or storage. References
+// that are disabled for inlining or covered by the save set resolve to an
+// UnknownValueErr so callers treat them as unknowns.
+func (e *eval) Resolve(ref ast.Ref) (ast.Value, error) {
+	e.instr.startTimer(evalOpResolve)
+
+	if e.inliningControl.Disabled(ref, true) || e.saveSet.Contains(ast.NewTerm(ref), nil) {
+		e.instr.stopTimer(evalOpResolve)
+		return nil, ast.UnknownValueErr{}
+	}
+
+	if ref[0].Equal(ast.InputRootDocument) {
+		if e.input != nil {
+			// A lookup miss yields nil (undefined), not an error.
+			v, err := e.input.Value.Find(ref[1:])
+			if err != nil {
+				v = nil
+			}
+			e.instr.stopTimer(evalOpResolve)
+			return v, nil
+		}
+		e.instr.stopTimer(evalOpResolve)
+		return nil, nil
+	}
+
+	if ref[0].Equal(ast.DefaultRootDocument) {
+
+		var repValue ast.Value
+
+		if e.data != nil {
+			if v, err := e.data.Value.Find(ref[1:]); err == nil {
+				repValue = v
+			} else {
+				repValue = nil
+			}
+		}
+
+		if e.targetStack.Prefixed(ref) {
+			// The ref is targeted by a with statement: the replacement value
+			// fully shadows the real document.
+			e.instr.stopTimer(evalOpResolve)
+			return repValue, nil
+		}
+
+		var merged ast.Value
+		var err error
+
+		// Converting large JSON values into AST values can be fairly expensive. For
+		// example, a 2MB JSON value can take upwards of 30 milliseconds to convert.
+		// We cache the result of conversion here in case the same base document is
+		// being read multiple times during evaluation.
+		realValue := e.baseCache.Get(ref)
+		if realValue != nil {
+			e.instr.counterIncr(evalOpBaseCacheHit)
+			if repValue == nil {
+				e.instr.stopTimer(evalOpResolve)
+				return realValue, nil
+			}
+			var ok bool
+			merged, ok = merge(repValue, realValue)
+			if !ok {
+				err = mergeConflictErr(ref[0].Location)
+			}
+		} else {
+			e.instr.counterIncr(evalOpBaseCacheMiss)
+			merged, err = e.resolveReadFromStorage(ref, repValue)
+		}
+		e.instr.stopTimer(evalOpResolve)
+		return merged, err
+	}
+	e.instr.stopTimer(evalOpResolve)
+	return nil, fmt.Errorf("illegal ref")
+}
+
+// resolveReadFromStorage reads ref from the external resolver or the store,
+// caches the converted AST value, and merges it with the replacement value a
+// (if any). Refs containing non-scalar operands and not-found paths resolve
+// to a unchanged.
+func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, error) {
+	if refContainsNonScalar(ref) {
+		return a, nil
+	}
+
+	v, err := e.external.Resolve(e, ref)
+
+	if err != nil {
+		return nil, err
+	} else if v == nil {
+
+		path, err := storage.NewPathForRef(ref)
+		if err != nil {
+			if !storage.IsNotFound(err) {
+				return nil, err
+			}
+			return a, nil
+		}
+
+		blob, err := e.store.Read(e.ctx, e.txn, path)
+		if err != nil {
+			if !storage.IsNotFound(err) {
+				return nil, err
+			}
+			return a, nil
+		}
+
+		// Strip the reserved system document from reads of the data root so
+		// it is never exposed to policies.
+		if len(path) == 0 {
+			obj := blob.(map[string]interface{})
+			if len(obj) > 0 {
+				cpy := make(map[string]interface{}, len(obj)-1)
+				for k, v := range obj {
+					if string(ast.SystemDocumentKey) == k {
+						continue
+					}
+					cpy[k] = v
+				}
+				blob = cpy
+			}
+		}
+
+		v, err = ast.InterfaceToValue(blob)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	e.baseCache.Put(ref, v)
+	if a == nil {
+		return v, nil
+	}
+
+	merged, ok := merge(a, v)
+	if !ok {
+		return nil, mergeConflictErr(ref[0].Location)
+	}
+
+	return merged, nil
+}
+
+// generateVar returns a fresh variable term named with the evaluator's
+// generated-variable prefix and the given suffix.
+func (e *eval) generateVar(suffix string) *ast.Term {
+	return ast.VarTerm(fmt.Sprintf("%v_%v", e.genvarprefix, suffix))
+}
+
+// rewrittenVar maps a compiler-generated variable back to the name it was
+// rewritten from, consulting the module compiler first and then the query
+// compiler. Returns (v, false) if no rewrite is recorded for v.
+func (e *eval) rewrittenVar(v ast.Var) (ast.Var, bool) {
+	if c := e.compiler; c != nil {
+		if orig, found := c.RewrittenVars[v]; found {
+			return orig, true
+		}
+	}
+	if qc := e.queryCompiler; qc != nil {
+		if orig, found := qc.RewrittenVars()[v]; found {
+			return orig, true
+		}
+	}
+	return v, false
+}
+
+// getDeclArgsLen returns the number of declared arguments of the operator of
+// call expression x: built-in declarations are consulted first, then the rule
+// index. Returns -1 when x is not a call or the operator has no known
+// declaration.
+func (e *eval) getDeclArgsLen(x *ast.Expr) (int, error) {
+
+	if !x.IsCall() {
+		return -1, nil
+	}
+
+	operator := x.Operator()
+	bi, _, ok := e.builtinFunc(operator.String())
+
+	if ok {
+		return len(bi.Decl.Args()), nil
+	}
+
+	ir, err := e.getRules(operator)
+	if err != nil {
+		return -1, err
+	} else if ir == nil || ir.Empty() {
+		return -1, nil
+	}
+
+	// All rules of a function share the same arity; inspect the first.
+	return len(ir.Rules[0].Head.Args), nil
+}
+
+// evalBuiltin evaluates a single built-in function call.
+type evalBuiltin struct {
+	e     *eval          // parent evaluator
+	bi    *ast.Builtin   // built-in declaration (for arity)
+	bctx  BuiltinContext // context passed to the implementation
+	f     BuiltinFunc    // built-in implementation
+	terms []*ast.Term    // operands (operator excluded)
+}
+
+// eval plugs the operands and invokes the built-in. If the call site captures
+// an output (one more operand than declared args), the output is unified with
+// the last operand; otherwise any non-false result satisfies the expression.
+// Errors returned by iter are wrapped in Halt so they propagate past the
+// built-in error handling below; non-halt built-in errors are accumulated on
+// the evaluator and suppressed (the expression is simply undefined).
+func (e evalBuiltin) eval(iter unifyIterator) error {
+
+	operands := make([]*ast.Term, len(e.terms))
+
+	for i := 0; i < len(e.terms); i++ {
+		operands[i] = e.e.bindings.Plug(e.terms[i])
+	}
+
+	numDeclArgs := len(e.bi.Decl.Args())
+
+	e.e.instr.startTimer(evalOpBuiltinCall)
+
+	err := e.f(e.bctx, operands, func(output *ast.Term) error {
+
+		// Exclude iterator time from the built-in call timer.
+		e.e.instr.stopTimer(evalOpBuiltinCall)
+
+		var err error
+
+		if len(operands) == numDeclArgs {
+			if output.Value.Compare(ast.Boolean(false)) != 0 {
+				err = iter()
+			}
+		} else {
+			err = e.e.unify(e.terms[len(e.terms)-1], output, iter)
+		}
+
+		if err != nil {
+			// Distinguish iterator errors from built-in errors.
+			err = Halt{Err: err}
+		}
+
+		e.e.instr.startTimer(evalOpBuiltinCall)
+		return err
+	})
+
+	if err != nil {
+		if h, ok := err.(Halt); !ok {
+			// Built-in error: record it and continue as undefined.
+			e.e.builtinErrors.errs = append(e.e.builtinErrors.errs, err)
+			err = nil
+		} else {
+			err = h.Err
+		}
+	}
+
+	e.e.instr.stopTimer(evalOpBuiltinCall)
+	return err
+}
+
+// evalFunc evaluates a call to a user-defined (data-rooted) function.
+type evalFunc struct {
+	e     *eval      // parent evaluator
+	ref   ast.Ref    // function reference (the call operator)
+	terms []*ast.Term // full call terms: operator followed by operands
+}
+
+// eval looks up the function's rules and evaluates each one. When a rule is
+// undefined, its else chain is tried in order until one succeeds. The
+// previous result is threaded through for conflict detection. Ordered (else)
+// rules over unknowns cannot be partially evaluated, so the call is saved.
+func (e evalFunc) eval(iter unifyIterator) error {
+
+	ir, err := e.e.getRules(e.ref)
+	if err != nil {
+		return err
+	}
+
+	if ir.Empty() {
+		return nil
+	}
+
+	if len(ir.Else) > 0 && e.e.unknown(e.e.query[e.e.index], e.e.bindings) {
+		// Partial evaluation of ordered rules is not supported currently. Save the
+		// expression and continue. This could be revisited in the future.
+		return e.e.saveCall(len(ir.Rules[0].Head.Args), e.terms, iter)
+	}
+
+	var prev *ast.Term
+
+	for i := range ir.Rules {
+		next, err := e.evalOneRule(iter, ir.Rules[i], prev)
+		if err != nil {
+			return err
+		}
+		if next == nil {
+			// Rule was undefined: fall through its else chain.
+			for _, rule := range ir.Else[ir.Rules[i]] {
+				next, err = e.evalOneRule(iter, rule, prev)
+				if err != nil {
+					return err
+				}
+				if next != nil {
+					break
+				}
+			}
+		}
+		if next != nil {
+			prev = next
+		}
+	}
+
+	return nil
+}
+
+// evalOneRule evaluates a single function rule: the call-site operands are
+// unified with the rule's head arguments (plus the head value when the call
+// captures an output), the body is evaluated, and the plugged head value is
+// returned. prev is the result of a previously successful rule and is used
+// for conflict detection (outside partial evaluation).
+func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term) (*ast.Term, error) {
+
+	child := e.e.child(rule.Body)
+
+	// One slot per call-site operand; the extra slot (if any) captures the
+	// head value as the output.
+	args := make([]*ast.Term, len(e.terms)-1)
+
+	for i := range rule.Head.Args {
+		args[i] = rule.Head.Args[i]
+	}
+
+	if len(args) == len(rule.Head.Args)+1 {
+		args[len(args)-1] = rule.Head.Value
+	}
+
+	var result *ast.Term
+
+	child.traceEnter(rule)
+
+	err := child.biunifyArrays(ast.NewArray(e.terms[1:]...), ast.NewArray(args...), e.e.bindings, child.bindings, func() error {
+		return child.eval(func(child *eval) error {
+			child.traceExit(rule)
+
+			// Partial evaluation must save an expression that tests the output value if the output value
+			// was not captured to handle the case where the output value may be `false`.
+			if len(rule.Head.Args) == len(e.terms)-1 && e.e.saveSet.Contains(rule.Head.Value, child.bindings) {
+				err := e.e.saveExpr(ast.NewExpr(rule.Head.Value), child.bindings, iter)
+				child.traceRedo(rule)
+				return err
+			}
+
+			result = child.bindings.Plug(rule.Head.Value)
+
+			// Uncaptured call: a false head value means the expression is
+			// undefined.
+			if len(rule.Head.Args) == len(e.terms)-1 {
+				if result.Value.Compare(ast.Boolean(false)) == 0 {
+					return nil
+				}
+			}
+
+			// Partial evaluation should explore all rules and may not produce
+			// a ground result so we do not perform conflict detection or
+			// deduplication. See "ignore conflicts: functions" test case for
+			// an example.
+			if !e.e.partial() {
+				if prev != nil {
+					if ast.Compare(prev, result) != 0 {
+						return functionConflictErr(rule.Location)
+					}
+					child.traceRedo(rule)
+					return nil
+				}
+			}
+
+			prev = result
+
+			if err := iter(); err != nil {
+				return err
+			}
+
+			child.traceRedo(rule)
+			return nil
+		})
+	})
+
+	return result, err
+}
+
+// evalTree evaluates a data-rooted reference by walking the rule tree and
+// base documents in parallel, one ref operand at a time.
+type evalTree struct {
+	e         *eval         // parent evaluator
+	ref       ast.Ref       // original reference being evaluated
+	plugged   ast.Ref       // copy of ref with operands plugged as we descend
+	pos       int           // index of the next operand to process
+	bindings  *bindings     // bindings for ref
+	rterm     *ast.Term     // term to unify the resolved value with
+	rbindings *bindings     // bindings for rterm
+	node      *ast.TreeNode // current rule tree node (nil once past virtual docs)
+}
+
+// eval processes the operand at pos: once all operands are consumed the
+// resolved document is unified with rterm; a ground operand descends directly
+// while a non-ground operand enumerates the keys at this position.
+func (e evalTree) eval(iter unifyIterator) error {
+
+	if len(e.ref) == e.pos {
+		return e.finish(iter)
+	}
+
+	plugged := e.bindings.Plug(e.ref[e.pos])
+
+	if plugged.IsGround() {
+		return e.next(iter, plugged)
+	}
+
+	return e.enumerate(iter)
+}
+
+// finish computes the full extent of the document rooted at the plugged ref
+// and unifies it with rterm. Under partial evaluation the value may not be
+// computable, so the unification is saved instead.
+func (e evalTree) finish(iter unifyIterator) error {
+
+	// During partial evaluation it may not be possible to compute the value
+	// for this reference if it refers to a virtual document so save the entire
+	// expression. See "save: full extent" test case for an example. We also
+	// need to account for the inlining controls here to prevent base documents
+	// from being inlined when they should not be.
+	save := e.e.unknown(e.ref, e.e.bindings)
+
+	if save {
+		return e.e.saveUnify(ast.NewTerm(e.plugged), e.rterm, e.bindings, e.rbindings, iter)
+	}
+
+	v, err := e.extent()
+	if err != nil || v == nil {
+		return err
+	}
+
+	return e.e.biunify(e.rterm, v, e.rbindings, e.bindings, func() error {
+		return iter()
+	})
+}
+
+// next advances past the operand at pos with the given plugged value. If the
+// rule tree has a matching child holding rules — and the prefix is not
+// shadowed by a with-statement target — evaluation switches to evalVirtual;
+// otherwise the walk continues one level deeper.
+func (e evalTree) next(iter unifyIterator, plugged *ast.Term) error {
+
+	var node *ast.TreeNode
+
+	cpy := e
+	cpy.plugged[e.pos] = plugged
+	cpy.pos++
+
+	if !e.e.targetStack.Prefixed(cpy.plugged[:cpy.pos]) {
+		if e.node != nil {
+			node = e.node.Child(plugged.Value)
+			if node != nil && len(node.Values) > 0 {
+				r := evalVirtual{
+					e:         e.e,
+					ref:       e.ref,
+					plugged:   e.plugged,
+					pos:       e.pos,
+					bindings:  e.bindings,
+					rterm:     e.rterm,
+					rbindings: e.rbindings,
+				}
+				r.plugged[e.pos] = plugged
+				return r.eval(iter)
+			}
+		}
+	}
+
+	cpy.node = node
+	return cpy.eval(iter)
+}
+
+// enumerate handles a non-ground operand at pos by iterating the keys of the
+// base document at the current prefix (array indices, object keys, or set
+// elements) and then the rule-tree children, unifying each key with the
+// operand and descending. Disabled-for-inlining prefixes are saved instead.
+func (e evalTree) enumerate(iter unifyIterator) error {
+
+	if e.e.inliningControl.Disabled(e.plugged[:e.pos], true) {
+		return e.e.saveUnify(ast.NewTerm(e.plugged), e.rterm, e.bindings, e.rbindings, iter)
+	}
+
+	doc, err := e.e.Resolve(e.plugged[:e.pos])
+	if err != nil {
+		return err
+	}
+
+	if doc != nil {
+		switch doc := doc.(type) {
+		case *ast.Array:
+			for i := 0; i < doc.Len(); i++ {
+				k := ast.IntNumberTerm(i)
+				err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error {
+					return e.next(iter, k)
+				})
+				if err != nil {
+					return err
+				}
+			}
+		case ast.Object:
+			err := doc.Iter(func(k, _ *ast.Term) error {
+				return e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error {
+					return e.next(iter, k)
+				})
+			})
+			if err != nil {
+				return err
+			}
+		case ast.Set:
+			err := doc.Iter(func(elem *ast.Term) error {
+				return e.e.biunify(elem, e.ref[e.pos], e.bindings, e.bindings, func() error {
+					return e.next(iter, elem)
+				})
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if e.node == nil {
+		return nil
+	}
+
+	// Enumerate virtual document keys (rule tree children) in sorted order
+	// for determinism.
+	for _, k := range e.node.Sorted {
+		key := ast.NewTerm(k)
+		if err := e.e.biunify(key, e.ref[e.pos], e.bindings, e.bindings, func() error {
+			return e.next(iter, key)
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// extent computes the full document rooted at the plugged ref by merging the
+// base document value with the virtual document leaves. Returns (nil, nil)
+// when neither exists; merge conflicts are errors.
+func (e evalTree) extent() (*ast.Term, error) {
+	base, err := e.e.Resolve(e.plugged)
+	if err != nil {
+		return nil, err
+	}
+
+	virtual, err := e.leaves(e.plugged, e.node)
+	if err != nil {
+		return nil, err
+	}
+
+	if virtual == nil {
+		if base == nil {
+			return nil, nil
+		}
+		return ast.NewTerm(base), nil
+	}
+
+	if base != nil {
+		merged, ok := merge(base, virtual)
+		if !ok {
+			return nil, mergeConflictErr(e.plugged[0].Location)
+		}
+		return ast.NewTerm(merged), nil
+	}
+
+	return ast.NewTerm(virtual), nil
+}
+
+// leaves recursively materializes the virtual documents under node into an
+// object keyed by tree path. Rule values are evaluated through a generated
+// variable; hidden (system) children are skipped. The plugged ref is used as
+// a mutable path buffer and restored after each child.
+func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error) {
+
+	if e.node == nil {
+		return nil, nil
+	}
+
+	result := ast.NewObject()
+
+	for _, k := range node.Sorted {
+
+		child := node.Children[k]
+
+		if child.Hide {
+			continue
+		}
+
+		plugged = append(plugged, ast.NewTerm(child.Key))
+
+		var save ast.Value
+		var err error
+
+		if len(child.Values) > 0 {
+			// Leaf: evaluate the rules at this path and capture the value.
+			rterm := e.e.generateVar("leaf")
+			err = e.e.unify(ast.NewTerm(plugged), rterm, func() error {
+				save = e.e.bindings.Plug(rterm).Value
+				return nil
+			})
+		} else {
+			save, err = e.leaves(plugged, child)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		if save != nil {
+			v := ast.NewObject([2]*ast.Term{plugged[len(plugged)-1], ast.NewTerm(save)})
+			result, _ = result.Merge(v)
+		}
+
+		// Pop the key before moving to the next sibling.
+		plugged = plugged[:len(plugged)-1]
+	}
+
+	return result, nil
+}
+
+// evalVirtual evaluates a reference that points into the virtual document
+// (documents defined by rules). It dispatches to the partial-doc or
+// complete-doc evaluator based on the kind of rules found at the path.
+type evalVirtual struct {
+ e *eval
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
+ bindings *bindings
+ rterm *ast.Term
+ rbindings *bindings
+}
+
+// eval looks up the rules at the current path and dispatches to the
+// appropriate evaluator: partial set/object docs or complete docs. When the
+// ruleset contains else-chains and the reference depends on unknowns, the
+// expression is saved instead of evaluated.
+func (e evalVirtual) eval(iter unifyIterator) error {
+
+ ir, err := e.e.getRules(e.plugged[:e.pos+1])
+ if err != nil {
+ return err
+ }
+
+ // Partial evaluation of ordered rules is not supported currently. Save the
+ // expression and continue. This could be revisited in the future.
+ if len(ir.Else) > 0 && e.e.unknown(e.ref, e.bindings) {
+ return e.e.saveUnify(ast.NewTerm(e.ref), e.rterm, e.bindings, e.rbindings, iter)
+ }
+
+ switch ir.Kind {
+ case ast.PartialSetDoc:
+ eval := evalVirtualPartial{
+ e: e.e,
+ ref: e.ref,
+ plugged: e.plugged,
+ pos: e.pos,
+ ir: ir,
+ bindings: e.bindings,
+ rterm: e.rterm,
+ rbindings: e.rbindings,
+ empty: ast.SetTerm(),
+ }
+ return eval.eval(iter)
+ case ast.PartialObjectDoc:
+ eval := evalVirtualPartial{
+ e: e.e,
+ ref: e.ref,
+ plugged: e.plugged,
+ pos: e.pos,
+ ir: ir,
+ bindings: e.bindings,
+ rterm: e.rterm,
+ rbindings: e.rbindings,
+ empty: ast.ObjectTerm(),
+ }
+ return eval.eval(iter)
+ default:
+ eval := evalVirtualComplete{
+ e: e.e,
+ ref: e.ref,
+ plugged: e.plugged,
+ pos: e.pos,
+ ir: ir,
+ bindings: e.bindings,
+ rterm: e.rterm,
+ rbindings: e.rbindings,
+ }
+ return eval.eval(iter)
+ }
+}
+
+// evalVirtualPartial evaluates references into partial set/object documents.
+// empty holds the zero-value document (empty set or empty object) used both
+// as the accumulator seed and as the result when nothing is defined.
+type evalVirtualPartial struct {
+ e *eval
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
+ ir *ast.IndexResult
+ bindings *bindings
+ rterm *ast.Term
+ rbindings *bindings
+ empty *ast.Term
+}
+
+// eval chooses an evaluation strategy: when the reference stops exactly at
+// the partial document (no key suffix), all rules are evaluated to build the
+// whole document; otherwise rules are evaluated one by one against the key.
+// Unknown or inlining-disabled paths fall back to support-rule generation.
+func (e evalVirtualPartial) eval(iter unifyIterator) error {
+
+ unknown := e.e.unknown(e.ref[:e.pos+1], e.bindings)
+
+ if len(e.ref) == e.pos+1 {
+ if unknown {
+ return e.partialEvalSupport(iter)
+ }
+ return e.evalAllRules(iter, e.ir.Rules)
+ }
+
+ if (unknown && e.e.inliningControl.shallow) || e.e.inliningControl.Disabled(e.ref[:e.pos+1], false) {
+ return e.partialEvalSupport(iter)
+ }
+
+ return e.evalEachRule(iter, e.ir.Rules, unknown)
+}
+
+// evalEachRule evaluates each rule of the partial document against the key
+// at e.ref[e.pos+1]. If the key itself is unknown, unification happens after
+// the rule body (post-unify); otherwise the key is unified first, with the
+// cache consulted up front. NOTE(review): the rules parameter is never used —
+// the loops iterate e.ir.Rules directly; confirm this is intentional.
+func (e evalVirtualPartial) evalEachRule(iter unifyIterator, rules []*ast.Rule, unknown bool) error {
+
+ if e.e.unknown(e.ref[e.pos+1], e.bindings) {
+ for _, rule := range e.ir.Rules {
+ if err := e.evalOneRulePostUnify(iter, rule); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ key, hit, err := e.evalCache(iter)
+ if err != nil {
+ return err
+ } else if hit {
+ return nil
+ }
+
+ result := e.empty
+
+ for _, rule := range e.ir.Rules {
+ if err := e.evalOneRulePreUnify(iter, rule, key, result, unknown); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// evalAllRules evaluates every rule and folds each produced key/value into a
+// single accumulated document (via reduce), then unifies the complete result
+// with the right-hand term. Used when the reference denotes the whole
+// partial document rather than a single key.
+func (e evalVirtualPartial) evalAllRules(iter unifyIterator, rules []*ast.Rule) error {
+
+ result := e.empty
+
+ for _, rule := range rules {
+ child := e.e.child(rule.Body)
+ child.traceEnter(rule)
+
+ err := child.eval(func(*eval) error {
+ child.traceExit(rule)
+ var err error
+ result, _, err = e.reduce(rule.Head, child.bindings, result)
+ if err != nil {
+ return err
+ }
+
+ child.traceRedo(rule)
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return e.e.biunify(result, e.rterm, e.bindings, e.bindings, iter)
+}
+
+// evalOneRulePreUnify unifies the rule head key with the reference key
+// before evaluating the rule body. On success the head value (or key, for
+// sets) is optionally cached, checked for duplicates/conflicts, and then the
+// remainder of the reference is evaluated against it.
+func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Rule, cacheKey ast.Ref, result *ast.Term, unknown bool) error {
+
+ key := e.ref[e.pos+1]
+ child := e.e.child(rule.Body)
+
+ child.traceEnter(rule)
+ var defined bool
+
+ err := child.biunify(rule.Head.Key, key, child.bindings, e.bindings, func() error {
+ defined = true
+ return child.eval(func(child *eval) error {
+
+ // For sets the head has no value; the key doubles as the value.
+ term := rule.Head.Value
+ if term == nil {
+ term = rule.Head.Key
+ }
+
+ if cacheKey != nil {
+ result := child.bindings.Plug(term)
+ e.e.virtualCache.Put(cacheKey, result)
+ }
+
+ // NOTE(tsandall): if the rule set depends on any unknowns then do
+ // not perform the duplicate check because evaluation of the ruleset
+ // may not produce a definitive result. This is a bit strict--we
+ // could improve by skipping only when saves occur.
+ if !unknown {
+ var dup bool
+ var err error
+ result, dup, err = e.reduce(rule.Head, child.bindings, result)
+ if err != nil {
+ return err
+ } else if dup {
+ child.traceDuplicate(rule)
+ return nil
+ }
+ }
+
+ child.traceExit(rule)
+ term, termbindings := child.bindings.apply(term)
+ err := e.evalTerm(iter, term, termbindings)
+ if err != nil {
+ return err
+ }
+
+ child.traceRedo(rule)
+ return nil
+ })
+ })
+
+ if err != nil {
+ return err
+ }
+
+ // TODO(tsandall): why are we tracing here? this looks wrong.
+ if !defined {
+ child.traceFail(rule)
+ }
+
+ return nil
+}
+
+// evalOneRulePostUnify evaluates the rule body first and unifies the head
+// key with the reference key afterwards. Used when the reference key is
+// unknown, so pre-unification against it is not possible.
+func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.Rule) error {
+
+ key := e.ref[e.pos+1]
+ child := e.e.child(rule.Body)
+
+ child.traceEnter(rule)
+ var defined bool
+
+ err := child.eval(func(child *eval) error {
+ defined = true
+ return e.e.biunify(rule.Head.Key, key, child.bindings, e.bindings, func() error {
+ return e.evalOneRuleContinue(iter, rule, child)
+ })
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if !defined {
+ child.traceFail(rule)
+ }
+
+ return nil
+}
+
+// evalOneRuleContinue finishes evaluation of a single partial rule whose
+// body has been satisfied: the head value (or, for sets, the head key) is
+// applied through the child bindings and the remainder of the reference is
+// evaluated against it.
+func (e evalVirtualPartial) evalOneRuleContinue(iter unifyIterator, rule *ast.Rule, child *eval) error {
+
+ child.traceExit(rule)
+
+ headTerm := rule.Head.Value
+ if headTerm == nil {
+ headTerm = rule.Head.Key
+ }
+
+ applied, appliedBindings := child.bindings.apply(headTerm)
+ if err := e.evalTerm(iter, applied, appliedBindings); err != nil {
+ return err
+ }
+
+ child.traceRedo(rule)
+ return nil
+}
+
+// partialEvalSupport generates support rules for the partial document and
+// saves a unification against the (possibly namespaced) path. If no rule
+// produced a result and the support module does not already define the path,
+// the empty document is used instead.
+func (e evalVirtualPartial) partialEvalSupport(iter unifyIterator) error {
+
+ path, term := e.e.savePackagePathAndTerm(e.plugged[:e.pos+1], e.ref)
+
+ var defined bool
+
+ if e.e.saveSupport.Exists(path) {
+ defined = true
+ } else {
+ for i := range e.ir.Rules {
+ ok, err := e.partialEvalSupportRule(iter, e.ir.Rules[i], path)
+ if err != nil {
+ return err
+ } else if ok {
+ defined = true
+ }
+ }
+ }
+
+ if !defined {
+ term = e.empty
+ }
+
+ return e.e.saveUnify(term, e.rterm, e.bindings, e.rbindings, iter)
+}
+
+// partialEvalSupportRule partially evaluates one rule body and, for each
+// residual query that type-checks, inserts a support rule (with plugged,
+// namespaced head key/value and optionally copy-propagated body) at path.
+// Returns whether the rule body was satisfiable at least once.
+func (e evalVirtualPartial) partialEvalSupportRule(iter unifyIterator, rule *ast.Rule, path ast.Ref) (bool, error) {
+
+ child := e.e.child(rule.Body)
+ child.traceEnter(rule)
+
+ e.e.saveStack.PushQuery(nil)
+ var defined bool
+
+ err := child.eval(func(child *eval) error {
+ child.traceExit(rule)
+ defined = true
+
+ current := e.e.saveStack.PopQuery()
+ plugged := current.Plug(e.e.caller.bindings)
+ // Skip this rule body if it fails to type-check.
+ // Type-checking failure means the rule body will never succeed.
+ if e.e.compiler.PassesTypeCheck(plugged) {
+ var key, value *ast.Term
+
+ if rule.Head.Key != nil {
+ key = child.bindings.PlugNamespaced(rule.Head.Key, e.e.caller.bindings)
+ }
+
+ if rule.Head.Value != nil {
+ value = child.bindings.PlugNamespaced(rule.Head.Value, e.e.caller.bindings)
+ }
+
+ head := ast.NewHead(rule.Head.Name, key, value)
+
+ if !e.e.inliningControl.shallow {
+ cp := copypropagation.New(head.Vars()).
+ WithEnsureNonEmptyBody(true).
+ WithCompiler(e.e.compiler)
+ plugged = applyCopyPropagation(cp, e.e.instr, plugged)
+ }
+
+ e.e.saveSupport.Insert(path, &ast.Rule{
+ Head: head,
+ Body: plugged,
+ Default: rule.Default,
+ })
+ }
+ child.traceRedo(rule)
+ // Restore the stack for the next residual query.
+ e.e.saveStack.PushQuery(current)
+ return nil
+ })
+ e.e.saveStack.PopQuery()
+ return defined, err
+}
+
+// evalTerm continues evaluation of the reference suffix against term. The
+// position skips two: past the partial document name and past the key.
+func (e evalVirtualPartial) evalTerm(iter unifyIterator, term *ast.Term, termbindings *bindings) error {
+ return evalTerm{
+ e: e.e,
+ ref: e.ref,
+ pos: e.pos + 2,
+ bindings: e.bindings,
+ term: term,
+ termbindings: termbindings,
+ rterm: e.rterm,
+ rbindings: e.rbindings,
+ }.eval(iter)
+}
+
+// evalCache checks the virtual document cache for partial object docs keyed
+// by a ground key. On a hit, the cached value is evaluated immediately and
+// (nil, true, err) is returned. On a miss with a ground key, the cache key
+// is returned so the caller can populate it; otherwise the key is nil.
+func (e evalVirtualPartial) evalCache(iter unifyIterator) (ast.Ref, bool, error) {
+
+ if e.e.unknown(e.ref[:e.pos+1], e.bindings) {
+ return nil, false, nil
+ }
+
+ var cacheKey ast.Ref
+
+ if e.ir.Kind == ast.PartialObjectDoc {
+
+ plugged := e.bindings.Plug(e.ref[e.pos+1])
+
+ if plugged.IsGround() {
+ path := e.plugged[:e.pos+2]
+ path[len(path)-1] = plugged
+ cached := e.e.virtualCache.Get(path)
+
+ if cached != nil {
+ e.e.instr.counterIncr(evalOpVirtualCacheHit)
+ return nil, true, e.evalTerm(iter, cached, e.bindings)
+ }
+
+ e.e.instr.counterIncr(evalOpVirtualCacheMiss)
+ cacheKey = path
+ }
+ }
+
+ return cacheKey, false, nil
+}
+
+// reduce folds one rule head into the accumulated partial document. For sets
+// it adds the plugged key; for objects it inserts key/value and reports a
+// conflict error when the same key maps to a different value. The second
+// return value reports whether the key already existed (a duplicate).
+func (e evalVirtualPartial) reduce(head *ast.Head, b *bindings, result *ast.Term) (*ast.Term, bool, error) {
+
+ var exists bool
+ key := b.Plug(head.Key)
+
+ switch v := result.Value.(type) {
+ case ast.Set:
+ exists = v.Contains(key)
+ v.Add(key)
+ case ast.Object:
+ value := b.Plug(head.Value)
+ if curr := v.Get(key); curr != nil {
+ if !curr.Equal(value) {
+ return nil, false, objectDocKeyConflictErr(head.Location)
+ }
+ exists = true
+ } else {
+ v.Insert(key, value)
+ }
+ }
+
+ return result, exists, nil
+}
+
+// evalVirtualComplete evaluates references into complete (single-value)
+// virtual documents, including default rules and else-chains.
+type evalVirtualComplete struct {
+ e *eval
+ ref ast.Ref
+ plugged ast.Ref
+ pos int
+ ir *ast.IndexResult
+ bindings *bindings
+ rterm *ast.Term
+ rbindings *bindings
+}
+
+// eval evaluates the complete document. Function rules (heads with args) are
+// skipped here. Known references are evaluated directly; unknown ones are
+// either inlined (partialEval) or turned into support rules, the latter
+// whenever the default value may be needed or inlining is restricted.
+func (e evalVirtualComplete) eval(iter unifyIterator) error {
+
+ if e.ir.Empty() {
+ return nil
+ }
+
+ if len(e.ir.Rules) > 0 && len(e.ir.Rules[0].Head.Args) > 0 {
+ return nil
+ }
+
+ if !e.e.unknown(e.ref, e.bindings) {
+ return e.evalValue(iter)
+ }
+
+ var generateSupport bool
+
+ if e.ir.Default != nil {
+ // If the other term is not constant OR it's equal to the default value, then
+ // a support rule must be produced as the default value _may_ be required. On
+ // the other hand, if the other term is constant (i.e., it does not require
+ // evaluation) and it differs from the default value then the default value is
+ // _not_ required, so partially evaluate the rule normally.
+ rterm := e.rbindings.Plug(e.rterm)
+ generateSupport = !ast.IsConstant(rterm.Value) || e.ir.Default.Head.Value.Equal(rterm)
+ }
+
+ if generateSupport || e.e.inliningControl.shallow || e.e.inliningControl.Disabled(e.plugged[:e.pos+1], false) {
+ return e.partialEvalSupport(iter)
+ }
+
+ return e.partialEval(iter)
+}
+
+// evalValue evaluates the complete document's rules in order, consulting the
+// virtual cache first. Else-chains are tried only when the primary rule is
+// undefined, and the default rule only when no rule produced a value.
+// Conflicting values across rules are reported by evalValueRule.
+func (e evalVirtualComplete) evalValue(iter unifyIterator) error {
+ cached := e.e.virtualCache.Get(e.plugged[:e.pos+1])
+ if cached != nil {
+ e.e.instr.counterIncr(evalOpVirtualCacheHit)
+ return e.evalTerm(iter, cached, e.bindings)
+ }
+
+ e.e.instr.counterIncr(evalOpVirtualCacheMiss)
+
+ var prev *ast.Term
+
+ for i := range e.ir.Rules {
+ next, err := e.evalValueRule(iter, e.ir.Rules[i], prev)
+ if err != nil {
+ return err
+ }
+ if next == nil {
+ for _, rule := range e.ir.Else[e.ir.Rules[i]] {
+ next, err = e.evalValueRule(iter, rule, prev)
+ if err != nil {
+ return err
+ }
+ if next != nil {
+ break
+ }
+ }
+ }
+ if next != nil {
+ prev = next
+ }
+ }
+
+ if e.ir.Default != nil && prev == nil {
+ _, err := e.evalValueRule(iter, e.ir.Default, prev)
+ return err
+ }
+
+ return nil
+}
+
+// evalValueRule evaluates a single rule of a complete document. The first
+// produced value is cached and the reference suffix is evaluated against it;
+// any subsequent value from the same rule must be equal to prev or a
+// complete-doc conflict error is raised. Returns the produced value (nil if
+// the body was never satisfied).
+func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term) (*ast.Term, error) {
+
+ child := e.e.child(rule.Body)
+ child.traceEnter(rule)
+ var result *ast.Term
+
+ err := child.eval(func(child *eval) error {
+ child.traceExit(rule)
+ result = child.bindings.Plug(rule.Head.Value)
+
+ if prev != nil {
+ if ast.Compare(result, prev) != 0 {
+ return completeDocConflictErr(rule.Location)
+ }
+ child.traceRedo(rule)
+ return nil
+ }
+
+ prev = result
+ e.e.virtualCache.Put(e.plugged[:e.pos+1], result)
+ term, termbindings := child.bindings.apply(rule.Head.Value)
+
+ err := e.evalTerm(iter, term, termbindings)
+ if err != nil {
+ return err
+ }
+
+ child.traceRedo(rule)
+ return nil
+ })
+
+ return result, err
+}
+
+// partialEval inlines the complete document's rules during partial
+// evaluation: each rule body is evaluated and the head value applied to the
+// remainder of the reference, without generating support rules.
+func (e evalVirtualComplete) partialEval(iter unifyIterator) error {
+
+ for _, rule := range e.ir.Rules {
+ child := e.e.child(rule.Body)
+ child.traceEnter(rule)
+
+ err := child.eval(func(child *eval) error {
+ child.traceExit(rule)
+ term, termbindings := child.bindings.apply(rule.Head.Value)
+
+ err := e.evalTerm(iter, term, termbindings)
+ if err != nil {
+ return err
+ }
+
+ child.traceRedo(rule)
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// partialEvalSupport generates support rules for the complete document
+// (including the default rule, if any) and saves a unification against the
+// namespaced path. Rules already present in the support module are reused.
+func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error {
+
+ path, term := e.e.savePackagePathAndTerm(e.plugged[:e.pos+1], e.ref)
+
+ if !e.e.saveSupport.Exists(path) {
+
+ for i := range e.ir.Rules {
+ err := e.partialEvalSupportRule(iter, e.ir.Rules[i], path)
+ if err != nil {
+ return err
+ }
+ }
+
+ if e.ir.Default != nil {
+ err := e.partialEvalSupportRule(iter, e.ir.Default, path)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return e.e.saveUnify(term, e.rterm, e.bindings, e.rbindings, iter)
+}
+
+// partialEvalSupportRule partially evaluates one complete-doc rule and, for
+// each residual query that type-checks, inserts a support rule with the
+// namespaced head value and optionally copy-propagated body at path.
+func (e evalVirtualComplete) partialEvalSupportRule(iter unifyIterator, rule *ast.Rule, path ast.Ref) error {
+
+ child := e.e.child(rule.Body)
+ child.traceEnter(rule)
+
+ e.e.saveStack.PushQuery(nil)
+
+ err := child.eval(func(child *eval) error {
+ child.traceExit(rule)
+
+ current := e.e.saveStack.PopQuery()
+ plugged := current.Plug(e.e.caller.bindings)
+ // Skip this rule body if it fails to type-check.
+ // Type-checking failure means the rule body will never succeed.
+ if e.e.compiler.PassesTypeCheck(plugged) {
+ head := ast.NewHead(rule.Head.Name, nil, child.bindings.PlugNamespaced(rule.Head.Value, e.e.caller.bindings))
+
+ if !e.e.inliningControl.shallow {
+ cp := copypropagation.New(head.Vars()).
+ WithEnsureNonEmptyBody(true).
+ WithCompiler(e.e.compiler)
+ plugged = applyCopyPropagation(cp, e.e.instr, plugged)
+ }
+
+ e.e.saveSupport.Insert(path, &ast.Rule{
+ Head: head,
+ Body: plugged,
+ Default: rule.Default,
+ })
+ }
+ child.traceRedo(rule)
+ // Restore the stack for the next residual query.
+ e.e.saveStack.PushQuery(current)
+ return nil
+ })
+ e.e.saveStack.PopQuery()
+ return err
+}
+
+// evalTerm continues evaluation of the reference suffix (one position past
+// the complete document name) against term.
+func (e evalVirtualComplete) evalTerm(iter unifyIterator, term *ast.Term, termbindings *bindings) error {
+ return evalTerm{
+ e: e.e,
+ ref: e.ref,
+ pos: e.pos + 1,
+ bindings: e.bindings,
+ term: term,
+ termbindings: termbindings,
+ rterm: e.rterm,
+ rbindings: e.rbindings,
+ }.eval(iter)
+}
+
+// evalTerm evaluates the remaining suffix of a reference (ref[pos:]) against
+// a concrete term produced by base/virtual document evaluation, ultimately
+// unifying the result with rterm under rbindings.
+type evalTerm struct {
+ e *eval
+ ref ast.Ref
+ pos int
+ bindings *bindings
+ term *ast.Term
+ termbindings *bindings
+ rterm *ast.Term
+ rbindings *bindings
+}
+
+// eval processes one reference position: when the suffix is exhausted the
+// term is unified with the right-hand side; terms in the save set are saved
+// (partial evaluation); ground keys index directly into the term; non-ground
+// keys enumerate all possibilities. The check order here is significant.
+func (e evalTerm) eval(iter unifyIterator) error {
+
+ if len(e.ref) == e.pos {
+ return e.e.biunify(e.term, e.rterm, e.termbindings, e.rbindings, iter)
+ }
+
+ if e.e.saveSet.Contains(e.term, e.termbindings) {
+ return e.save(iter)
+ }
+
+ plugged := e.bindings.Plug(e.ref[e.pos])
+
+ if plugged.IsGround() {
+ return e.next(iter, plugged)
+ }
+
+ return e.enumerate(iter)
+}
+
+// next advances to the following reference position using the value stored
+// in the current term under plugged; evaluation stops silently when no such
+// value exists.
+func (e evalTerm) next(iter unifyIterator, plugged *ast.Term) error {
+
+ value, valueBindings := e.get(plugged)
+ if value == nil {
+ return nil
+ }
+
+ down := e
+ down.term = value
+ down.termbindings = valueBindings
+ down.pos = e.pos + 1
+ return down.eval(iter)
+}
+
+// enumerate unifies the non-ground reference operand with every candidate
+// key of the current term (array indices, object keys, or set elements) and
+// recurses via next for each successful unification. Non-collection terms
+// enumerate nothing.
+func (e evalTerm) enumerate(iter unifyIterator) error {
+
+ switch v := e.term.Value.(type) {
+ case *ast.Array:
+ for i := 0; i < v.Len(); i++ {
+ k := ast.IntNumberTerm(i)
+ err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error {
+ return e.next(iter, k)
+ })
+ if err != nil {
+ return err
+ }
+ }
+ case ast.Object:
+ return v.Iter(func(k, _ *ast.Term) error {
+ return e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error {
+ return e.next(iter, e.termbindings.Plug(k))
+ })
+ })
+ case ast.Set:
+ return v.Iter(func(elem *ast.Term) error {
+ return e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error {
+ return e.next(iter, e.termbindings.Plug(elem))
+ })
+ })
+ }
+
+ return nil
+}
+
+// get looks up plugged inside the current term and returns the stored value
+// with its bindings, or (nil, nil) when absent. Ground collections use fast
+// direct lookup; non-ground sets/objects fall back to a linear scan that
+// plugs each key before comparison.
+func (e evalTerm) get(plugged *ast.Term) (*ast.Term, *bindings) {
+ switch v := e.term.Value.(type) {
+ case ast.Set:
+ if v.IsGround() {
+ if v.Contains(plugged) {
+ return e.termbindings.apply(plugged)
+ }
+ } else {
+ var t *ast.Term
+ var b *bindings
+ stop := v.Until(func(elem *ast.Term) bool {
+ if e.termbindings.Plug(elem).Equal(plugged) {
+ t, b = e.termbindings.apply(plugged)
+ return true
+ }
+ return false
+ })
+ if stop {
+ return t, b
+ }
+ }
+ case ast.Object:
+ if v.IsGround() {
+ term := v.Get(plugged)
+ if term != nil {
+ return e.termbindings.apply(term)
+ }
+ } else {
+ var t *ast.Term
+ var b *bindings
+ stop := v.Until(func(k, v *ast.Term) bool {
+ if e.termbindings.Plug(k).Equal(plugged) {
+ t, b = e.termbindings.apply(v)
+ return true
+ }
+ return false
+ })
+ if stop {
+ return t, b
+ }
+ }
+ case *ast.Array:
+ term := v.Get(plugged)
+ if term != nil {
+ return e.termbindings.apply(term)
+ }
+ }
+ return nil, nil
+}
+
+// save handles a term that must not be evaluated (it is in the save set
+// during partial evaluation): a fresh variable is unified with the term and
+// the unification of <var>.<suffix> against the right-hand side is saved.
+func (e evalTerm) save(iter unifyIterator) error {
+
+ v := e.e.generateVar(fmt.Sprintf("ref_%d", e.e.genvarid))
+ e.e.genvarid++
+
+ return e.e.biunify(e.term, v, e.termbindings, e.bindings, func() error {
+
+ // Build <v>.<suffix> where suffix is the unevaluated remainder of the
+ // original reference. Use the copy builtin instead of a manual loop.
+ suffix := e.ref[e.pos:]
+ ref := make(ast.Ref, len(suffix)+1)
+ ref[0] = v
+ copy(ref[1:], suffix)
+
+ return e.e.biunify(ast.NewTerm(ref), e.rterm, e.bindings, e.rbindings, iter)
+ })
+}
+
+// comprehensionIndex returns the comprehension index for term, preferring
+// the query compiler's index when one is configured.
+func (e *eval) comprehensionIndex(term *ast.Term) *ast.ComprehensionIndex {
+ if qc := e.queryCompiler; qc != nil {
+ return qc.ComprehensionIndex(term)
+ }
+ return e.compiler.ComprehensionIndex(term)
+}
+
+// savePackagePathAndTerm returns the package path and reference term used
+// when generating support rules, inserting the save namespace at position 1
+// unless namespacing is disabled.
+func (e *eval) savePackagePathAndTerm(plugged, ref ast.Ref) (ast.Ref, *ast.Term) {
+
+ if e.skipSaveNamespace {
+ return plugged, ast.NewTerm(ref)
+ }
+
+ namespacedPath := plugged.Insert(e.saveNamespace, 1)
+ namespacedRef := ref.Insert(e.saveNamespace, 1)
+ return namespacedPath, ast.NewTerm(namespacedRef)
+}
+
+// savePair pairs a term with the bindings it must be interpreted under when
+// collecting variables from saved expressions.
+type savePair struct {
+ term *ast.Term
+ b *bindings
+}
+
+// getSavePairsFromExpr appends the save pairs found in x to result. Equality
+// expressions contribute both operands; calls contribute only their output
+// term, and only when one is present (operand count exceeds the declared
+// argument count by two: operator plus output).
+func getSavePairsFromExpr(declArgsLen int, x *ast.Expr, b *bindings, result []savePair) []savePair {
+ switch terms := x.Terms.(type) {
+ case *ast.Term:
+ return getSavePairsFromTerm(terms, b, result)
+ case []*ast.Term:
+ if x.IsEquality() {
+ return getSavePairsFromTerm(terms[2], b, getSavePairsFromTerm(terms[1], b, result))
+ }
+ if declArgsLen == len(terms)-2 {
+ return getSavePairsFromTerm(terms[len(terms)-1], b, result)
+ }
+ }
+ return result
+}
+
+// getSavePairsFromTerm appends save pairs for every variable reachable from
+// x: a bare variable is recorded directly, otherwise the term's variables
+// (excluding closures and ref heads) are resolved through the bindings and
+// processed recursively.
+func getSavePairsFromTerm(x *ast.Term, b *bindings, result []savePair) []savePair {
+ if _, ok := x.Value.(ast.Var); ok {
+ result = append(result, savePair{x, b})
+ return result
+ }
+ vis := ast.NewVarVisitor().WithParams(ast.VarVisitorParams{
+ SkipClosures: true,
+ SkipRefHead: true,
+ })
+ vis.Walk(x)
+ for v := range vis.Vars() {
+ y, next := b.apply(ast.NewTerm(v))
+ result = getSavePairsFromTerm(y, next, result)
+ }
+ return result
+}
+
+// applyCopyPropagation runs the copy propagation optimization on body while
+// recording the elapsed time under the partial-eval instrumentation.
+func applyCopyPropagation(p *copypropagation.CopyPropagator, instr *Instrumentation, body ast.Body) ast.Body {
+ instr.startTimer(partialOpCopyPropagation)
+ defer instr.stopTimer(partialOpCopyPropagation)
+ return p.Apply(body)
+}
+
+// nonGroundKeys reports whether any key of a is non-ground.
+func nonGroundKeys(a ast.Object) bool {
+ return a.Until(func(key, _ *ast.Term) bool {
+ return !key.IsGround()
+ })
+}
+
+// plugKeys returns a copy of a whose keys have been plugged with b; values
+// are carried over untouched.
+func plugKeys(a ast.Object, b *bindings) ast.Object {
+ out, _ := a.Map(func(key, value *ast.Term) (*ast.Term, *ast.Term, error) {
+ return b.Plug(key), value, nil
+ })
+ return out
+}
+
+// plugSlice returns a new slice containing the plugged version of each term
+// in xs.
+func plugSlice(xs []*ast.Term, b *bindings) []*ast.Term {
+ out := make([]*ast.Term, 0, len(xs))
+ for _, x := range xs {
+ out = append(out, b.Plug(x))
+ }
+ return out
+}
+
+// canInlineNegation reports whether a negated set of residual queries can be
+// complemented in place. It rejects expressions with nested refs/calls,
+// positive expressions whose variables are not already safe, and query sets
+// whose cartesian product would be too large.
+func canInlineNegation(safe ast.VarSet, queries []ast.Body) bool {
+
+ size := 1
+ vis := newNestedCheckVisitor()
+
+ for _, query := range queries {
+ size *= len(query)
+ for _, expr := range query {
+ if containsNestedRefOrCall(vis, expr) {
+ // Expressions containing nested refs or calls cannot be trivially negated
+ // because the semantics would change. For example, the complement of `not f(input.x)`
+ // is _not_ `f(input.x)`--it is `not input.x` OR `f(input.x)`.
+ //
+ // NOTE(tsandall): Since this would require the complement function to undo the
+ // copy propagation optimization, just bail out here. If this becomes a problem
+ // in the future, we can handle more cases.
+ return false
+ }
+ if !expr.Negated {
+ // Positive expressions containing variables cannot be trivially negated
+ // because they become unsafe (e.g., "x = 1" negated is "not x = 1" making x
+ // unsafe.) We check if the vars in the expr are already safe.
+ vis := ast.NewVarVisitor().WithParams(ast.VarVisitorParams{
+ SkipRefCallHead: true,
+ SkipClosures: true,
+ })
+ vis.Walk(expr)
+ unsafe := vis.Vars().Diff(safe).Diff(ast.ReservedVars)
+ if len(unsafe) > 0 {
+ return false
+ }
+ }
+ }
+ }
+
+ // NOTE(tsandall): this limit is arbitrary–it's only in place to prevent the
+ // partial evaluation result from blowing up. In the future, we could make this
+ // configurable or do something more clever.
+ if size > 16 {
+ return false
+ }
+
+ return true
+}
+
+// nestedCheckVisitor walks AST nodes and records (in found) whether a ref or
+// call was encountered. found is sticky across walks until reset.
+type nestedCheckVisitor struct {
+ vis *ast.GenericVisitor
+ found bool
+}
+
+// newNestedCheckVisitor constructs a visitor that flags any ref or call
+// encountered during a walk.
+func newNestedCheckVisitor() *nestedCheckVisitor {
+ checker := &nestedCheckVisitor{}
+ checker.vis = ast.NewGenericVisitor(checker.visit)
+ return checker
+}
+
+func (v *nestedCheckVisitor) visit(x interface{}) bool {
+ switch x.(type) {
+ case ast.Ref, ast.Call:
+ v.found = true
+ }
+ return v.found
+}
+
+// containsNestedRefOrCall reports whether expr contains a nested reference
+// or call. Equality operands and call operands are inspected individually;
+// any other expression is inspected as a single term.
+func containsNestedRefOrCall(vis *nestedCheckVisitor, expr *ast.Expr) bool {
+
+ if expr.IsEquality() {
+ for _, term := range expr.Operands() {
+ if containsNestedRefOrCallInTerm(vis, term) {
+ return true
+ }
+ }
+ return false
+ }
+
+ if expr.IsCall() {
+ for _, term := range expr.Operands() {
+ vis.vis.Walk(term)
+ if vis.found {
+ return true
+ }
+ }
+ return false
+ }
+
+ return containsNestedRefOrCallInTerm(vis, expr.Terms.(*ast.Term))
+}
+
+// containsNestedRefOrCallInTerm reports whether term contains a nested ref
+// or call. For refs, only the operands after the head are inspected (the
+// ref itself does not count as nested); other values are walked directly.
+func containsNestedRefOrCallInTerm(vis *nestedCheckVisitor, term *ast.Term) bool {
+ switch v := term.Value.(type) {
+ case ast.Ref:
+ for i := 1; i < len(v); i++ {
+ vis.vis.Walk(v[i])
+ if vis.found {
+ return true
+ }
+ }
+ return false
+ default:
+ vis.vis.Walk(v)
+ if vis.found {
+ return true
+ }
+ return false
+ }
+}
+
+// complementedCartesianProduct invokes iter with every body formed by taking
+// one complemented expression from each query (the cartesian product of the
+// complements). curr is extended and truncated in place across the
+// recursion, so iter must not retain it beyond the call.
+func complementedCartesianProduct(queries []ast.Body, idx int, curr ast.Body, iter func(ast.Body) error) error {
+ if idx == len(queries) {
+ return iter(curr)
+ }
+ for _, expr := range queries[idx] {
+ curr = append(curr, expr.Complement())
+ if err := complementedCartesianProduct(queries, idx+1, curr, iter); err != nil {
+ return err
+ }
+ curr = curr[:len(curr)-1]
+ }
+ return nil
+}
+
+// isInputRef reports whether term is a reference rooted at input.
+func isInputRef(term *ast.Term) bool {
+ ref, ok := term.Value.(ast.Ref)
+ return ok && ref.HasPrefix(ast.InputRootRef)
+}
+
+// isDataRef reports whether term is a reference rooted at data.
+func isDataRef(term *ast.Term) bool {
+ ref, ok := term.Value.(ast.Ref)
+ return ok && ref.HasPrefix(ast.DefaultRootRef)
+}
+
+// merge combines a and b when both are objects. For every other combination
+// there is nothing to merge, so a wins and the merge trivially succeeds.
+func merge(a, b ast.Value) (ast.Value, bool) {
+ if aObj, ok := a.(ast.Object); ok {
+ if bObj, ok := b.(ast.Object); ok {
+ return mergeObjects(aObj, bObj)
+ }
+ }
+ return a, true
+}
+
+// mergeObjects returns a new Object containing the non-overlapping keys of
+// the objA and objB. If there are overlapping keys between objA and objB,
+// the values of associated with the keys are merged. Only
+// objects can be merged with other objects. If the values cannot be merged,
+// objB value will be overwritten by objA value.
+func mergeObjects(objA, objB ast.Object) (result ast.Object, ok bool) {
+ result = ast.NewObject()
+ stop := objA.Until(func(k, v *ast.Term) bool {
+ if v2 := objB.Get(k); v2 == nil {
+ result.Insert(k, v)
+ } else {
+ obj1, ok1 := v.Value.(ast.Object)
+ obj2, ok2 := v2.Value.(ast.Object)
+
+ // Non-object overlap: objA's value wins.
+ if !ok1 || !ok2 {
+ result.Insert(k, v)
+ return false
+ }
+ // Recursive merge failure aborts the whole merge.
+ obj3, ok := mergeObjects(obj1, obj2)
+ if !ok {
+ return true
+ }
+ result.Insert(k, ast.NewTerm(obj3))
+ }
+ return false
+ })
+ if stop {
+ return nil, false
+ }
+ // Carry over keys that exist only in objB.
+ objB.Foreach(func(k, v *ast.Term) {
+ if v2 := objA.Get(k); v2 == nil {
+ result.Insert(k, v)
+ }
+ })
+ return result, true
+}
+
+// refContainsNonScalar reports whether any operand of ref (excluding the
+// head) is a non-scalar value.
+func refContainsNonScalar(ref ast.Ref) bool {
+ for _, operand := range ref[1:] {
+ if ast.IsScalar(operand.Value) {
+ continue
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/glob.go b/vendor/github.com/open-policy-agent/opa/topdown/glob.go
new file mode 100644
index 00000000..98052a0c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/glob.go
@@ -0,0 +1,65 @@
+package topdown
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/gobwas/glob"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// globCacheLock guards globCache, which memoizes compiled glob patterns
+// keyed by pattern and delimiter set. NOTE(review): entries are never
+// evicted, so the cache grows with the number of distinct patterns seen —
+// confirm this is acceptable for the expected workloads.
+var globCacheLock = sync.Mutex{}
+var globCache map[string]glob.Glob
+
+// globCompiled returns the compiled glob for pattern/delimiters, memoized in
+// globCache under id. The cache lock is scoped to lookup and insertion only,
+// so concurrent matches do not serialize on it.
+func globCompiled(id, pattern string, delimiters []rune) (glob.Glob, error) {
+ globCacheLock.Lock()
+ defer globCacheLock.Unlock()
+ p, ok := globCache[id]
+ if !ok {
+ var err error
+ if p, err = glob.Compile(pattern, delimiters...); err != nil {
+ return nil, err
+ }
+ globCache[id] = p
+ }
+ return p, nil
+}
+
+// builtinGlobMatch implements glob.match(pattern, delimiters, match). An
+// empty delimiter list defaults to {'.'}. The original implementation held
+// the cache mutex across p.Match, serializing all concurrent glob
+// evaluations; compiled globs are immutable, so matching is done outside the
+// lock here.
+func builtinGlobMatch(a, b, c ast.Value) (ast.Value, error) {
+ pattern, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ delimiters, err := builtins.RuneSliceOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(delimiters) == 0 {
+ delimiters = []rune{'.'}
+ }
+
+ match, err := builtins.StringOperand(c, 3)
+ if err != nil {
+ return nil, err
+ }
+
+ // Cache key combines the pattern with the delimiter set.
+ id := fmt.Sprintf("%s-%v", pattern, delimiters)
+
+ p, err := globCompiled(id, string(pattern), delimiters)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.Boolean(p.Match(string(match))), nil
+}
+
+// builtinGlobQuoteMeta implements glob.quote_meta: it escapes all glob
+// metacharacters in the pattern operand and returns the quoted string.
+func builtinGlobQuoteMeta(a ast.Value) (ast.Value, error) {
+ pattern, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ quoted := glob.QuoteMeta(string(pattern))
+ return ast.String(quoted), nil
+}
+
+// init initializes the compiled-pattern cache and registers the glob
+// builtins with the topdown evaluator.
+func init() {
+ globCache = map[string]glob.Glob{}
+ RegisterFunctionalBuiltin3(ast.GlobMatch.Name, builtinGlobMatch)
+ RegisterFunctionalBuiltin1(ast.GlobQuoteMeta.Name, builtinGlobQuoteMeta)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/http.go b/vendor/github.com/open-policy-agent/opa/topdown/http.go
new file mode 100644
index 00000000..841fe37f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/http.go
@@ -0,0 +1,1136 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/version"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/util"
+)
+
+// defaultHTTPRequestTimeoutEnv names the environment variable that overrides
+// the default http.send timeout (see initDefaults).
+const defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT"
+
+// defaultHTTPRequestTimeout is applied to http.send requests that do not
+// specify their own timeout.
+var defaultHTTPRequestTimeout = time.Second * 5
+
+// allowedKeyNames enumerates every request parameter http.send accepts; it
+// is the source for the allowedKeys set built in init.
+var allowedKeyNames = [...]string{
+ "method",
+ "url",
+ "body",
+ "enable_redirect",
+ "force_json_decode",
+ "headers",
+ "raw_body",
+ "tls_use_system_certs",
+ "tls_ca_cert",
+ "tls_ca_cert_file",
+ "tls_ca_cert_env_variable",
+ "tls_client_cert",
+ "tls_client_cert_file",
+ "tls_client_cert_env_variable",
+ "tls_client_key",
+ "tls_client_key_file",
+ "tls_client_key_env_variable",
+ "tls_insecure_skip_verify",
+ "tls_server_name",
+ "timeout",
+ "cache",
+ "force_cache",
+ "force_cache_duration_seconds",
+ "raise_error",
+}
+
+var (
+ // allowedKeys holds the valid http.send parameters; populated from
+ // allowedKeyNames during init (createAllowedKeys).
+ allowedKeys = ast.NewSet()
+ // requiredKeys are the parameters every http.send call must provide.
+ requiredKeys = ast.NewSet(ast.StringTerm("method"), ast.StringTerm("url"))
+ // httpSendLatencyMetricKey is the metrics timer key for http.send latency.
+ httpSendLatencyMetricKey = "rego_builtin_" + strings.ReplaceAll(ast.HTTPSend.Name, ".", "_")
+)
+
+// httpSendKey is the type of keys http.send stores in the builtin context
+// cache, preventing collisions with other builtins' cache entries.
+type httpSendKey string
+
+const (
+ // httpSendBuiltinCacheKey is the key in the builtin context cache that
+ // points to the http.send() specific cache resides at.
+ httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY"
+
+ // HTTPSendInternalErr represents a runtime evaluation error.
+ HTTPSendInternalErr string = "eval_http_send_internal_error"
+
+ // HTTPSendNetworkErr represents a network error.
+ HTTPSendNetworkErr string = "eval_http_send_network_error"
+)
+
+// builtinHTTPSend implements the http.send builtin. The request object is
+// validated, executed, and the response passed to iter. When raise_error is
+// false, failures are converted into a result object carrying status_code 0
+// and an error sub-object (network vs. internal) instead of a builtin error.
+func builtinHTTPSend(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ req, err := validateHTTPRequestOperand(args[0], 1)
+ if err != nil {
+ return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
+ }
+
+ raiseError, err := getRaiseErrorValue(req)
+ if err != nil {
+ return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
+ }
+
+ result, err := getHTTPResponse(bctx, req)
+ if err != nil {
+ if raiseError {
+ return handleHTTPSendErr(bctx, err)
+ }
+
+ obj := ast.NewObject()
+ obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0))
+
+ errObj := ast.NewObject()
+
+ // url.Error indicates the failure happened on the wire.
+ switch err.(type) {
+ case *url.Error:
+ errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr))
+ default:
+ errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr))
+ }
+
+ errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error()))
+ obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj))
+
+ result = ast.NewTerm(obj)
+ }
+ return iter(result)
+}
+
+// getHTTPResponse executes the http.send request described by req,
+// consulting the builtin cache first, and returns the response document.
+// The latency timer is stopped via defer so that failed requests are
+// measured too — the original stopped it only on the success path, leaving
+// the timer running on every error return.
+func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) {
+
+ bctx.Metrics.Timer(httpSendLatencyMetricKey).Start()
+ defer bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop()
+
+ reqExecutor, err := newHTTPRequestExecutor(bctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if cache already has a response for this query
+ resp, err := reqExecutor.CheckCache()
+ if err != nil {
+ return nil, err
+ }
+
+ if resp == nil {
+ httpResp, err := reqExecutor.ExecuteHTTPRequest()
+ if err != nil {
+ return nil, err
+ }
+ defer util.Close(httpResp)
+ // add result to cache
+ resp, err = reqExecutor.InsertIntoCache(httpResp)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return ast.NewTerm(resp), nil
+}
+
+// init wires up http.send: builds the allowed-parameter set, applies
+// environment-based timeout defaults, and registers the builtin function.
+func init() {
+ createAllowedKeys()
+ initDefaults()
+ RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend)
+}
+
+// handleHTTPSendErr converts err into a builtin error for http.send. HTTP
+// client timeouts are rewritten into a generic "request timed out" message,
+// except when the builtin context itself was cancelled (cancellation also
+// surfaces as a timeout on the request).
+func handleHTTPSendErr(bctx BuiltinContext, err error) error {
+ urlErr, isURLErr := err.(*url.Error)
+ if isURLErr && urlErr.Timeout() && bctx.Context.Err() == nil {
+ err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL)
+ }
+ return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err)
+}
+
+func initDefaults() {
+ timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv)
+ if timeoutDuration != "" {
+ var err error
+ defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration)
+ if err != nil {
+ // If it is set to something not valid don't let the process continue in a state
+ // that will almost definitely give unexpected results by having it set at 0
+ // which means no timeout..
+ // This environment variable isn't considered part of the public API.
+ // TODO(patrick-east): Remove the environment variable
+ panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err))
+ }
+ }
+}
+
+// validateHTTPRequestOperand checks that term is an object whose keys are
+// all recognized http.send parameters and that the required parameters
+// (method, url) are present, returning the object on success.
+func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) {
+
+ obj, err := builtins.ObjectOperand(term.Value, pos)
+ if err != nil {
+ return nil, err
+ }
+
+ requestKeys := ast.NewSet(obj.Keys()...)
+
+ if invalid := requestKeys.Diff(allowedKeys); invalid.Len() != 0 {
+ return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalid)
+ }
+
+ if missing := requiredKeys.Diff(requestKeys); missing.Len() != 0 {
+ return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missing)
+ }
+
+ return obj, nil
+}
+
+// canonicalizeHeaders returns a copy of headers with every key rewritten
+// into canonical HTTP header form (e.g. "content-type" -> "Content-Type").
+func canonicalizeHeaders(headers map[string]interface{}) map[string]interface{} {
+ out := make(map[string]interface{}, len(headers))
+ for name, value := range headers {
+ out[http.CanonicalHeaderKey(name)] = value
+ }
+ return out
+}
+
+// createHTTPRequest translates a validated http.send() request object into an
+// *http.Request plus a matching *http.Client. TLS client certificates and CA
+// roots may be supplied inline, from files, or from environment variables;
+// when any TLS option is present a custom Transport carrying the assembled
+// tls.Config is installed. The client timeout defaults to
+// defaultHTTPRequestTimeout unless the "timeout" key overrides it, and
+// redirects are disabled unless "enable_redirect" is true.
+func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) {
+	var url string
+	var method string
+
+	// Additional CA certificates loading options.
+	var tlsCaCert []byte
+	var tlsCaCertEnvVar string
+	var tlsCaCertFile string
+
+	// Client TLS certificate and key options. Each input source
+	// comes in a matched pair.
+	var tlsClientCert []byte
+	var tlsClientKey []byte
+
+	var tlsClientCertEnvVar string
+	var tlsClientKeyEnvVar string
+
+	var tlsClientCertFile string
+	var tlsClientKeyFile string
+
+	var tlsServerName string
+	var body *bytes.Buffer
+	var rawBody *bytes.Buffer
+	var enableRedirect bool
+	var tlsUseSystemCerts bool
+	var tlsConfig tls.Config
+	var customHeaders map[string]interface{}
+	var tlsInsecureSkipVerify bool
+	var timeout = defaultHTTPRequestTimeout
+
+	// Walk every key of the request object and capture its value into the
+	// matching local above. Keys were already validated against allowedKeys.
+	for _, val := range obj.Keys() {
+		key, err := ast.JSON(val.Value)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		key = key.(string)
+
+		var strVal string
+
+		if s, ok := obj.Get(val).Value.(ast.String); ok {
+			strVal = string(s)
+		} else {
+			// Most parameters are strings, so consolidate the type checking.
+			switch key {
+			case "method",
+				"url",
+				"raw_body",
+				"tls_ca_cert",
+				"tls_ca_cert_file",
+				"tls_ca_cert_env_variable",
+				"tls_client_cert",
+				"tls_client_cert_file",
+				"tls_client_cert_env_variable",
+				"tls_client_key",
+				"tls_client_key_file",
+				"tls_client_key_env_variable",
+				"tls_server_name":
+				return nil, nil, fmt.Errorf("%q must be a string", key)
+			}
+		}
+
+		switch key {
+		case "method":
+			method = strings.ToUpper(strings.Trim(strVal, "\""))
+		case "url":
+			url = strings.Trim(strVal, "\"")
+		case "enable_redirect":
+			enableRedirect, err = strconv.ParseBool(obj.Get(val).String())
+			if err != nil {
+				return nil, nil, err
+			}
+		case "body":
+			// "body" is JSON-encoded before sending.
+			bodyVal := obj.Get(val).Value
+			bodyValInterface, err := ast.JSON(bodyVal)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			bodyValBytes, err := json.Marshal(bodyValInterface)
+			if err != nil {
+				return nil, nil, err
+			}
+			body = bytes.NewBuffer(bodyValBytes)
+		case "raw_body":
+			rawBody = bytes.NewBuffer([]byte(strVal))
+		case "tls_use_system_certs":
+			tlsUseSystemCerts, err = strconv.ParseBool(obj.Get(val).String())
+			if err != nil {
+				return nil, nil, err
+			}
+		case "tls_ca_cert":
+			tlsCaCert = bytes.Trim([]byte(strVal), "\"")
+		case "tls_ca_cert_file":
+			tlsCaCertFile = strings.Trim(strVal, "\"")
+		case "tls_ca_cert_env_variable":
+			tlsCaCertEnvVar = strings.Trim(strVal, "\"")
+		case "tls_client_cert":
+			tlsClientCert = bytes.Trim([]byte(strVal), "\"")
+		case "tls_client_cert_file":
+			tlsClientCertFile = strings.Trim(strVal, "\"")
+		case "tls_client_cert_env_variable":
+			tlsClientCertEnvVar = strings.Trim(strVal, "\"")
+		case "tls_client_key":
+			tlsClientKey = bytes.Trim([]byte(strVal), "\"")
+		case "tls_client_key_file":
+			tlsClientKeyFile = strings.Trim(strVal, "\"")
+		case "tls_client_key_env_variable":
+			tlsClientKeyEnvVar = strings.Trim(strVal, "\"")
+		case "tls_server_name":
+			tlsServerName = strings.Trim(strVal, "\"")
+		case "headers":
+			headersVal := obj.Get(val).Value
+			headersValInterface, err := ast.JSON(headersVal)
+			if err != nil {
+				return nil, nil, err
+			}
+			var ok bool
+			customHeaders, ok = headersValInterface.(map[string]interface{})
+			if !ok {
+				return nil, nil, fmt.Errorf("invalid type for headers key")
+			}
+		case "tls_insecure_skip_verify":
+			tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String())
+			if err != nil {
+				return nil, nil, err
+			}
+		case "timeout":
+			timeout, err = parseTimeout(obj.Get(val).Value)
+			if err != nil {
+				return nil, nil, err
+			}
+		case "cache", "force_cache", "force_cache_duration_seconds", "force_json_decode", "raise_error": // no-op
+		default:
+			return nil, nil, fmt.Errorf("invalid parameter %q", key)
+		}
+	}
+
+	isTLS := false
+	client := &http.Client{
+		Timeout: timeout,
+	}
+
+	if tlsInsecureSkipVerify {
+		isTLS = true
+		tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify
+	}
+
+	// Client certificate/key pairs: inline bytes, files, then env vars.
+	if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 {
+		cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		isTLS = true
+		tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+	}
+
+	if tlsClientCertFile != "" && tlsClientKeyFile != "" {
+		cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		isTLS = true
+		tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+	}
+
+	if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" {
+		cert, err := tls.X509KeyPair(
+			[]byte(os.Getenv(tlsClientCertEnvVar)),
+			[]byte(os.Getenv(tlsClientKeyEnvVar)))
+		if err != nil {
+			return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w",
+				tlsClientCertEnvVar, tlsClientKeyEnvVar, err)
+		}
+
+		isTLS = true
+		tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+	}
+
+	// Check the system certificates config first so that we
+	// load additional certificated into the correct pool.
+	if tlsUseSystemCerts {
+		pool, err := x509.SystemCertPool()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		isTLS = true
+		tlsConfig.RootCAs = pool
+	}
+
+	if len(tlsCaCert) != 0 {
+		// Inline CA certs may carry escaped newlines; restore them before parsing.
+		tlsCaCert = bytes.Replace(tlsCaCert, []byte("\\n"), []byte("\n"), -1)
+		pool, err := addCACertsFromBytes(tlsConfig.RootCAs, []byte(tlsCaCert))
+		if err != nil {
+			return nil, nil, err
+		}
+
+		isTLS = true
+		tlsConfig.RootCAs = pool
+	}
+
+	if tlsCaCertFile != "" {
+		pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		isTLS = true
+		tlsConfig.RootCAs = pool
+	}
+
+	if tlsCaCertEnvVar != "" {
+		pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		isTLS = true
+		tlsConfig.RootCAs = pool
+	}
+
+	// The Transport holds a pointer to tlsConfig, so the ServerName
+	// assignments further down still take effect.
+	if isTLS {
+		client.Transport = &http.Transport{
+			TLSClientConfig: &tlsConfig,
+		}
+	}
+
+	// check if redirects are enabled
+	if !enableRedirect {
+		client.CheckRedirect = func(*http.Request, []*http.Request) error {
+			return http.ErrUseLastResponse
+		}
+	}
+
+	// "raw_body" takes precedence over "body"; otherwise send an empty body.
+	if rawBody != nil {
+		body = rawBody
+	} else if body == nil {
+		body = bytes.NewBufferString("")
+	}
+
+	// create the http request, use the builtin context's context to ensure
+	// the request is cancelled if evaluation is cancelled.
+	req, err := http.NewRequest(method, url, body)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req = req.WithContext(bctx.Context)
+
+	// Add custom headers
+	if len(customHeaders) != 0 {
+		customHeaders = canonicalizeHeaders(customHeaders)
+
+		for k, v := range customHeaders {
+			header, ok := v.(string)
+			if !ok {
+				return nil, nil, fmt.Errorf("invalid type for headers value %q", v)
+			}
+
+			req.Header.Add(k, header)
+		}
+
+		// Don't overwrite or append to one that was set in the custom headers
+		if _, hasUA := customHeaders["User-Agent"]; !hasUA {
+			req.Header.Add("User-Agent", version.UserAgent)
+		}
+
+		// If the caller specifies the Host header, use it for the HTTP
+		// request host and the TLS server name.
+		if host, hasHost := customHeaders["Host"]; hasHost {
+			host := host.(string) // We already checked that it's a string.
+			req.Host = host
+
+			// Only default the ServerName if the caller has
+			// specified the host. If we don't specify anything,
+			// Go will default to the target hostname. This name
+			// is not the same as the default that Go populates
+			// `req.Host` with, which is why we don't just set
+			// this unconditionally.
+			tlsConfig.ServerName = host
+		}
+	}
+
+	// Explicit tls_server_name overrides a Host-derived ServerName.
+	if tlsServerName != "" {
+		tlsConfig.ServerName = tlsServerName
+	}
+
+	return req, client, nil
+}
+
+// executeHTTPRequest performs req with the given client. The caller is
+// responsible for closing the response body on success.
+func executeHTTPRequest(req *http.Request, client *http.Client) (*http.Response, error) {
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// isContentTypeJSON reports whether the Content-Type header mentions
+// "application/json" (substring match, so parameters like charset are fine).
+func isContentTypeJSON(header http.Header) bool {
+	return strings.Contains(header.Get("Content-Type"), "application/json")
+}
+
+// In the BuiltinContext cache we only store a single entry that points to
+// our ValueMap which is the "real" http.send() cache.
+func getHTTPSendCache(bctx BuiltinContext) *ast.ValueMap {
+	raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey)
+	if !ok {
+		// Initialize if it isn't there
+		cache := ast.NewValueMap()
+		bctx.Cache.Put(httpSendBuiltinCacheKey, cache)
+		return cache
+	}
+
+	// Defensive: some other writer stored a foreign type under our key.
+	cache, ok := raw.(*ast.ValueMap)
+	if !ok {
+		return nil
+	}
+	return cache
+}
+
+// checkHTTPSendCache checks for the given key's value in the cache
+func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) ast.Value {
+	requestCache := getHTTPSendCache(bctx)
+	if requestCache == nil {
+		return nil
+	}
+
+	return requestCache.Get(key)
+}
+
+// insertIntoHTTPSendCache stores value under key in the intra-query cache.
+// Failures to obtain the cache are silently ignored (caching is best-effort).
+func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) {
+	requestCache := getHTTPSendCache(bctx)
+	if requestCache == nil {
+		// Should never happen.. if it does just skip caching the value
+		return
+	}
+	requestCache.Put(key, value)
+}
+
+// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache.
+// A fresh cached response is returned as-is. A stale one is revalidated with
+// the origin server: on 304 the cached entry's headers are refreshed, on 200
+// the new response replaces it; either way the old entry is evicted and the
+// (re)validated value is re-inserted. Returns (nil, nil) on a cache miss.
+func checkHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Object, req *http.Request, client *http.Client, forceJSONDecode bool, cacheParams *forceCacheParams) (ast.Value, error) {
+	requestCache := bctx.InterQueryBuiltinCache
+
+	value, found := requestCache.Get(key)
+	if !found {
+		return nil, nil
+	}
+
+	cachedResp, ok := value.(*interQueryCacheValue)
+	if !ok {
+		return nil, nil
+	}
+
+	// check the freshness of the cached response
+	if isCachedResponseFresh(bctx, cachedResp, cacheParams) {
+		return cachedResp.value, nil
+	}
+
+	// check with the server if the stale response is still up-to-date.
+	// If server returns a new response (ie. status_code=200), update the cache with the new response
+	// If server returns an unmodified response (ie. status_code=304), update the headers for the existing response
+	result, modified, err := revalidateCachedResponse(req, client, cachedResp)
+	requestCache.Delete(key)
+	if err != nil || result == nil {
+		return nil, err
+	}
+
+	defer result.Body.Close()
+
+	var newValue ast.Value
+	var size int
+
+	if !modified {
+		// update the headers in the cached response with their corresponding values from the 304 (Not Modified) response
+		cachedRespObj := cachedResp.value.(ast.Object)
+		existingHeaders := cachedRespObj.Get(ast.StringTerm("headers"))
+		existingHeadersObj := existingHeaders.Value.(ast.Object)
+
+		newHeaders := getResponseHeaders(result.Header)
+		for k, v := range newHeaders {
+			valueAST, err := ast.InterfaceToValue(v)
+			if err != nil {
+				return nil, err
+			}
+			existingHeadersObj.Insert(ast.StringTerm(k), ast.NewTerm(valueAST))
+		}
+
+		cachedRespObj.Insert(ast.StringTerm("headers"), ast.NewTerm(existingHeadersObj))
+		newValue = cachedRespObj
+		size = cachedResp.size
+	} else {
+		newValue, size, err = formatHTTPResponseToAST(result, forceJSONDecode)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = insertIntoHTTPSendInterQueryCache(bctx, key, newValue, result, size, cacheParams != nil)
+	if err != nil {
+		return nil, err
+	}
+	return newValue, nil
+}
+
+// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache.
+// The entry is skipped when the response carries a "no-store" cache directive,
+// unless force is set (force_cache overrides server directives).
+func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key, value ast.Value, resp *http.Response, size int, force bool) error {
+	if resp == nil || (!force && !canStore(resp.Header)) {
+		return nil
+	}
+
+	requestCache := bctx.InterQueryBuiltinCache
+
+	pcv, err := newInterQueryCacheValue(resp, value, size)
+	if err != nil {
+		return err
+	}
+
+	requestCache.Insert(key, pcv)
+	return nil
+}
+
+// createAllowedKeys populates the allowedKeys set from allowedKeyNames.
+// It is expected to run once during package initialization.
+func createAllowedKeys() {
+	for _, element := range allowedKeyNames {
+		allowedKeys.Add(ast.StringTerm(element))
+	}
+}
+
+// parseTimeout converts the "timeout" request parameter to a time.Duration.
+// Numbers (and numeric strings) are interpreted as nanoseconds; other strings
+// must be valid time.ParseDuration input (e.g. "500ms").
+func parseTimeout(timeoutVal ast.Value) (time.Duration, error) {
+	var timeout time.Duration
+	switch t := timeoutVal.(type) {
+	case ast.Number:
+		timeoutInt, ok := t.Int64()
+		if !ok {
+			return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal)
+		}
+		return time.Duration(timeoutInt), nil
+	case ast.String:
+		// Support strings without a unit, treat them the same as just a number value (ns)
+		var err error
+		timeoutInt, err := strconv.ParseInt(string(t), 10, 64)
+		if err == nil {
+			return time.Duration(timeoutInt), nil
+		}
+
+		// Try parsing it as a duration (requires a supported units suffix)
+		timeout, err = time.ParseDuration(string(t))
+		if err != nil {
+			return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err)
+		}
+		return timeout, nil
+	default:
+		return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.TypeName(t))
+	}
+}
+
+// getBoolValFromReqObj reads the boolean value stored under key in the request
+// object. A missing key yields false; a present non-boolean value is an error.
+func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) {
+	var b ast.Boolean
+	var ok bool
+	if v := req.Get(key); v != nil {
+		if b, ok = v.Value.(ast.Boolean); !ok {
+			return false, fmt.Errorf("invalid value for %v field", key.String())
+		}
+	}
+	return bool(b), nil
+}
+
+// interQueryCacheValue is the entry type stored in the inter-query builtin
+// cache for http.send(): the formatted response plus the HTTP caching
+// metadata needed to decide freshness and perform revalidation.
+type interQueryCacheValue struct {
+	value        ast.Value         // http response
+	size         int               // size of response body
+	date         time.Time         // origination date and time of response
+	cacheControl map[string]string // response cache-control header
+	maxAge       deltaSeconds      // max-age cache control directive
+	expires      time.Time         // date/time after which the response is considered stale
+	etag         string            // identifier for a specific version of the response
+	lastModified string            // date and time response was last modified as per origin server
+}
+
+// deltaSeconds specifies a non-negative integer, representing
+// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1
+type deltaSeconds int32
+
+// newInterQueryCacheValue builds a cache entry from resp, extracting the
+// caching-related headers. maxAge starts at -1, meaning "no max-age directive".
+func newInterQueryCacheValue(resp *http.Response, value ast.Value, size int) (*interQueryCacheValue, error) {
+	cv := interQueryCacheValue{value: value, size: size, maxAge: -1}
+
+	err := parseResponseAndInjectHeaders(resp, &cv)
+	if err != nil {
+		return nil, err
+	}
+	return &cv, nil
+}
+
+// SizeInBytes reports the cached response body size, used by the
+// inter-query cache for capacity accounting.
+func (c interQueryCacheValue) SizeInBytes() int64 {
+	return int64(c.size)
+}
+
+// parseResponseAndInjectHeaders fills pcv's caching metadata (Date,
+// Cache-Control, max-age, Expires, ETag, Last-Modified) from resp's headers.
+// A missing or unparsable Date header is an error; the rest are optional.
+func parseResponseAndInjectHeaders(resp *http.Response, pcv *interQueryCacheValue) error {
+	var err error
+
+	pcv.date, err = getResponseHeaderDate(resp.Header)
+	if err != nil {
+		return err
+	}
+
+	pcv.cacheControl = parseCacheControlHeader(resp.Header)
+	pcv.maxAge, err = parseMaxAgeCacheDirective(pcv.cacheControl)
+	if err != nil {
+		return err
+	}
+
+	pcv.expires, err = getResponseHeaderExpires(resp.Header)
+	if err != nil {
+		return err
+	}
+
+	pcv.etag = resp.Header.Get("etag")
+
+	pcv.lastModified = resp.Header.Get("last-modified")
+
+	return nil
+}
+
+// revalidateCachedResponse issues a conditional request (If-None-Match /
+// If-Modified-Since) for a stale cached entry. It returns (resp, true, nil)
+// on 200 (content changed), (resp, false, nil) on 304 (still valid), and
+// (nil, false, nil) when no validators are available or the server answered
+// with any other status.
+func revalidateCachedResponse(req *http.Request, client *http.Client, resp *interQueryCacheValue) (*http.Response, bool, error) {
+	etag := resp.etag
+	lastModified := resp.lastModified
+
+	// Without validators there is nothing to revalidate against.
+	if etag == "" && lastModified == "" {
+		return nil, false, nil
+	}
+
+	cloneReq := req.Clone(req.Context())
+
+	if etag != "" {
+		cloneReq.Header.Set("if-none-match", etag)
+	}
+
+	if lastModified != "" {
+		cloneReq.Header.Set("if-modified-since", lastModified)
+	}
+
+	response, err := client.Do(cloneReq)
+	if err != nil {
+		return nil, false, err
+	}
+
+	switch response.StatusCode {
+	case http.StatusOK:
+		return response, true, nil
+
+	case http.StatusNotModified:
+		return response, false, nil
+	}
+	// Any other status: discard the response so the connection can be reused.
+	util.Close(response)
+	return nil, false, nil
+}
+
+// canStore reports whether a response with these headers may be cached,
+// i.e. whether the Cache-Control header lacks a "no-store" directive.
+func canStore(headers http.Header) bool {
+	ccHeaders := parseCacheControlHeader(headers)
+
+	// Check "no-store" cache directive
+	// The "no-store" response directive indicates that a cache MUST NOT
+	// store any part of either the immediate request or response.
+	if _, ok := ccHeaders["no-store"]; ok {
+		return false
+	}
+	return true
+}
+
+// isCachedResponseFresh reports whether the cached response can be served
+// without revalidation. Age is computed against the evaluation's fixed
+// wall-clock time (bctx.Time). When cacheParams is set (force_cache), its
+// duration overrides the server's directives; otherwise max-age takes
+// precedence over Expires per RFC 7234.
+func isCachedResponseFresh(bctx BuiltinContext, resp *interQueryCacheValue, cacheParams *forceCacheParams) bool {
+	if resp.date.IsZero() {
+		return false
+	}
+
+	currentTime := getCurrentTime(bctx)
+	if currentTime.IsZero() {
+		return false
+	}
+
+	currentAge := currentTime.Sub(resp.date)
+
+	// The time.Sub operation uses wall clock readings and
+	// not monotonic clock readings as the parsed version of the response time
+	// does not contain monotonic clock readings. This can result in negative durations.
+	// Another scenario where a negative duration can occur, is when a server sets the Date
+	// response header. As per https://tools.ietf.org/html/rfc7231#section-7.1.1.2,
+	// an origin server MUST NOT send a Date header field if it does not
+	// have a clock capable of providing a reasonable approximation of the
+	// current instance in Coordinated Universal Time.
+	// Hence, consider the cached response as stale if a negative duration is encountered.
+	if currentAge < 0 {
+		return false
+	}
+
+	if cacheParams != nil {
+		// override the cache directives set by the server
+		maxAgeDur := time.Second * time.Duration(cacheParams.forceCacheDurationSeconds)
+		if maxAgeDur > currentAge {
+			return true
+		}
+	} else {
+		// Check "max-age" cache directive.
+		// The "max-age" response directive indicates that the response is to be
+		// considered stale after its age is greater than the specified number
+		// of seconds.
+		if resp.maxAge != -1 {
+			maxAgeDur := time.Second * time.Duration(resp.maxAge)
+			if maxAgeDur > currentAge {
+				return true
+			}
+		} else {
+			// Check "Expires" header.
+			// Note: "max-age" if set, takes precedence over "Expires"
+			if resp.expires.Sub(resp.date) > currentAge {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// getCurrentTime converts bctx.Time (nanoseconds since the Unix epoch, fixed
+// for the whole query) into a UTC time.Time. On any conversion failure it
+// returns the zero time, which callers treat as "unknown".
+func getCurrentTime(bctx BuiltinContext) time.Time {
+	var current time.Time
+
+	value, err := ast.JSON(bctx.Time.Value)
+	if err != nil {
+		return current
+	}
+
+	valueNum, ok := value.(json.Number)
+	if !ok {
+		return current
+	}
+
+	valueNumInt, err := valueNum.Int64()
+	if err != nil {
+		return current
+	}
+
+	current = time.Unix(0, valueNumInt).UTC()
+	return current
+}
+
+// parseCacheControlHeader splits the Cache-Control header into a directive
+// map. Value-less directives (e.g. "no-store") map to the empty string;
+// malformed "k=v=w" parts are skipped.
+func parseCacheControlHeader(headers http.Header) map[string]string {
+	ccDirectives := map[string]string{}
+	ccHeader := headers.Get("cache-control")
+
+	for _, part := range strings.Split(ccHeader, ",") {
+		part = strings.Trim(part, " ")
+		if part == "" {
+			continue
+		}
+		if strings.ContainsRune(part, '=') {
+			items := strings.Split(part, "=")
+			if len(items) != 2 {
+				continue
+			}
+			ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",")
+		} else {
+			ccDirectives[part] = ""
+		}
+	}
+
+	return ccDirectives
+}
+
+// getResponseHeaderDate parses the response Date header (RFC 1123 format).
+// A missing Date header is an error, as age calculations depend on it.
+func getResponseHeaderDate(headers http.Header) (date time.Time, err error) {
+	dateHeader := headers.Get("date")
+	if dateHeader == "" {
+		err = fmt.Errorf("no date header")
+		return
+	}
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+// getResponseHeaderExpires parses the optional Expires header (RFC 1123
+// format); absence yields the zero time with no error.
+func getResponseHeaderExpires(headers http.Header) (date time.Time, err error) {
+	expiresHeader := headers.Get("expires")
+	if expiresHeader == "" {
+		return
+	}
+	return time.Parse(time.RFC1123, expiresHeader)
+}
+
+// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per
+// https://tools.ietf.org/html/rfc7234#section-1.2.1
+// Absence is reported as -1; values exceeding int32 saturate to MaxInt32.
+func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) {
+	maxAge, ok := cc["max-age"]
+	if !ok {
+		return deltaSeconds(-1), nil
+	}
+
+	val, err := strconv.ParseUint(maxAge, 10, 32)
+	if err != nil {
+		// Out-of-range values saturate rather than fail, per RFC guidance
+		// that caches treat overly large delta-seconds as "very fresh".
+		if numError, ok := err.(*strconv.NumError); ok {
+			if numError.Err == strconv.ErrRange {
+				return deltaSeconds(math.MaxInt32), nil
+			}
+		}
+		return deltaSeconds(-1), err
+	}
+
+	if val > math.MaxInt32 {
+		return deltaSeconds(math.MaxInt32), nil
+	}
+	return deltaSeconds(val), nil
+}
+
+// formatHTTPResponseToAST converts an HTTP response into the AST object
+// returned by http.send() — status, status_code, body, raw_body and headers —
+// and also reports the raw body length (used for cache size accounting).
+// The body is consumed here; the caller still owns closing it.
+func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode bool) (ast.Value, int, error) {
+
+	var resultBody interface{}
+	var resultRawBody []byte
+
+	// Tee the body so it can be both kept verbatim (raw_body) and
+	// JSON-decoded (body) without re-reading the network stream.
+	var buf bytes.Buffer
+	tee := io.TeeReader(resp.Body, &buf)
+	resultRawBody, err := ioutil.ReadAll(tee)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// If the response body cannot be JSON decoded,
+	// an error will not be returned. Instead the "body" field
+	// in the result will be null.
+	if isContentTypeJSON(resp.Header) || forceJSONDecode {
+		json.NewDecoder(&buf).Decode(&resultBody)
+	}
+
+	result := make(map[string]interface{})
+	result["status"] = resp.Status
+	result["status_code"] = resp.StatusCode
+	result["body"] = resultBody
+	result["raw_body"] = string(resultRawBody)
+	result["headers"] = getResponseHeaders(resp.Header)
+
+	resultObj, err := ast.InterfaceToValue(result)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return resultObj, len(resultRawBody), nil
+}
+
+// getResponseHeaders converts an http.Header into a plain map with
+// lowercased header names, each mapping to the slice of its values.
+func getResponseHeaders(headers http.Header) map[string]interface{} {
+	respHeaders := map[string]interface{}{}
+	for headerName, values := range headers {
+		var respValues []interface{}
+		for _, v := range values {
+			respValues = append(respValues, v)
+		}
+		respHeaders[strings.ToLower(headerName)] = respValues
+	}
+	return respHeaders
+}
+
+// httpRequestExecutor defines an interface for the http send cache
+type httpRequestExecutor interface {
+	CheckCache() (ast.Value, error)
+	InsertIntoCache(value *http.Response) (ast.Value, error)
+	ExecuteHTTPRequest() (*http.Response, error)
+}
+
+// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or
+// intra-query cache implementation
+func newHTTPRequestExecutor(bctx BuiltinContext, key ast.Object) (httpRequestExecutor, error) {
+	useInterQueryCache, forceCacheParams, err := useInterQueryCache(key)
+	if err != nil {
+		return nil, handleHTTPSendErr(bctx, err)
+	}
+
+	// Inter-query caching requires both the request asking for it and the
+	// runtime having configured an inter-query cache.
+	if useInterQueryCache && bctx.InterQueryBuiltinCache != nil {
+		return newInterQueryCache(bctx, key, forceCacheParams)
+	}
+	return newIntraQueryCache(bctx, key)
+}
+
+// interQueryCache is the httpRequestExecutor backed by the inter-query
+// builtin cache, with fallback to the intra-query cache on errors.
+// httpReq/httpClient/forceJSONDecode are populated lazily by CheckCache.
+type interQueryCache struct {
+	bctx             BuiltinContext
+	key              ast.Object
+	httpReq          *http.Request
+	httpClient       *http.Client
+	forceJSONDecode  bool
+	forceCacheParams *forceCacheParams
+}
+
+// newInterQueryCache returns an inter-query cache executor for the given key.
+func newInterQueryCache(bctx BuiltinContext, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) {
+	return &interQueryCache{bctx: bctx, key: key, forceCacheParams: forceCacheParams}, nil
+}
+
+// CheckCache checks the cache for the value of the key set on this object.
+// It also prepares the request/client pair up front so that a later
+// ExecuteHTTPRequest (on cache miss) can reuse them.
+func (c *interQueryCache) CheckCache() (ast.Value, error) {
+	var err error
+
+	c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key)
+	if err != nil {
+		return nil, handleHTTPSendErr(c.bctx, err)
+	}
+
+	c.forceJSONDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode"))
+	if err != nil {
+		return nil, handleHTTPSendErr(c.bctx, err)
+	}
+
+	resp, err := checkHTTPSendInterQueryCache(c.bctx, c.key, c.httpReq, c.httpClient, c.forceJSONDecode, c.forceCacheParams)
+
+	// fallback to the http send cache if response not found in the inter-query cache or inter-query cache look-up results
+	// in an error
+	if resp == nil || err != nil {
+		return checkHTTPSendCache(c.bctx, c.key), nil
+	}
+	return resp, err
+}
+
+// InsertIntoCache inserts the key set on this object into the cache with the given value.
+// The formatted result is returned to the caller regardless of whether
+// caching succeeded.
+func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) {
+	result, size, err := formatHTTPResponseToAST(value, c.forceJSONDecode)
+	if err != nil {
+		return nil, handleHTTPSendErr(c.bctx, err)
+	}
+
+	// fallback to the http send cache if error encountered while inserting response in inter-query cache
+	err = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, value, size, c.forceCacheParams != nil)
+	if err != nil {
+		insertIntoHTTPSendCache(c.bctx, c.key, result)
+	}
+	return result, nil
+}
+
+// ExecuteHTTPRequest executes a HTTP request using the request/client pair
+// prepared by CheckCache.
+func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) {
+	return executeHTTPRequest(c.httpReq, c.httpClient)
+}
+
+// intraQueryCache is the httpRequestExecutor backed only by the per-query
+// http.send cache (no cross-query reuse, no HTTP cache semantics).
+type intraQueryCache struct {
+	bctx BuiltinContext
+	key  ast.Object
+}
+
+// newIntraQueryCache returns an intra-query cache executor for the given key.
+func newIntraQueryCache(bctx BuiltinContext, key ast.Object) (*intraQueryCache, error) {
+	return &intraQueryCache{bctx: bctx, key: key}, nil
+}
+
+// CheckCache checks the cache for the value of the key set on this object
+func (c *intraQueryCache) CheckCache() (ast.Value, error) {
+	return checkHTTPSendCache(c.bctx, c.key), nil
+}
+
+// InsertIntoCache inserts the key set on this object into the cache with the given value.
+// The response is formatted first (honouring force_json_decode) and the
+// formatted AST value is both cached and returned.
+func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) {
+	forceJSONDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode"))
+	if err != nil {
+		return nil, handleHTTPSendErr(c.bctx, err)
+	}
+
+	result, _, err := formatHTTPResponseToAST(value, forceJSONDecode)
+	if err != nil {
+		return nil, handleHTTPSendErr(c.bctx, err)
+	}
+
+	insertIntoHTTPSendCache(c.bctx, c.key, result)
+	return result, nil
+}
+
+// ExecuteHTTPRequest executes a HTTP request. Unlike the inter-query variant,
+// the request/client pair is built on demand here.
+func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) {
+	httpReq, httpClient, err := createHTTPRequest(c.bctx, c.key)
+	if err != nil {
+		return nil, handleHTTPSendErr(c.bctx, err)
+	}
+	return executeHTTPRequest(httpReq, httpClient)
+}
+
+// useInterQueryCache inspects the "cache" and "force_cache" request keys.
+// force_cache implies inter-query caching and yields the associated
+// forceCacheParams; otherwise the plain "cache" boolean decides.
+func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) {
+	value, err := getBoolValFromReqObj(req, ast.StringTerm("cache"))
+	if err != nil {
+		return false, nil, err
+	}
+
+	valueForceCache, err := getBoolValFromReqObj(req, ast.StringTerm("force_cache"))
+	if err != nil {
+		return false, nil, err
+	}
+
+	if valueForceCache {
+		forceCacheParams, err := newForceCacheParams(req)
+		return true, forceCacheParams, err
+	}
+
+	return value, nil, nil
+}
+
+// forceCacheParams carries the caller-imposed freshness window used when
+// "force_cache" overrides the server's cache directives.
+type forceCacheParams struct {
+	forceCacheDurationSeconds int32
+}
+
+// newForceCacheParams extracts "force_cache_duration_seconds" from the
+// request object; the key is mandatory once "force_cache" is set.
+func newForceCacheParams(req ast.Object) (*forceCacheParams, error) {
+	term := req.Get(ast.StringTerm("force_cache_duration_seconds"))
+	if term == nil {
+		return nil, fmt.Errorf("'force_cache' set but 'force_cache_duration_seconds' parameter is missing")
+	}
+
+	forceCacheDurationSeconds := term.String()
+
+	value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32)
+	if err != nil {
+		return nil, err
+	}
+
+	return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil
+}
+
+// getRaiseErrorValue reads the "raise_error" request key, defaulting to true
+// when absent; a present non-boolean value is an error.
+func getRaiseErrorValue(req ast.Object) (bool, error) {
+	result := ast.Boolean(true)
+	var ok bool
+	if v := req.Get(ast.StringTerm("raise_error")); v != nil {
+		if result, ok = v.Value.(ast.Boolean); !ok {
+			return false, fmt.Errorf("invalid value for raise_error field")
+		}
+	}
+	return bool(result), nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/input.go b/vendor/github.com/open-policy-agent/opa/topdown/input.go
new file mode 100644
index 00000000..cb70aeb7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/input.go
@@ -0,0 +1,100 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// errBadPath is returned when a supplied document path is not a valid
+// import path (see ast.IsValidImportPath).
+var errBadPath = fmt.Errorf("bad document path")
+
+// mergeTermWithValues overlays the (path, value) pairs onto exist and returns
+// the merged term. Pairs are applied in order, later pairs overriding earlier
+// ones; a pair whose path is a prefix of a later pair's path gets its value
+// copied first so the later write cannot mutate shared structure. exist is
+// only copied (never mutated) and is returned unchanged when pairs is empty.
+func mergeTermWithValues(exist *ast.Term, pairs [][2]*ast.Term) (*ast.Term, error) {
+
+	var result *ast.Term
+	var init bool
+
+	for i, pair := range pairs {
+
+		if err := ast.IsValidImportPath(pair[0].Value); err != nil {
+			return nil, errBadPath
+		}
+
+		target := pair[0].Value.(ast.Ref)
+
+		// Copy the value if subsequent pairs in the slice would modify it.
+		for j := i + 1; j < len(pairs); j++ {
+			other := pairs[j][0].Value.(ast.Ref)
+			if len(other) > len(target) && other.HasPrefix(target) {
+				pair[1] = pair[1].Copy()
+				break
+			}
+		}
+
+		if len(target) == 1 {
+			// Path is just the root document: replace everything.
+			result = pair[1]
+			init = true
+		} else {
+			if !init {
+				result = exist.Copy()
+				init = true
+			}
+			if result == nil {
+				result = ast.NewTerm(makeTree(target[1:], pair[1]))
+			} else {
+				// Descend along the path, creating or replacing
+				// intermediate objects as needed.
+				node := result
+				done := false
+				for i := 1; i < len(target)-1 && !done; i++ {
+					obj, ok := node.Value.(ast.Object)
+					if !ok {
+						// A non-object blocks the path: replace the rest wholesale.
+						result = ast.NewTerm(makeTree(target[i:], pair[1]))
+						done = true
+						continue
+					}
+					if child := obj.Get(target[i]); !isObject(child) {
+						obj.Insert(target[i], ast.NewTerm(makeTree(target[i+1:], pair[1])))
+						done = true
+					} else { // child is object
+						node = child
+					}
+				}
+				if !done {
+					if obj, ok := node.Value.(ast.Object); ok {
+						obj.Insert(target[len(target)-1], pair[1])
+					} else {
+						result = ast.NewTerm(makeTree(target[len(target)-1:], pair[1]))
+					}
+				}
+			}
+		}
+	}
+
+	if !init {
+		result = exist
+	}
+
+	return result, nil
+}
+
+// makeTree returns an object that represents a document where the value v is
+// the leaf and elements in k represent intermediate objects. k must be
+// non-empty; the keys are nested outermost-first.
+func makeTree(k ast.Ref, v *ast.Term) ast.Object {
+	var obj ast.Object
+	for i := len(k) - 1; i >= 1; i-- {
+		obj = ast.NewObject(ast.Item(k[i], v))
+		v = &ast.Term{Value: obj}
+	}
+	obj = ast.NewObject(ast.Item(k[0], v))
+	return obj
+}
+
+// isObject reports whether x is a non-nil term holding an ast.Object.
+func isObject(x *ast.Term) bool {
+	if x == nil {
+		return false
+	}
+	_, ok := x.Value.(ast.Object)
+	return ok
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go b/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go
new file mode 100644
index 00000000..6eacc338
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import "github.com/open-policy-agent/opa/metrics"
+
+// Metric keys recorded during evaluation. Each name identifies a timer,
+// histogram, or counter in the underlying metrics.Metrics store.
+const (
+	evalOpPlug                    = "eval_op_plug"
+	evalOpResolve                 = "eval_op_resolve"
+	evalOpRuleIndex               = "eval_op_rule_index"
+	evalOpBuiltinCall             = "eval_op_builtin_call"
+	evalOpVirtualCacheHit         = "eval_op_virtual_cache_hit"
+	evalOpVirtualCacheMiss        = "eval_op_virtual_cache_miss"
+	evalOpBaseCacheHit            = "eval_op_base_cache_hit"
+	evalOpBaseCacheMiss           = "eval_op_base_cache_miss"
+	evalOpComprehensionCacheSkip  = "eval_op_comprehension_cache_skip"
+	evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build"
+	evalOpComprehensionCacheHit   = "eval_op_comprehension_cache_hit"
+	evalOpComprehensionCacheMiss  = "eval_op_comprehension_cache_miss"
+	partialOpSaveUnify            = "partial_op_save_unify"
+	partialOpSaveSetContains      = "partial_op_save_set_contains"
+	partialOpSaveSetContainsRec   = "partial_op_save_set_contains_rec"
+	partialOpCopyPropagation      = "partial_op_copy_propagation"
+)
+
+// Instrumentation implements helper functions to instrument query evaluation
+// to diagnose performance issues. Instrumentation may be expensive in some
+// cases, so it is disabled by default.
+type Instrumentation struct {
+	m metrics.Metrics
+}
+
+// NewInstrumentation returns a new Instrumentation object. Performance
+// diagnostics recorded on this Instrumentation object will stored in m.
+func NewInstrumentation(m metrics.Metrics) *Instrumentation {
+	return &Instrumentation{
+		m: m,
+	}
+}
+
+// startTimer starts the named timer. All methods are no-ops on a nil
+// receiver, so callers never need to check whether instrumentation is on.
+func (instr *Instrumentation) startTimer(name string) {
+	if instr == nil {
+		return
+	}
+	instr.m.Timer(name).Start()
+}
+
+// stopTimer stops the named timer and records the elapsed delta into the
+// histogram of the same name.
+func (instr *Instrumentation) stopTimer(name string) {
+	if instr == nil {
+		return
+	}
+	delta := instr.m.Timer(name).Stop()
+	instr.m.Histogram(name).Update(delta)
+}
+
+// counterIncr increments the named counter.
+func (instr *Instrumentation) counterIncr(name string) {
+	if instr == nil {
+		return
+	}
+	instr.m.Counter(name).Incr()
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/json.go b/vendor/github.com/open-policy-agent/opa/topdown/json.go
new file mode 100644
index 00000000..a0241e7d
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/json.go
@@ -0,0 +1,623 @@
+// Copyright 2019 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+func builtinJSONRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ // Expect an object and a string or array/set of strings
+ _, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ // Build a list of json pointers to remove
+ paths, err := getJSONPaths(operands[1].Value)
+ if err != nil {
+ return err
+ }
+
+ newObj, err := jsonRemove(operands[0], ast.NewTerm(pathsToObject(paths)))
+ if err != nil {
+ return err
+ }
+
+ if newObj == nil {
+ return nil
+ }
+
+ return iter(newObj)
+}
+
+// jsonRemove returns a new term that is the result of walking
+// through a and removing any values that are in b but
+// have ast.Null values (ie leaf nodes for b).
+func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) {
+ if b == nil {
+ // The paths diverged, return a
+ return a, nil
+ }
+
+ var bObj ast.Object
+ switch bValue := b.Value.(type) {
+ case ast.Object:
+ bObj = bValue
+ case ast.Null:
+		// Means we hit a leaf node on "b", don't add the value for a
+ return nil, nil
+ default:
+ // The paths diverged, return a
+ return a, nil
+ }
+
+ switch aValue := a.Value.(type) {
+ case ast.String, ast.Number, ast.Boolean, ast.Null:
+ return a, nil
+ case ast.Object:
+ newObj := ast.NewObject()
+ err := aValue.Iter(func(k *ast.Term, v *ast.Term) error {
+ // recurse and add the diff of sub objects as needed
+ diffValue, err := jsonRemove(v, bObj.Get(k))
+ if err != nil || diffValue == nil {
+ return err
+ }
+ newObj.Insert(k, diffValue)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return ast.NewTerm(newObj), nil
+ case ast.Set:
+ newSet := ast.NewSet()
+ err := aValue.Iter(func(v *ast.Term) error {
+ // recurse and add the diff of sub objects as needed
+ diffValue, err := jsonRemove(v, bObj.Get(v))
+ if err != nil || diffValue == nil {
+ return err
+ }
+ newSet.Add(diffValue)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return ast.NewTerm(newSet), nil
+ case *ast.Array:
+ // When indexes are removed we shift left to close empty spots in the array
+ // as per the JSON patch spec.
+ newArray := ast.NewArray()
+ for i := 0; i < aValue.Len(); i++ {
+ v := aValue.Elem(i)
+ // recurse and add the diff of sub objects as needed
+ // Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}}
+ diffValue, err := jsonRemove(v, bObj.Get(ast.StringTerm(strconv.Itoa(i))))
+ if err != nil {
+ return nil, err
+ }
+ if diffValue != nil {
+ newArray = newArray.Append(diffValue)
+ }
+ }
+ return ast.NewTerm(newArray), nil
+ default:
+ return nil, fmt.Errorf("invalid value type %T", a)
+ }
+}
+
+func builtinJSONFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ // Ensure we have the right parameters, expect an object and a string or array/set of strings
+ obj, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ // Build a list of filter strings
+ filters, err := getJSONPaths(operands[1].Value)
+ if err != nil {
+ return err
+ }
+
+ // Actually do the filtering
+ filterObj := pathsToObject(filters)
+ r, err := obj.Filter(filterObj)
+ if err != nil {
+ return err
+ }
+
+ return iter(ast.NewTerm(r))
+}
+
+func getJSONPaths(operand ast.Value) ([]ast.Ref, error) {
+ var paths []ast.Ref
+
+ switch v := operand.(type) {
+ case *ast.Array:
+ for i := 0; i < v.Len(); i++ {
+ filter, err := parsePath(v.Elem(i))
+ if err != nil {
+ return nil, err
+ }
+ paths = append(paths, filter)
+ }
+ case ast.Set:
+ err := v.Iter(func(f *ast.Term) error {
+ filter, err := parsePath(f)
+ if err != nil {
+ return err
+ }
+ paths = append(paths, filter)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, builtins.NewOperandTypeErr(2, v, "set", "array")
+ }
+
+ return paths, nil
+}
+
+func parsePath(path *ast.Term) (ast.Ref, error) {
+ // paths can either be a `/` separated json path or
+ // an array or set of values
+ var pathSegments ast.Ref
+ switch p := path.Value.(type) {
+ case ast.String:
+ if p == "" {
+ return ast.Ref{}, nil
+ }
+ parts := strings.Split(strings.TrimLeft(string(p), "/"), "/")
+ for _, part := range parts {
+ part = strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~")
+ pathSegments = append(pathSegments, ast.StringTerm(part))
+ }
+ case *ast.Array:
+ p.Foreach(func(term *ast.Term) {
+ pathSegments = append(pathSegments, term)
+ })
+ default:
+ return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.TypeName(p))
+ }
+
+ return pathSegments, nil
+}
+
+func pathsToObject(paths []ast.Ref) ast.Object {
+
+ root := ast.NewObject()
+
+ for _, path := range paths {
+ node := root
+ var done bool
+
+ for i := 0; i < len(path)-1 && !done; i++ {
+
+ k := path[i]
+ child := node.Get(k)
+
+ if child == nil {
+ obj := ast.NewObject()
+ node.Insert(k, ast.NewTerm(obj))
+ node = obj
+ continue
+ }
+
+ switch v := child.Value.(type) {
+ case ast.Null:
+ done = true
+ case ast.Object:
+ node = v
+ default:
+ panic("unreachable")
+ }
+ }
+
+ if !done {
+ node.Insert(path[len(path)-1], ast.NullTerm())
+ }
+ }
+
+ return root
+}
+
+// toIndex tries to convert path elements (that may be strings) into indices into
+// an array.
+func toIndex(arr *ast.Array, term *ast.Term) (int, error) {
+ i := 0
+ ok := true
+ switch v := term.Value.(type) {
+ case ast.Number:
+ if i, ok = v.Int(); !ok {
+ return 0, fmt.Errorf("Invalid number type for indexing")
+ }
+ case ast.String:
+ if v == "-" {
+ return arr.Len(), nil
+ }
+ num := ast.Number(v)
+ if i, ok = num.Int(); !ok {
+ return 0, fmt.Errorf("Invalid string for indexing")
+ }
+ if v != "0" && strings.HasPrefix(string(v), "0") {
+ return 0, fmt.Errorf("Leading zeros are not allowed in JSON paths")
+ }
+ default:
+ return 0, fmt.Errorf("Invalid type for indexing")
+ }
+
+ return i, nil
+}
+
+// patchWorker is a worker that modifies a direct child of a term located
+// at the given key. It returns the new term, and optionally a result that
+// is passed back to the caller.
+type patchWorker = func(parent, key *ast.Term) (updated, result *ast.Term)
+
+func jsonPatchTraverse(
+ target *ast.Term,
+ path ast.Ref,
+ worker patchWorker,
+) (*ast.Term, *ast.Term) {
+ if len(path) < 1 {
+ return nil, nil
+ }
+
+ key := path[0]
+ if len(path) == 1 {
+ return worker(target, key)
+ }
+
+ success := false
+ var updated, result *ast.Term
+ switch parent := target.Value.(type) {
+ case ast.Object:
+ obj := ast.NewObject()
+ parent.Foreach(func(k, v *ast.Term) {
+ if k.Equal(key) {
+ if v, result = jsonPatchTraverse(v, path[1:], worker); v != nil {
+ obj.Insert(k, v)
+ success = true
+ }
+ } else {
+ obj.Insert(k, v)
+ }
+ })
+ updated = ast.NewTerm(obj)
+
+ case *ast.Array:
+ idx, err := toIndex(parent, key)
+ if err != nil {
+ return nil, nil
+ }
+ arr := ast.NewArray()
+ for i := 0; i < parent.Len(); i++ {
+ v := parent.Elem(i)
+ if idx == i {
+ if v, result = jsonPatchTraverse(v, path[1:], worker); v != nil {
+ arr = arr.Append(v)
+ success = true
+ }
+ } else {
+ arr = arr.Append(v)
+ }
+ }
+ updated = ast.NewTerm(arr)
+
+ case ast.Set:
+ set := ast.NewSet()
+ parent.Foreach(func(k *ast.Term) {
+ if k.Equal(key) {
+ if k, result = jsonPatchTraverse(k, path[1:], worker); k != nil {
+ set.Add(k)
+ success = true
+ }
+ } else {
+ set.Add(k)
+ }
+ })
+ updated = ast.NewTerm(set)
+ }
+
+ if success {
+ return updated, result
+ }
+
+ return nil, nil
+}
+
+// jsonPatchGet goes one step further than jsonPatchTraverse and returns the
+// term at the location specified by the path. It is used in functions
+// where we want to read a value but not manipulate its parent: for example
+// jsonPatchTest and jsonPatchCopy.
+//
+// Because it uses jsonPatchTraverse, it makes shallow copies of the objects
+// along the path. We could possibly add a signaling mechanism that we didn't
+// make any changes to avoid this.
+func jsonPatchGet(target *ast.Term, path ast.Ref) *ast.Term {
+ // Special case: get entire document.
+ if len(path) == 0 {
+ return target
+ }
+
+ _, result := jsonPatchTraverse(target, path, func(parent, key *ast.Term) (*ast.Term, *ast.Term) {
+ switch v := parent.Value.(type) {
+ case ast.Object:
+ return parent, v.Get(key)
+ case *ast.Array:
+ i, err := toIndex(v, key)
+ if err == nil {
+ return parent, v.Elem(i)
+ }
+ case ast.Set:
+ if v.Contains(key) {
+ return parent, key
+ }
+ }
+ return nil, nil
+ })
+ return result
+}
+
+func jsonPatchAdd(target *ast.Term, path ast.Ref, value *ast.Term) *ast.Term {
+ // Special case: replacing root document.
+ if len(path) == 0 {
+ return value
+ }
+
+ target, _ = jsonPatchTraverse(target, path, func(parent *ast.Term, key *ast.Term) (*ast.Term, *ast.Term) {
+ switch original := parent.Value.(type) {
+ case ast.Object:
+ obj := ast.NewObject()
+ original.Foreach(func(k, v *ast.Term) {
+ obj.Insert(k, v)
+ })
+ obj.Insert(key, value)
+ return ast.NewTerm(obj), nil
+ case *ast.Array:
+ idx, err := toIndex(original, key)
+ if err != nil || idx < 0 || idx > original.Len() {
+ return nil, nil
+ }
+ arr := ast.NewArray()
+ for i := 0; i < idx; i++ {
+ arr = arr.Append(original.Elem(i))
+ }
+ arr = arr.Append(value)
+ for i := idx; i < original.Len(); i++ {
+ arr = arr.Append(original.Elem(i))
+ }
+ return ast.NewTerm(arr), nil
+ case ast.Set:
+ if !key.Equal(value) {
+ return nil, nil
+ }
+ set := ast.NewSet()
+ original.Foreach(func(k *ast.Term) {
+ set.Add(k)
+ })
+ set.Add(key)
+ return ast.NewTerm(set), nil
+ }
+ return nil, nil
+ })
+
+ return target
+}
+
+func jsonPatchRemove(target *ast.Term, path ast.Ref) (*ast.Term, *ast.Term) {
+ // Special case: replacing root document.
+ if len(path) == 0 {
+ return nil, nil
+ }
+
+ target, removed := jsonPatchTraverse(target, path, func(parent *ast.Term, key *ast.Term) (*ast.Term, *ast.Term) {
+ var removed *ast.Term
+ switch original := parent.Value.(type) {
+ case ast.Object:
+ obj := ast.NewObject()
+ original.Foreach(func(k, v *ast.Term) {
+ if k.Equal(key) {
+ removed = v
+ } else {
+ obj.Insert(k, v)
+ }
+ })
+ return ast.NewTerm(obj), removed
+ case *ast.Array:
+ idx, err := toIndex(original, key)
+ if err != nil || idx < 0 || idx >= original.Len() {
+ return nil, nil
+ }
+ arr := ast.NewArray()
+ for i := 0; i < idx; i++ {
+ arr = arr.Append(original.Elem(i))
+ }
+ removed = original.Elem(idx)
+ for i := idx + 1; i < original.Len(); i++ {
+ arr = arr.Append(original.Elem(i))
+ }
+ return ast.NewTerm(arr), removed
+ case ast.Set:
+ set := ast.NewSet()
+ original.Foreach(func(k *ast.Term) {
+ if k.Equal(key) {
+ removed = k
+ } else {
+ set.Add(k)
+ }
+ })
+ return ast.NewTerm(set), removed
+ }
+ return nil, nil
+ })
+
+ if target != nil && removed != nil {
+ return target, removed
+ }
+
+ return nil, nil
+}
+
+func jsonPatchReplace(target *ast.Term, path ast.Ref, value *ast.Term) *ast.Term {
+ // Special case: replacing the whole document.
+ if len(path) == 0 {
+ return value
+ }
+
+ // Replace is specified as `remove` followed by `add`.
+ if target, _ = jsonPatchRemove(target, path); target == nil {
+ return nil
+ }
+
+ return jsonPatchAdd(target, path, value)
+}
+
+func jsonPatchMove(target *ast.Term, path ast.Ref, from ast.Ref) *ast.Term {
+ // Move is specified as `remove` followed by `add`.
+ target, removed := jsonPatchRemove(target, from)
+ if target == nil || removed == nil {
+ return nil
+ }
+
+ return jsonPatchAdd(target, path, removed)
+}
+
+func jsonPatchCopy(target *ast.Term, path ast.Ref, from ast.Ref) *ast.Term {
+ value := jsonPatchGet(target, from)
+ if value == nil {
+ return nil
+ }
+
+ return jsonPatchAdd(target, path, value)
+}
+
+func jsonPatchTest(target *ast.Term, path ast.Ref, value *ast.Term) *ast.Term {
+ actual := jsonPatchGet(target, path)
+ if actual == nil {
+ return nil
+ }
+
+ if actual.Equal(value) {
+ return target
+ }
+
+ return nil
+}
+
+func builtinJSONPatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ // JSON patch supports arrays, objects as well as values as the target.
+ target := ast.NewTerm(operands[0].Value)
+
+ // Expect an array of operations.
+ operations, err := builtins.ArrayOperand(operands[1].Value, 2)
+ if err != nil {
+ return err
+ }
+
+ // Apply operations one by one.
+ for i := 0; i < operations.Len(); i++ {
+ if object, ok := operations.Elem(i).Value.(ast.Object); ok {
+ getAttribute := func(attr string) (*ast.Term, error) {
+ if term := object.Get(ast.StringTerm(attr)); term != nil {
+ return term, nil
+ }
+
+ return nil, builtins.NewOperandErr(2, fmt.Sprintf("patch is missing '%s' attribute", attr))
+ }
+
+ getPathAttribute := func(attr string) (ast.Ref, error) {
+ term, err := getAttribute(attr)
+ if err != nil {
+ return ast.Ref{}, err
+ }
+ path, err := parsePath(term)
+ if err != nil {
+ return ast.Ref{}, err
+ }
+ return path, nil
+ }
+
+ // Parse operation.
+ opTerm, err := getAttribute("op")
+ if err != nil {
+ return err
+ }
+ op, ok := opTerm.Value.(ast.String)
+ if !ok {
+ return builtins.NewOperandErr(2, "patch attribute 'op' must be a string")
+ }
+
+ // Parse path.
+ path, err := getPathAttribute("path")
+ if err != nil {
+ return err
+ }
+
+ switch op {
+ case "add":
+ value, err := getAttribute("value")
+ if err != nil {
+ return err
+ }
+ target = jsonPatchAdd(target, path, value)
+ case "remove":
+ target, _ = jsonPatchRemove(target, path)
+ case "replace":
+ value, err := getAttribute("value")
+ if err != nil {
+ return err
+ }
+ target = jsonPatchReplace(target, path, value)
+ case "move":
+ from, err := getPathAttribute("from")
+ if err != nil {
+ return err
+ }
+ target = jsonPatchMove(target, path, from)
+ case "copy":
+ from, err := getPathAttribute("from")
+ if err != nil {
+ return err
+ }
+ target = jsonPatchCopy(target, path, from)
+ case "test":
+ value, err := getAttribute("value")
+ if err != nil {
+ return err
+ }
+ target = jsonPatchTest(target, path, value)
+ default:
+ return builtins.NewOperandErr(2, "must be an array of JSON-Patch objects")
+ }
+ } else {
+ return builtins.NewOperandErr(2, "must be an array of JSON-Patch objects")
+ }
+
+ // JSON patches should work atomically; and if one of them fails,
+ // we should not try to continue.
+ if target == nil {
+ return nil
+ }
+ }
+
+ return iter(target)
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.JSONFilter.Name, builtinJSONFilter)
+ RegisterBuiltinFunc(ast.JSONRemove.Name, builtinJSONRemove)
+ RegisterBuiltinFunc(ast.JSONPatch.Name, builtinJSONPatch)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/numbers.go b/vendor/github.com/open-policy-agent/opa/topdown/numbers.go
new file mode 100644
index 00000000..e9edbf2b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/numbers.go
@@ -0,0 +1,46 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "math/big"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+var one = big.NewInt(1)
+
+func builtinNumbersRange(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ x, err := builtins.BigIntOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ y, err := builtins.BigIntOperand(operands[1].Value, 2)
+ if err != nil {
+ return err
+ }
+
+ result := ast.NewArray()
+ cmp := x.Cmp(y)
+
+ if cmp <= 0 {
+ for i := new(big.Int).Set(x); i.Cmp(y) <= 0; i = i.Add(i, one) {
+ result = result.Append(ast.NewTerm(builtins.IntToNumber(i)))
+ }
+ } else {
+ for i := new(big.Int).Set(x); i.Cmp(y) >= 0; i = i.Sub(i, one) {
+ result = result.Append(ast.NewTerm(builtins.IntToNumber(i)))
+ }
+ }
+
+ return iter(ast.NewTerm(result))
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.NumbersRange.Name, builtinNumbersRange)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/object.go b/vendor/github.com/open-policy-agent/opa/topdown/object.go
new file mode 100644
index 00000000..ff01b4f9
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/object.go
@@ -0,0 +1,140 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/types"
+)
+
+func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ objA, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ objB, err := builtins.ObjectOperand(operands[1].Value, 2)
+ if err != nil {
+ return err
+ }
+
+ r := mergeWithOverwrite(objA, objB)
+
+ return iter(ast.NewTerm(r))
+}
+
+func builtinObjectRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ // Expect an object and an array/set/object of keys
+ obj, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ // Build a set of keys to remove
+ keysToRemove, err := getObjectKeysParam(operands[1].Value)
+ if err != nil {
+ return err
+ }
+ r := ast.NewObject()
+ obj.Foreach(func(key *ast.Term, value *ast.Term) {
+ if !keysToRemove.Contains(key) {
+ r.Insert(key, value)
+ }
+ })
+
+ return iter(ast.NewTerm(r))
+}
+
+func builtinObjectFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ // Expect an object and an array/set/object of keys
+ obj, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ // Build a new object from the supplied filter keys
+ keys, err := getObjectKeysParam(operands[1].Value)
+ if err != nil {
+ return err
+ }
+
+ filterObj := ast.NewObject()
+ keys.Foreach(func(key *ast.Term) {
+ filterObj.Insert(key, ast.NullTerm())
+ })
+
+ // Actually do the filtering
+ r, err := obj.Filter(filterObj)
+ if err != nil {
+ return err
+ }
+
+ return iter(ast.NewTerm(r))
+}
+
+func builtinObjectGet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ object, err := builtins.ObjectOperand(operands[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ if ret := object.Get(operands[1]); ret != nil {
+ return iter(ret)
+ }
+
+ return iter(operands[2])
+}
+
+// getObjectKeysParam returns a set of key values
+// from a supplied ast array, object, set value
+func getObjectKeysParam(arrayOrSet ast.Value) (ast.Set, error) {
+ keys := ast.NewSet()
+
+ switch v := arrayOrSet.(type) {
+ case *ast.Array:
+ _ = v.Iter(func(f *ast.Term) error {
+ keys.Add(f)
+ return nil
+ })
+ case ast.Set:
+ _ = v.Iter(func(f *ast.Term) error {
+ keys.Add(f)
+ return nil
+ })
+ case ast.Object:
+ _ = v.Iter(func(k *ast.Term, _ *ast.Term) error {
+ keys.Add(k)
+ return nil
+ })
+ default:
+ return nil, builtins.NewOperandTypeErr(2, arrayOrSet, ast.TypeName(types.Object{}), ast.TypeName(types.S), ast.TypeName(types.Array{}))
+ }
+
+ return keys, nil
+}
+
+func mergeWithOverwrite(objA, objB ast.Object) ast.Object {
+ merged, _ := objA.MergeWith(objB, func(v1, v2 *ast.Term) (*ast.Term, bool) {
+ originalValueObj, ok2 := v1.Value.(ast.Object)
+ updateValueObj, ok1 := v2.Value.(ast.Object)
+ if !ok1 || !ok2 {
+ // If we can't merge, stick with the right-hand value
+ return v2, false
+ }
+
+ // Recursively update the existing value
+ merged := mergeWithOverwrite(originalValueObj, updateValueObj)
+ return ast.NewTerm(merged), false
+ })
+ return merged
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.ObjectUnion.Name, builtinObjectUnion)
+ RegisterBuiltinFunc(ast.ObjectRemove.Name, builtinObjectRemove)
+ RegisterBuiltinFunc(ast.ObjectFilter.Name, builtinObjectFilter)
+ RegisterBuiltinFunc(ast.ObjectGet.Name, builtinObjectGet)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse.go b/vendor/github.com/open-policy-agent/opa/topdown/parse.go
new file mode 100644
index 00000000..dc6e6fc5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/parse.go
@@ -0,0 +1,47 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+func builtinRegoParseModule(a, b ast.Value) (ast.Value, error) {
+
+ filename, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ input, err := builtins.StringOperand(b, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ module, err := ast.ParseModule(string(filename), string(input))
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(module); err != nil {
+ return nil, err
+ }
+
+ term, err := ast.ParseTerm(buf.String())
+ if err != nil {
+ return nil, err
+ }
+
+ return term.Value, nil
+}
+
+func init() {
+ RegisterFunctionalBuiltin2(ast.RegoParseModule.Name, builtinRegoParseModule)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go b/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go
new file mode 100644
index 00000000..13e3a66f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go
@@ -0,0 +1,153 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+const (
+ none int64 = 1
+ kb = 1000
+ ki = 1024
+ mb = kb * 1000
+ mi = ki * 1024
+ gb = mb * 1000
+ gi = mi * 1024
+ tb = gb * 1000
+ ti = gi * 1024
+)
+
+// The rune values for 0..9 as well as the period symbol (for parsing floats)
+var numRunes = []rune("0123456789.")
+
+func parseNumBytesError(msg string) error {
+ return fmt.Errorf("%s error: %s", ast.UnitsParseBytes.Name, msg)
+}
+
+func errUnitNotRecognized(unit string) error {
+ return parseNumBytesError(fmt.Sprintf("byte unit %s not recognized", unit))
+}
+
+var (
+ errNoAmount = parseNumBytesError("no byte amount provided")
+ errIntConv = parseNumBytesError("could not parse byte amount to integer")
+ errIncludesSpaces = parseNumBytesError("spaces not allowed in resource strings")
+)
+
+func builtinNumBytes(a ast.Value) (ast.Value, error) {
+ var m int64
+
+ raw, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ s := formatString(raw)
+
+ if strings.Contains(s, " ") {
+ return nil, errIncludesSpaces
+ }
+
+ numStr, unitStr := extractNumAndUnit(s)
+
+ if numStr == "" {
+ return nil, errNoAmount
+ }
+
+ switch unitStr {
+ case "":
+ m = none
+ case "kb", "k":
+ m = kb
+ case "kib", "ki":
+ m = ki
+ case "mb", "m":
+ m = mb
+ case "mib", "mi":
+ m = mi
+ case "gb", "g":
+ m = gb
+ case "gib", "gi":
+ m = gi
+ case "tb", "t":
+ m = tb
+ case "tib", "ti":
+ m = ti
+ default:
+ return nil, errUnitNotRecognized(unitStr)
+ }
+
+ num, err := strconv.ParseInt(numStr, 10, 64)
+ if err != nil {
+ return nil, errIntConv
+ }
+
+ total := num * m
+
+ return builtins.IntToNumber(big.NewInt(total)), nil
+}
+
+// Makes the string lower case and removes spaces and quotation marks
+func formatString(s ast.String) string {
+ str := string(s)
+ lower := strings.ToLower(str)
+ return strings.Replace(lower, "\"", "", -1)
+}
+
+// Splits the string into a number string à la "10" or "10.2" and a unit string à la "gb" or "MiB" or "foo". Either
+// can be an empty string (error handling is provided elsewhere).
+func extractNumAndUnit(s string) (string, string) {
+ isNum := func(r rune) (isNum bool) {
+ for _, nr := range numRunes {
+ if nr == r {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ // Returns the index of the first rune that's not a number (or 0 if there are only numbers)
+ getFirstNonNumIdx := func(s string) int {
+ for idx, r := range s {
+ if !isNum(r) {
+ return idx
+ }
+ }
+
+ return 0
+ }
+
+ firstRuneIsNum := func(s string) bool {
+ return len(s) > 0 && isNum(rune(s[0]))
+ }
+
+ firstNonNumIdx := getFirstNonNumIdx(s)
+
+ // The string contains only a number
+ numOnly := firstNonNumIdx == 0 && firstRuneIsNum(s)
+
+ // The string contains only a unit
+ unitOnly := firstNonNumIdx == 0 && !firstRuneIsNum(s)
+
+ if numOnly {
+ return s, ""
+ } else if unitOnly {
+ return "", s
+ } else {
+ return s[0:firstNonNumIdx], s[firstNonNumIdx:]
+ }
+}
+
+func init() {
+ RegisterFunctionalBuiltin1(ast.UnitsParseBytes.Name, builtinNumBytes)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/query.go b/vendor/github.com/open-policy-agent/opa/topdown/query.go
new file mode 100644
index 00000000..b22fa9c5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/query.go
@@ -0,0 +1,448 @@
+package topdown
+
+import (
+ "context"
+ "crypto/rand"
+ "io"
+ "sort"
+ "time"
+
+ "github.com/open-policy-agent/opa/resolver"
+ "github.com/open-policy-agent/opa/topdown/cache"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/storage"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+ "github.com/open-policy-agent/opa/topdown/copypropagation"
+)
+
+// QueryResultSet represents a collection of results returned by a query.
+type QueryResultSet []QueryResult
+
+// QueryResult represents a single result returned by a query. The result
+// contains bindings for all variables that appear in the query.
+type QueryResult map[ast.Var]*ast.Term
+
+// Query provides a configurable interface for performing query evaluation.
+type Query struct {
+ seed io.Reader
+ time time.Time
+ cancel Cancel
+ query ast.Body
+ queryCompiler ast.QueryCompiler
+ compiler *ast.Compiler
+ store storage.Store
+ txn storage.Transaction
+ input *ast.Term
+ external *resolverTrie
+ tracers []QueryTracer
+ plugTraceVars bool
+ unknowns []*ast.Term
+ partialNamespace string
+ skipSaveNamespace bool
+ metrics metrics.Metrics
+ instr *Instrumentation
+ disableInlining []ast.Ref
+ shallowInlining bool
+ genvarprefix string
+ runtime *ast.Term
+ builtins map[string]*Builtin
+ indexing bool
+ interQueryBuiltinCache cache.InterQueryCache
+ strictBuiltinErrors bool
+}
+
+// Builtin represents a built-in function that queries can call.
+type Builtin struct {
+ Decl *ast.Builtin
+ Func BuiltinFunc
+}
+
+// NewQuery returns a new Query object that can be run.
+func NewQuery(query ast.Body) *Query {
+ return &Query{
+ query: query,
+ genvarprefix: ast.WildcardPrefix,
+ indexing: true,
+ external: newResolverTrie(),
+ }
+}
+
+// WithQueryCompiler sets the queryCompiler used for the query.
+func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query {
+ q.queryCompiler = queryCompiler
+ return q
+}
+
+// WithCompiler sets the compiler to use for the query.
+func (q *Query) WithCompiler(compiler *ast.Compiler) *Query {
+ q.compiler = compiler
+ return q
+}
+
+// WithStore sets the store to use for the query.
+func (q *Query) WithStore(store storage.Store) *Query {
+ q.store = store
+ return q
+}
+
+// WithTransaction sets the transaction to use for the query. All queries
+// should be performed over a consistent snapshot of the storage layer.
+func (q *Query) WithTransaction(txn storage.Transaction) *Query {
+ q.txn = txn
+ return q
+}
+
+// WithCancel sets the cancellation object to use for the query. Set this if
+// you need to abort queries based on a deadline. This is optional.
+func (q *Query) WithCancel(cancel Cancel) *Query {
+ q.cancel = cancel
+ return q
+}
+
+// WithInput sets the input object to use for the query. References rooted at
+// input will be evaluated against this value. This is optional.
+func (q *Query) WithInput(input *ast.Term) *Query {
+ q.input = input
+ return q
+}
+
+// WithTracer adds a query tracer to use during evaluation. This is optional.
+// Deprecated: Use WithQueryTracer instead.
+func (q *Query) WithTracer(tracer Tracer) *Query {
+ qt, ok := tracer.(QueryTracer)
+ if !ok {
+ qt = WrapLegacyTracer(tracer)
+ }
+ return q.WithQueryTracer(qt)
+}
+
+// WithQueryTracer adds a query tracer to use during evaluation. This is optional.
+// Disabled QueryTracers will be ignored.
+func (q *Query) WithQueryTracer(tracer QueryTracer) *Query {
+ if !tracer.Enabled() {
+ return q
+ }
+
+ q.tracers = append(q.tracers, tracer)
+
+ // If *any* of the tracers require local variable metadata we need to
+	// enable plugging local trace variables.
+ conf := tracer.Config()
+ if conf.PlugLocalVars {
+ q.plugTraceVars = true
+ }
+
+ return q
+}
+
+// WithMetrics sets the metrics collection to add evaluation metrics to. This
+// is optional.
+func (q *Query) WithMetrics(m metrics.Metrics) *Query {
+ q.metrics = m
+ return q
+}
+
+// WithInstrumentation sets the instrumentation configuration to enable on the
+// evaluation process. By default, instrumentation is turned off.
+func (q *Query) WithInstrumentation(instr *Instrumentation) *Query {
+ q.instr = instr
+ return q
+}
+
+// WithUnknowns sets the initial set of variables or references to treat as
+// unknown during query evaluation. This is required for partial evaluation.
+func (q *Query) WithUnknowns(terms []*ast.Term) *Query {
+ q.unknowns = terms
+ return q
+}
+
+// WithPartialNamespace sets the namespace to use for supporting rules
+// generated as part of the partial evaluation process. The ns value must be a
+// valid package path component.
+func (q *Query) WithPartialNamespace(ns string) *Query {
+ q.partialNamespace = ns
+ return q
+}
+
+// WithSkipPartialNamespace disables namespacing of saved support rules that are generated
+// from the original policy (rules which are completely synthetic are still namespaced.)
+func (q *Query) WithSkipPartialNamespace(yes bool) *Query {
+ q.skipSaveNamespace = yes
+ return q
+}
+
+// WithDisableInlining adds a set of paths to the query that should be excluded from
+// inlining. Inlining during partial evaluation can be expensive in some cases
+// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive
+// computation at the cost of generating support rules.
+func (q *Query) WithDisableInlining(paths []ast.Ref) *Query {
+ q.disableInlining = paths
+ return q
+}
+
+// WithShallowInlining disables aggressive inlining performed during partial evaluation.
+// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined.
+// Only rules/values that are completely known will be inlined.
+func (q *Query) WithShallowInlining(yes bool) *Query {
+ q.shallowInlining = yes
+ return q
+}
+
+// WithRuntime sets the runtime data to execute the query with. The runtime data
+// can be returned by the `opa.runtime` built-in function.
+func (q *Query) WithRuntime(runtime *ast.Term) *Query {
+ q.runtime = runtime
+ return q
+}
+
+// WithBuiltins adds a set of built-in functions that can be called by the
+// query.
+func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query {
+ q.builtins = builtins
+ return q
+}
+
+// WithIndexing will enable or disable using rule indexing for the evaluation
+// of the query. The default is enabled.
+func (q *Query) WithIndexing(enabled bool) *Query {
+ q.indexing = enabled
+ return q
+}
+
+// WithSeed sets a reader that will seed randomization required by built-in functions.
+// If a seed is not provided crypto/rand.Reader is used.
+func (q *Query) WithSeed(r io.Reader) *Query {
+ q.seed = r
+ return q
+}
+
+// WithTime sets the time that will be returned by the time.now_ns() built-in function.
+func (q *Query) WithTime(x time.Time) *Query {
+ q.time = x
+ return q
+}
+
+// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize.
+func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query {
+ q.interQueryBuiltinCache = c
+ return q
+}
+
+// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors.
+func (q *Query) WithStrictBuiltinErrors(yes bool) *Query {
+ q.strictBuiltinErrors = yes
+ return q
+}
+
+// WithResolver configures an external resolver to use for the given ref.
+func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query {
+ q.external.Put(ref, r)
+ return q
+}
+
+// PartialRun executes partial evaluation on the query with respect to unknown
+// values. Partial evaluation attempts to evaluate as much of the query as
+// possible without requiring values for the unknowns set on the query. The
+// result of partial evaluation is a new set of queries that can be evaluated
+// once the unknown value is known. In addition to new queries, partial
+// evaluation may produce additional support modules that should be used in
+// conjunction with the partially evaluated queries.
+func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) {
+ if q.partialNamespace == "" {
+ q.partialNamespace = "partial" // lazily initialize partial namespace
+ }
+ if q.seed == nil {
+ q.seed = rand.Reader
+ }
+ if !q.time.IsZero() {
+ q.time = time.Now()
+ }
+ if q.metrics == nil {
+ q.metrics = metrics.New()
+ }
+ f := &queryIDFactory{}
+ b := newBindings(0, q.instr)
+ e := &eval{
+ ctx: ctx,
+ metrics: q.metrics,
+ seed: q.seed,
+ time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())),
+ cancel: q.cancel,
+ query: q.query,
+ queryCompiler: q.queryCompiler,
+ queryIDFact: f,
+ queryID: f.Next(),
+ bindings: b,
+ compiler: q.compiler,
+ store: q.store,
+ baseCache: newBaseCache(),
+ targetStack: newRefStack(),
+ txn: q.txn,
+ input: q.input,
+ external: q.external,
+ tracers: q.tracers,
+ traceEnabled: len(q.tracers) > 0,
+ plugTraceVars: q.plugTraceVars,
+ instr: q.instr,
+ builtins: q.builtins,
+ builtinCache: builtins.Cache{},
+ interQueryBuiltinCache: q.interQueryBuiltinCache,
+ virtualCache: newVirtualCache(),
+ comprehensionCache: newComprehensionCache(),
+ saveSet: newSaveSet(q.unknowns, b, q.instr),
+ saveStack: newSaveStack(),
+ saveSupport: newSaveSupport(),
+ saveNamespace: ast.StringTerm(q.partialNamespace),
+ skipSaveNamespace: q.skipSaveNamespace,
+ inliningControl: &inliningControl{
+ shallow: q.shallowInlining,
+ },
+ genvarprefix: q.genvarprefix,
+ runtime: q.runtime,
+ indexing: q.indexing,
+ builtinErrors: &builtinErrors{},
+ }
+
+ if len(q.disableInlining) > 0 {
+ e.inliningControl.PushDisable(q.disableInlining, false)
+ }
+
+ e.caller = e
+ q.metrics.Timer(metrics.RegoPartialEval).Start()
+ defer q.metrics.Timer(metrics.RegoPartialEval).Stop()
+
+ livevars := ast.NewVarSet()
+
+ ast.WalkVars(q.query, func(x ast.Var) bool {
+ if !x.IsGenerated() {
+ livevars.Add(x)
+ }
+ return false
+ })
+
+ p := copypropagation.New(livevars).WithCompiler(q.compiler)
+
+ err = e.Run(func(e *eval) error {
+
+ // Build output from saved expressions.
+ body := ast.NewBody()
+
+ for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] {
+ body.Append(elem.Plug(e.bindings))
+ }
+
+ // Include bindings as exprs so that when caller evals the result, they
+ // can obtain values for the vars in their query.
+ bindingExprs := []*ast.Expr{}
+ e.bindings.Iter(e.bindings, func(a, b *ast.Term) error {
+ bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b))
+ return nil
+ })
+
+ // Sort binding expressions so that results are deterministic.
+ sort.Slice(bindingExprs, func(i, j int) bool {
+ return bindingExprs[i].Compare(bindingExprs[j]) < 0
+ })
+
+ for i := range bindingExprs {
+ body.Append(bindingExprs[i])
+ }
+
+ // Skip this rule body if it fails to type-check.
+ // Type-checking failure means the rule body will never succeed.
+ if !e.compiler.PassesTypeCheck(body) {
+ return nil
+ }
+
+ if !q.shallowInlining {
+ body = applyCopyPropagation(p, e.instr, body)
+ }
+
+ partials = append(partials, body)
+ return nil
+ })
+
+ support = e.saveSupport.List()
+
+ if q.strictBuiltinErrors && len(e.builtinErrors.errs) > 0 {
+ err = e.builtinErrors.errs[0]
+ }
+
+ return partials, support, err
+}
+
+// Run is a wrapper around Iter that accumulates query results and returns them
+// in one shot.
+func (q *Query) Run(ctx context.Context) (QueryResultSet, error) {
+ qrs := QueryResultSet{}
+ return qrs, q.Iter(ctx, func(qr QueryResult) error {
+ qrs = append(qrs, qr)
+ return nil
+ })
+}
+
+// Iter executes the query and invokes the iter function with query results
+// produced by evaluating the query.
+func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error {
+ if q.seed == nil {
+ q.seed = rand.Reader
+ }
+ if q.time.IsZero() {
+ q.time = time.Now()
+ }
+ if q.metrics == nil {
+ q.metrics = metrics.New()
+ }
+ f := &queryIDFactory{}
+ e := &eval{
+ ctx: ctx,
+ metrics: q.metrics,
+ seed: q.seed,
+ time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())),
+ cancel: q.cancel,
+ query: q.query,
+ queryCompiler: q.queryCompiler,
+ queryIDFact: f,
+ queryID: f.Next(),
+ bindings: newBindings(0, q.instr),
+ compiler: q.compiler,
+ store: q.store,
+ baseCache: newBaseCache(),
+ targetStack: newRefStack(),
+ txn: q.txn,
+ input: q.input,
+ external: q.external,
+ tracers: q.tracers,
+ traceEnabled: len(q.tracers) > 0,
+ plugTraceVars: q.plugTraceVars,
+ instr: q.instr,
+ builtins: q.builtins,
+ builtinCache: builtins.Cache{},
+ interQueryBuiltinCache: q.interQueryBuiltinCache,
+ virtualCache: newVirtualCache(),
+ comprehensionCache: newComprehensionCache(),
+ genvarprefix: q.genvarprefix,
+ runtime: q.runtime,
+ indexing: q.indexing,
+ builtinErrors: &builtinErrors{},
+ }
+ e.caller = e
+ q.metrics.Timer(metrics.RegoQueryEval).Start()
+ err := e.Run(func(e *eval) error {
+ qr := QueryResult{}
+ e.bindings.Iter(nil, func(k, v *ast.Term) error {
+ qr[k.Value.(ast.Var)] = v
+ return nil
+ })
+ return iter(qr)
+ })
+
+ if q.strictBuiltinErrors && err == nil && len(e.builtinErrors.errs) > 0 {
+ err = e.builtinErrors.errs[0]
+ }
+
+ q.metrics.Timer(metrics.RegoQueryEval).Stop()
+ return err
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/reachable.go b/vendor/github.com/open-policy-agent/opa/topdown/reachable.go
new file mode 100644
index 00000000..6b73fa2c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/reachable.go
@@ -0,0 +1,61 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// Helper: sets of vertices can be represented as Arrays or Sets.
+func foreachVertex(collection *ast.Term, f func(*ast.Term)) {
+ switch v := collection.Value.(type) {
+ case ast.Set:
+ v.Foreach(f)
+ case *ast.Array:
+ v.Foreach(f)
+ }
+}
+
+func builtinReachable(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ // Return the empty set if the first argument is not an object.
+ graph, ok := args[0].Value.(ast.Object)
+ if !ok {
+ return iter(ast.NewTerm(ast.NewSet()))
+ }
+
+ // This is a queue that holds all nodes we still need to visit. It is
+ // initialised to the initial set of nodes we start out with.
+ queue := []*ast.Term{}
+ foreachVertex(args[1], func(t *ast.Term) {
+ queue = append(queue, t)
+ })
+
+ // This is the set of nodes we have reached.
+ reached := ast.NewSet()
+
+ // Keep going as long as we have nodes in the queue.
+ for len(queue) > 0 {
+ // Get the edges for this node. If the node was not in the graph,
+ // `edges` will be `nil` and we can ignore it.
+ node := queue[0]
+ if edges := graph.Get(node); edges != nil {
+ // Add all the newly discovered neighbors.
+ foreachVertex(edges, func(neighbor *ast.Term) {
+ if !reached.Contains(neighbor) {
+ queue = append(queue, neighbor)
+ }
+ })
+ // Mark the node as reached.
+ reached.Add(node)
+ }
+ queue = queue[1:]
+ }
+
+ return iter(ast.NewTerm(reached))
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.ReachableBuiltin.Name, builtinReachable)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/vendor/github.com/open-policy-agent/opa/topdown/regex.go
new file mode 100644
index 00000000..7e29db79
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/regex.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "regexp"
+ "sync"
+
+ gintersect "github.com/yashtewari/glob-intersection"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+var regexpCacheLock = sync.Mutex{}
+var regexpCache map[string]*regexp.Regexp
+
+func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+
+ s, err := builtins.StringOperand(operands[0].Value, 1)
+ if err != nil {
+ return iter(ast.BooleanTerm(false))
+ }
+
+ _, err = regexp.Compile(string(s))
+ if err != nil {
+ return iter(ast.BooleanTerm(false))
+ }
+
+ return iter(ast.BooleanTerm(true))
+}
+
+func builtinRegexMatch(a, b ast.Value) (ast.Value, error) {
+ s1, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s2, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ re, err := getRegexp(string(s1))
+ if err != nil {
+ return nil, err
+ }
+ return ast.Boolean(re.Match([]byte(s2))), nil
+}
+
+func builtinRegexMatchTemplate(a, b, c, d ast.Value) (ast.Value, error) {
+ pattern, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ match, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ start, err := builtins.StringOperand(c, 3)
+ if err != nil {
+ return nil, err
+ }
+ end, err := builtins.StringOperand(d, 4)
+ if err != nil {
+ return nil, err
+ }
+ if len(start) != 1 {
+ return nil, fmt.Errorf("start delimiter has to be exactly one character long but is %d long", len(start))
+ }
+ if len(end) != 1 {
+ return nil, fmt.Errorf("end delimiter has to be exactly one character long but is %d long", len(start))
+ }
+ re, err := getRegexpTemplate(string(pattern), string(start)[0], string(end)[0])
+ if err != nil {
+ return nil, err
+ }
+ return ast.Boolean(re.MatchString(string(match))), nil
+}
+
+func builtinRegexSplit(a, b ast.Value) (ast.Value, error) {
+ s1, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s2, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ re, err := getRegexp(string(s1))
+ if err != nil {
+ return nil, err
+ }
+
+ elems := re.Split(string(s2), -1)
+ arr := make([]*ast.Term, len(elems))
+ for i := range elems {
+ arr[i] = ast.StringTerm(elems[i])
+ }
+ return ast.NewArray(arr...), nil
+}
+
+func getRegexp(pat string) (*regexp.Regexp, error) {
+ regexpCacheLock.Lock()
+ defer regexpCacheLock.Unlock()
+ re, ok := regexpCache[pat]
+ if !ok {
+ var err error
+ re, err = regexp.Compile(string(pat))
+ if err != nil {
+ return nil, err
+ }
+ regexpCache[pat] = re
+ }
+ return re, nil
+}
+
+func getRegexpTemplate(pat string, delimStart, delimEnd byte) (*regexp.Regexp, error) {
+ regexpCacheLock.Lock()
+ defer regexpCacheLock.Unlock()
+ re, ok := regexpCache[pat]
+ if !ok {
+ var err error
+ re, err = compileRegexTemplate(string(pat), delimStart, delimEnd)
+ if err != nil {
+ return nil, err
+ }
+ regexpCache[pat] = re
+ }
+ return re, nil
+}
+
+func builtinGlobsMatch(a, b ast.Value) (ast.Value, error) {
+ s1, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s2, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ ne, err := gintersect.NonEmpty(string(s1), string(s2))
+ if err != nil {
+ return nil, err
+ }
+ return ast.Boolean(ne), nil
+}
+
+func builtinRegexFind(a, b, c ast.Value) (ast.Value, error) {
+ s1, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s2, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ n, err := builtins.IntOperand(c, 3)
+ if err != nil {
+ return nil, err
+ }
+ re, err := getRegexp(string(s1))
+ if err != nil {
+ return nil, err
+ }
+
+ elems := re.FindAllString(string(s2), n)
+ arr := make([]*ast.Term, len(elems))
+ for i := range elems {
+ arr[i] = ast.StringTerm(elems[i])
+ }
+ return ast.NewArray(arr...), nil
+}
+
+func builtinRegexFindAllStringSubmatch(a, b, c ast.Value) (ast.Value, error) {
+ s1, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ s2, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ n, err := builtins.IntOperand(c, 3)
+ if err != nil {
+ return nil, err
+ }
+
+ re, err := getRegexp(string(s1))
+ if err != nil {
+ return nil, err
+ }
+ matches := re.FindAllStringSubmatch(string(s2), n)
+
+ outer := make([]*ast.Term, len(matches))
+ for i := range matches {
+ inner := make([]*ast.Term, len(matches[i]))
+ for j := range matches[i] {
+ inner[j] = ast.StringTerm(matches[i][j])
+ }
+ outer[i] = ast.NewTerm(ast.NewArray(inner...))
+ }
+
+ return ast.NewArray(outer...), nil
+}
+
+func init() {
+ regexpCache = map[string]*regexp.Regexp{}
+ RegisterBuiltinFunc(ast.RegexIsValid.Name, builtinRegexIsValid)
+ RegisterFunctionalBuiltin2(ast.RegexMatch.Name, builtinRegexMatch)
+ RegisterFunctionalBuiltin2(ast.RegexMatchDeprecated.Name, builtinRegexMatch)
+ RegisterFunctionalBuiltin2(ast.RegexSplit.Name, builtinRegexSplit)
+ RegisterFunctionalBuiltin2(ast.GlobsMatch.Name, builtinGlobsMatch)
+ RegisterFunctionalBuiltin4(ast.RegexTemplateMatch.Name, builtinRegexMatchTemplate)
+ RegisterFunctionalBuiltin3(ast.RegexFind.Name, builtinRegexFind)
+ RegisterFunctionalBuiltin3(ast.RegexFindAllStringSubmatch.Name, builtinRegexFindAllStringSubmatch)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go b/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go
new file mode 100644
index 00000000..39f92346
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go
@@ -0,0 +1,122 @@
+package topdown
+
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license as follows:
+
+// Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file was forked from https://github.com/gorilla/mux/commit/eac83ba2c004bb75
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+)
+
+// delimiterIndices returns the first level delimiter indices from a string.
+// It returns an error in case of unbalanced delimiters.
+func delimiterIndices(s string, delimiterStart, delimiterEnd byte) ([]int, error) {
+ var level, idx int
+ idxs := make([]int, 0)
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case delimiterStart:
+ if level++; level == 1 {
+ idx = i
+ }
+ case delimiterEnd:
+ if level--; level == 0 {
+ idxs = append(idxs, idx, i+1)
+ } else if level < 0 {
+ return nil, fmt.Errorf(`unbalanced braces in %q`, s)
+ }
+ }
+ }
+
+ if level != 0 {
+ return nil, fmt.Errorf(`unbalanced braces in %q`, s)
+ }
+
+ return idxs, nil
+}
+
+// compileRegexTemplate parses a template and returns a Regexp.
+//
+// You can define your own delimiters. It is e.g. common to use curly braces {} but I recommend using characters
+// which have no special meaning in Regex, e.g.: <, >
+//
+// reg, err := compiler.CompileRegex("foo:bar.baz:<[0-9]{2,10}>", '<', '>')
+// // if err != nil ...
+// reg.MatchString("foo:bar.baz:123")
+func compileRegexTemplate(tpl string, delimiterStart, delimiterEnd byte) (*regexp.Regexp, error) {
+ // Check if it is well-formed.
+ idxs, errBraces := delimiterIndices(tpl, delimiterStart, delimiterEnd)
+ if errBraces != nil {
+ return nil, errBraces
+ }
+ varsR := make([]*regexp.Regexp, len(idxs)/2)
+ pattern := bytes.NewBufferString("")
+
+ // WriteByte's error value is always nil for bytes.Buffer, no need to check it.
+ pattern.WriteByte('^')
+
+ var end int
+ var err error
+ for i := 0; i < len(idxs); i += 2 {
+ // Set all values we are interested in.
+ raw := tpl[end:idxs[i]]
+ end = idxs[i+1]
+ patt := tpl[idxs[i]+1 : end-1]
+ // Build the regexp pattern.
+ varIdx := i / 2
+ fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt)
+ varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Add the remaining.
+ raw := tpl[end:]
+
+ // WriteString's error value is always nil for bytes.Buffer, no need to check it.
+ pattern.WriteString(regexp.QuoteMeta(raw))
+
+ // WriteByte's error value is always nil for bytes.Buffer, no need to check it.
+ pattern.WriteByte('$')
+
+ // Compile full regexp.
+ reg, errCompile := regexp.Compile(pattern.String())
+ if errCompile != nil {
+ return nil, errCompile
+ }
+
+ return reg, nil
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/resolver.go b/vendor/github.com/open-policy-agent/opa/topdown/resolver.go
new file mode 100644
index 00000000..5ed6c1e4
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/resolver.go
@@ -0,0 +1,107 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/metrics"
+ "github.com/open-policy-agent/opa/resolver"
+)
+
+type resolverTrie struct {
+ r resolver.Resolver
+ children map[ast.Value]*resolverTrie
+}
+
+func newResolverTrie() *resolverTrie {
+ return &resolverTrie{children: map[ast.Value]*resolverTrie{}}
+}
+
+func (t *resolverTrie) Put(ref ast.Ref, r resolver.Resolver) {
+ node := t
+ for _, t := range ref {
+ child, ok := node.children[t.Value]
+ if !ok {
+ child = &resolverTrie{children: map[ast.Value]*resolverTrie{}}
+ node.children[t.Value] = child
+ }
+ node = child
+ }
+ node.r = r
+}
+
+func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) {
+ e.metrics.Timer(metrics.RegoExternalResolve).Start()
+ defer e.metrics.Timer(metrics.RegoExternalResolve).Stop()
+ node := t
+ for i, t := range ref {
+ child, ok := node.children[t.Value]
+ if !ok {
+ return nil, nil
+ }
+ node = child
+ if node.r != nil {
+ in := resolver.Input{
+ Ref: ref[:i+1],
+ Input: e.input,
+ Metrics: e.metrics,
+ }
+ e.traceWasm(e.query[e.index], &in.Ref)
+ if e.data != nil {
+ return nil, errInScopeWithStmt
+ }
+ result, err := node.r.Eval(e.ctx, in)
+ if err != nil {
+ return nil, err
+ }
+ if result.Value == nil {
+ return nil, nil
+ }
+ val, err := result.Value.Find(ref[i+1:])
+ if err != nil {
+ return nil, nil
+ }
+ return val, nil
+ }
+ }
+ return node.mktree(e, resolver.Input{
+ Ref: ref,
+ Input: e.input,
+ Metrics: e.metrics,
+ })
+}
+
+func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) {
+ if t.r != nil {
+ e.traceWasm(e.query[e.index], &in.Ref)
+ if e.data != nil {
+ return nil, errInScopeWithStmt
+ }
+ result, err := t.r.Eval(e.ctx, in)
+ if err != nil {
+ return nil, err
+ }
+ if result.Value == nil {
+ return nil, nil
+ }
+ return result.Value, nil
+ }
+ obj := ast.NewObject()
+ for k, child := range t.children {
+ v, err := child.mktree(e, resolver.Input{Ref: append(in.Ref, ast.NewTerm(k)), Input: in.Input, Metrics: in.Metrics})
+ if err != nil {
+ return nil, err
+ }
+ if v != nil {
+ obj.Insert(ast.NewTerm(k), ast.NewTerm(v))
+ }
+ }
+ return obj, nil
+}
+
+var errInScopeWithStmt = &Error{
+ Code: InternalErr,
+ Message: "wasm cannot be executed when 'with' statements are in-scope",
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/runtime.go b/vendor/github.com/open-policy-agent/opa/topdown/runtime.go
new file mode 100644
index 00000000..67e183d0
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/runtime.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import "github.com/open-policy-agent/opa/ast"
+
+func builtinOPARuntime(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error {
+
+ if bctx.Runtime == nil {
+ return iter(ast.ObjectTerm())
+ }
+
+ return iter(bctx.Runtime)
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.OPARuntime.Name, builtinOPARuntime)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/save.go b/vendor/github.com/open-policy-agent/opa/topdown/save.go
new file mode 100644
index 00000000..851647da
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/save.go
@@ -0,0 +1,427 @@
+package topdown
+
+import (
+ "container/list"
+ "fmt"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+// saveSet contains a stack of terms that are considered 'unknown' during
+// partial evaluation. Only var and ref terms (rooted at one of the root
+// documents) can be added to the save set. Vars added to the save set are
+// namespaced by the binding list they are added with. This means the save set
+// can be shared across queries.
+type saveSet struct {
+ instr *Instrumentation
+ l *list.List
+}
+
+func newSaveSet(ts []*ast.Term, b *bindings, instr *Instrumentation) *saveSet {
+ ss := &saveSet{
+ l: list.New(),
+ instr: instr,
+ }
+ ss.Push(ts, b)
+ return ss
+}
+
+func (ss *saveSet) Push(ts []*ast.Term, b *bindings) {
+ ss.l.PushBack(newSaveSetElem(ts, b))
+}
+
+func (ss *saveSet) Pop() {
+ ss.l.Remove(ss.l.Back())
+}
+
+// Contains returns true if the term t is contained in the save set. Non-var and
+// non-ref terms are never contained. Ref terms are contained if they share a
+// prefix with a ref that was added (in either direction).
+func (ss *saveSet) Contains(t *ast.Term, b *bindings) bool {
+ if ss != nil {
+ ss.instr.startTimer(partialOpSaveSetContains)
+ ret := ss.contains(t, b)
+ ss.instr.stopTimer(partialOpSaveSetContains)
+ return ret
+ }
+ return false
+}
+
+func (ss *saveSet) contains(t *ast.Term, b *bindings) bool {
+ for el := ss.l.Back(); el != nil; el = el.Prev() {
+ if el.Value.(*saveSetElem).Contains(t, b) {
+ return true
+ }
+ }
+ return false
+}
+
+// ContainsRecursive retruns true if the term t is or contains a term that is
+// contained in the save set. This function will close over the binding list
+// when it encounters vars.
+func (ss *saveSet) ContainsRecursive(t *ast.Term, b *bindings) bool {
+ if ss != nil {
+ ss.instr.startTimer(partialOpSaveSetContainsRec)
+ ret := ss.containsrec(t, b)
+ ss.instr.stopTimer(partialOpSaveSetContainsRec)
+ return ret
+ }
+ return false
+}
+
+func (ss *saveSet) containsrec(t *ast.Term, b *bindings) bool {
+ var found bool
+ ast.WalkTerms(t, func(x *ast.Term) bool {
+ if _, ok := x.Value.(ast.Var); ok {
+ x1, b1 := b.apply(x)
+ if x1 != x || b1 != b {
+ if ss.containsrec(x1, b1) {
+ found = true
+ }
+ } else if ss.contains(x1, b1) {
+ found = true
+ }
+ }
+ return found
+ })
+ return found
+}
+
+func (ss *saveSet) Vars(caller *bindings) ast.VarSet {
+ result := ast.NewVarSet()
+ for x := ss.l.Front(); x != nil; x = x.Next() {
+ elem := x.Value.(*saveSetElem)
+ for _, v := range elem.vars {
+ if v, ok := elem.b.PlugNamespaced(v, caller).Value.(ast.Var); ok {
+ result.Add(v)
+ }
+ }
+ }
+ return result
+}
+
+func (ss *saveSet) String() string {
+ var buf []string
+
+ for x := ss.l.Front(); x != nil; x = x.Next() {
+ buf = append(buf, x.Value.(*saveSetElem).String())
+ }
+
+ return "(" + strings.Join(buf, " ") + ")"
+}
+
+type saveSetElem struct {
+ refs []ast.Ref
+ vars []*ast.Term
+ b *bindings
+}
+
+func newSaveSetElem(ts []*ast.Term, b *bindings) *saveSetElem {
+
+ var refs []ast.Ref
+ var vars []*ast.Term
+
+ for _, t := range ts {
+ switch v := t.Value.(type) {
+ case ast.Var:
+ vars = append(vars, t)
+ case ast.Ref:
+ refs = append(refs, v)
+ default:
+ panic("illegal value")
+ }
+ }
+
+ return &saveSetElem{
+ b: b,
+ vars: vars,
+ refs: refs,
+ }
+}
+
+func (sse *saveSetElem) Contains(t *ast.Term, b *bindings) bool {
+ switch other := t.Value.(type) {
+ case ast.Var:
+ return sse.containsVar(t, b)
+ case ast.Ref:
+ for _, ref := range sse.refs {
+ if ref.HasPrefix(other) || other.HasPrefix(ref) {
+ return true
+ }
+ }
+ return sse.containsVar(other[0], b)
+ }
+ return false
+}
+
+func (sse *saveSetElem) String() string {
+ return fmt.Sprintf("(refs: %v, vars: %v, b: %v)", sse.refs, sse.vars, sse.b)
+}
+
+func (sse *saveSetElem) containsVar(t *ast.Term, b *bindings) bool {
+ if b == sse.b {
+ for _, v := range sse.vars {
+ if v.Equal(t) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// saveStack contains a stack of queries that represent the result of partial
+// evaluation. When partial evaluation completes, the top of the stack
+// represents a complete, partially evaluated query that can be saved and
+// evaluated later.
+//
+// The result is stored in a stack so that partial evaluation of a query can be
+// paused and then resumed in cases where different queries make up the result
+// of partial evaluation, such as when a rule with a default clause is
+// partially evaluated. In this case, the partially evaluated rule will be
+// output in the support module.
+type saveStack struct {
+ Stack []saveStackQuery
+}
+
+func newSaveStack() *saveStack {
+ return &saveStack{
+ Stack: []saveStackQuery{
+ {},
+ },
+ }
+}
+
+func (s *saveStack) PushQuery(query saveStackQuery) {
+ s.Stack = append(s.Stack, query)
+}
+
+func (s *saveStack) PopQuery() saveStackQuery {
+ last := s.Stack[len(s.Stack)-1]
+ s.Stack = s.Stack[:len(s.Stack)-1]
+ return last
+}
+
+func (s *saveStack) Peek() saveStackQuery {
+ return s.Stack[len(s.Stack)-1]
+}
+
+func (s *saveStack) Push(expr *ast.Expr, b1 *bindings, b2 *bindings) {
+ idx := len(s.Stack) - 1
+ s.Stack[idx] = append(s.Stack[idx], saveStackElem{expr, b1, b2})
+}
+
+func (s *saveStack) Pop() {
+ idx := len(s.Stack) - 1
+ query := s.Stack[idx]
+ s.Stack[idx] = query[:len(query)-1]
+}
+
+type saveStackQuery []saveStackElem
+
+func (s saveStackQuery) Plug(b *bindings) ast.Body {
+ if len(s) == 0 {
+ return ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))
+ }
+ result := make(ast.Body, len(s))
+ for i := range s {
+ expr := s[i].Plug(b)
+ result.Set(expr, i)
+ }
+ return result
+}
+
+type saveStackElem struct {
+ Expr *ast.Expr
+ B1 *bindings
+ B2 *bindings
+}
+
+func (e saveStackElem) Plug(caller *bindings) *ast.Expr {
+ if e.B1 == nil && e.B2 == nil {
+ return e.Expr
+ }
+ expr := e.Expr.Copy()
+ switch terms := expr.Terms.(type) {
+ case []*ast.Term:
+ if expr.IsEquality() {
+ terms[1] = e.B1.PlugNamespaced(terms[1], caller)
+ terms[2] = e.B2.PlugNamespaced(terms[2], caller)
+ } else {
+ for i := 1; i < len(terms); i++ {
+ terms[i] = e.B1.PlugNamespaced(terms[i], caller)
+ }
+ }
+ case *ast.Term:
+ expr.Terms = e.B1.PlugNamespaced(terms, caller)
+ }
+ for i := range expr.With {
+ expr.With[i].Value = e.B1.PlugNamespaced(expr.With[i].Value, caller)
+ }
+ return expr
+}
+
+// saveSupport contains additional partially evaluated policies that are part
+// of the output of partial evaluation.
+//
+// The support structure is accumulated as partial evaluation runs and then
+// considered complete once partial evaluation finishes (but not before). This
+// differs from partially evaluated queries which are considered complete as
+// soon as each one finishes.
+type saveSupport struct {
+ modules map[string]*ast.Module
+}
+
+func newSaveSupport() *saveSupport {
+ return &saveSupport{
+ modules: map[string]*ast.Module{},
+ }
+}
+
+func (s *saveSupport) List() []*ast.Module {
+ result := []*ast.Module{}
+ for _, module := range s.modules {
+ result = append(result, module)
+ }
+ return result
+}
+
+func (s *saveSupport) Exists(path ast.Ref) bool {
+ k := path[:len(path)-1].String()
+ module, ok := s.modules[k]
+ if !ok {
+ return false
+ }
+ name := ast.Var(path[len(path)-1].Value.(ast.String))
+ for _, rule := range module.Rules {
+ if rule.Head.Name.Equal(name) {
+ return true
+ }
+ }
+ return false
+}
+
+func (s *saveSupport) Insert(path ast.Ref, rule *ast.Rule) {
+ pkg := path[:len(path)-1]
+ k := pkg.String()
+ module, ok := s.modules[k]
+ if !ok {
+ module = &ast.Module{
+ Package: &ast.Package{
+ Path: pkg,
+ },
+ }
+ s.modules[k] = module
+ }
+ rule.Module = module
+ module.Rules = append(module.Rules, rule)
+}
+
+// saveRequired returns true if the statement x will result in some expressions
+// being saved. This check allows the evaluator to evaluate statements
+// completely during partial evaluation as long as they do not depend on any
+// kind of unknown value or statements that would generate saves.
+func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, ss *saveSet, b *bindings, x interface{}, rec bool) bool {
+
+ var found bool
+
+ vis := ast.NewGenericVisitor(func(node interface{}) bool {
+ if found {
+ return found
+ }
+ switch node := node.(type) {
+ case *ast.Expr:
+ found = len(node.With) > 0 || ignoreExprDuringPartial(node)
+ case *ast.Term:
+ switch v := node.Value.(type) {
+ case ast.Var:
+ // Variables only need to be tested in the node from call site
+ // because once traversal recurses into a rule existing unknown
+ // variables are out-of-scope.
+ if !rec && ss.ContainsRecursive(node, b) {
+ found = true
+ }
+ case ast.Ref:
+ if ss.Contains(node, b) {
+ found = true
+ } else if ic.Disabled(v.ConstantPrefix(), icIgnoreInternal) {
+ found = true
+ } else {
+ for _, rule := range c.GetRulesDynamic(v) {
+ if saveRequired(c, ic, icIgnoreInternal, ss, b, rule, true) {
+ found = true
+ break
+ }
+ }
+ }
+ }
+ }
+ return found
+ })
+
+ vis.Walk(x)
+
+ return found
+}
+
+func ignoreExprDuringPartial(expr *ast.Expr) bool {
+ if !expr.IsCall() {
+ return false
+ }
+
+ bi, ok := ast.BuiltinMap[expr.Operator().String()]
+
+ return ok && ignoreDuringPartial(bi)
+}
+
+func ignoreDuringPartial(bi *ast.Builtin) bool {
+ for _, ignore := range ast.IgnoreDuringPartialEval {
+ if bi == ignore {
+ return true
+ }
+ }
+ return false
+}
+
+type inliningControl struct {
+ shallow bool
+ disable []disableInliningFrame
+}
+
+type disableInliningFrame struct {
+ internal bool
+ refs []ast.Ref
+}
+
+func (i *inliningControl) PushDisable(refs []ast.Ref, internal bool) {
+ if i == nil {
+ return
+ }
+ i.disable = append(i.disable, disableInliningFrame{
+ internal: internal,
+ refs: refs,
+ })
+}
+
+func (i *inliningControl) PopDisable() {
+ if i == nil {
+ return
+ }
+ i.disable = i.disable[:len(i.disable)-1]
+}
+
+func (i *inliningControl) Disabled(ref ast.Ref, ignoreInternal bool) bool {
+ if i == nil {
+ return false
+ }
+ for _, frame := range i.disable {
+ if !frame.internal || !ignoreInternal {
+ for _, other := range frame.refs {
+ if other.HasPrefix(ref) || ref.HasPrefix(other) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/semver.go b/vendor/github.com/open-policy-agent/opa/topdown/semver.go
new file mode 100644
index 00000000..b91a5a99
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/semver.go
@@ -0,0 +1,60 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/semver"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+func builtinSemVerCompare(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ versionStringA, err := builtins.StringOperand(args[0].Value, 1)
+ if err != nil {
+ return err
+ }
+
+ versionStringB, err := builtins.StringOperand(args[1].Value, 2)
+ if err != nil {
+ return err
+ }
+
+ versionA, err := semver.NewVersion(string(versionStringA))
+ if err != nil {
+ return fmt.Errorf("operand 1: string %s is not a valid SemVer", versionStringA)
+ }
+ versionB, err := semver.NewVersion(string(versionStringB))
+ if err != nil {
+ return fmt.Errorf("operand 2: string %s is not a valid SemVer", versionStringB)
+ }
+
+ result := versionA.Compare(*versionB)
+
+ return iter(ast.IntNumberTerm(result))
+}
+
+func builtinSemVerIsValid(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ versionString, err := builtins.StringOperand(args[0].Value, 1)
+ if err != nil {
+ result := ast.BooleanTerm(false)
+ return iter(result)
+ }
+
+ result := true
+
+ _, err = semver.NewVersion(string(versionString))
+ if err != nil {
+ result = false
+ }
+
+ return iter(ast.BooleanTerm(result))
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.SemVerCompare.Name, builtinSemVerCompare)
+ RegisterBuiltinFunc(ast.SemVerIsValid.Name, builtinSemVerIsValid)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/sets.go b/vendor/github.com/open-policy-agent/opa/topdown/sets.go
new file mode 100644
index 00000000..a9c5ad86
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/sets.go
@@ -0,0 +1,84 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+// Deprecated in v0.4.2 in favour of minus/infix "-" operation.
+func builtinSetDiff(a, b ast.Value) (ast.Value, error) {
+
+ s1, err := builtins.SetOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ s2, err := builtins.SetOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return s1.Diff(s2), nil
+}
+
+// builtinSetIntersection returns the intersection of the given input sets
+func builtinSetIntersection(a ast.Value) (ast.Value, error) {
+
+ inputSet, err := builtins.SetOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ // empty input set
+ if inputSet.Len() == 0 {
+ return ast.NewSet(), nil
+ }
+
+ var result ast.Set
+
+ err = inputSet.Iter(func(x *ast.Term) error {
+ n, err := builtins.SetOperand(x.Value, 1)
+ if err != nil {
+ return err
+ }
+
+ if result == nil {
+ result = n
+ } else {
+ result = result.Intersect(n)
+ }
+ return nil
+ })
+ return result, err
+}
+
+// builtinSetUnion returns the union of the given input sets
+func builtinSetUnion(a ast.Value) (ast.Value, error) {
+
+ inputSet, err := builtins.SetOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ result := ast.NewSet()
+
+ err = inputSet.Iter(func(x *ast.Term) error {
+ n, err := builtins.SetOperand(x.Value, 1)
+ if err != nil {
+ return err
+ }
+ result = result.Union(n)
+ return nil
+ })
+ return result, err
+}
+
+func init() {
+ RegisterFunctionalBuiltin2(ast.SetDiff.Name, builtinSetDiff)
+ RegisterFunctionalBuiltin1(ast.Intersection.Name, builtinSetIntersection)
+ RegisterFunctionalBuiltin1(ast.Union.Name, builtinSetUnion)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/strings.go b/vendor/github.com/open-policy-agent/opa/topdown/strings.go
new file mode 100644
index 00000000..71d635cb
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/strings.go
@@ -0,0 +1,404 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+func builtinFormatInt(a, b ast.Value) (ast.Value, error) {
+
+ input, err := builtins.NumberOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ base, err := builtins.NumberOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ var format string
+ switch base {
+ case ast.Number("2"):
+ format = "%b"
+ case ast.Number("8"):
+ format = "%o"
+ case ast.Number("10"):
+ format = "%d"
+ case ast.Number("16"):
+ format = "%x"
+ default:
+ return nil, builtins.NewOperandEnumErr(2, "2", "8", "10", "16")
+ }
+
+ f := builtins.NumberToFloat(input)
+ i, _ := f.Int(nil)
+
+ return ast.String(fmt.Sprintf(format, i)), nil
+}
+
+func builtinConcat(a, b ast.Value) (ast.Value, error) {
+
+ join, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ strs := []string{}
+
+ switch b := b.(type) {
+ case *ast.Array:
+ err := b.Iter(func(x *ast.Term) error {
+ s, ok := x.Value.(ast.String)
+ if !ok {
+ return builtins.NewOperandElementErr(2, b, x.Value, "string")
+ }
+ strs = append(strs, string(s))
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ case ast.Set:
+ err := b.Iter(func(x *ast.Term) error {
+ s, ok := x.Value.(ast.String)
+ if !ok {
+ return builtins.NewOperandElementErr(2, b, x.Value, "string")
+ }
+ strs = append(strs, string(s))
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, builtins.NewOperandTypeErr(2, b, "set", "array")
+ }
+
+ return ast.String(strings.Join(strs, string(join))), nil
+}
+
+func builtinIndexOf(a, b ast.Value) (ast.Value, error) {
+ base, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ search, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ index := strings.Index(string(base), string(search))
+ return ast.IntNumberTerm(index).Value, nil
+}
+
+func builtinSubstring(a, b, c ast.Value) (ast.Value, error) {
+
+ base, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ runes := []rune(base)
+
+ startIndex, err := builtins.IntOperand(b, 2)
+ if err != nil {
+ return nil, err
+ } else if startIndex >= len(runes) {
+ return ast.String(""), nil
+ } else if startIndex < 0 {
+ return nil, fmt.Errorf("negative offset")
+ }
+
+ length, err := builtins.IntOperand(c, 3)
+ if err != nil {
+ return nil, err
+ }
+
+ var s ast.String
+ if length < 0 {
+ s = ast.String(runes[startIndex:])
+ } else {
+ upto := startIndex + length
+ if len(runes) < upto {
+ upto = len(runes)
+ }
+ s = ast.String(runes[startIndex:upto])
+ }
+
+ return s, nil
+}
+
+func builtinContains(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ substr, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.Boolean(strings.Contains(string(s), string(substr))), nil
+}
+
+func builtinStartsWith(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ prefix, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.Boolean(strings.HasPrefix(string(s), string(prefix))), nil
+}
+
+func builtinEndsWith(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ suffix, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.Boolean(strings.HasSuffix(string(s), string(suffix))), nil
+}
+
+func builtinLower(a ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.ToLower(string(s))), nil
+}
+
+func builtinUpper(a ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.ToUpper(string(s))), nil
+}
+
+func builtinSplit(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ d, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+ elems := strings.Split(string(s), string(d))
+ arr := make([]*ast.Term, len(elems))
+ for i := range elems {
+ arr[i] = ast.StringTerm(elems[i])
+ }
+ return ast.NewArray(arr...), nil
+}
+
+func builtinReplace(a, b, c ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ old, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ new, err := builtins.StringOperand(c, 3)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.Replace(string(s), string(old), string(new), -1)), nil
+}
+
+func builtinReplaceN(a, b ast.Value) (ast.Value, error) {
+ patterns, err := builtins.ObjectOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ keys := patterns.Keys()
+ sort.Slice(keys, func(i, j int) bool { return ast.Compare(keys[i].Value, keys[j].Value) < 0 })
+
+ s, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ var oldnewArr []string
+ for _, k := range keys {
+ keyVal, ok := k.Value.(ast.String)
+ if !ok {
+ return nil, builtins.NewOperandErr(1, "non-string key found in pattern object")
+ }
+ val := patterns.Get(k) // cannot be nil
+ strVal, ok := val.Value.(ast.String)
+ if !ok {
+ return nil, builtins.NewOperandErr(1, "non-string value found in pattern object")
+ }
+ oldnewArr = append(oldnewArr, string(keyVal), string(strVal))
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ r := strings.NewReplacer(oldnewArr...)
+ replaced := r.Replace(string(s))
+
+ return ast.String(replaced), nil
+}
+
+func builtinTrim(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.Trim(string(s), string(c))), nil
+}
+
+func builtinTrimLeft(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.TrimLeft(string(s), string(c))), nil
+}
+
+func builtinTrimPrefix(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ pre, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.TrimPrefix(string(s), string(pre))), nil
+}
+
+func builtinTrimRight(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.TrimRight(string(s), string(c))), nil
+}
+
+func builtinTrimSuffix(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ suf, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.TrimSuffix(string(s), string(suf))), nil
+}
+
+func builtinTrimSpace(a ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.String(strings.TrimSpace(string(s))), nil
+}
+
+func builtinSprintf(a, b ast.Value) (ast.Value, error) {
+ s, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ astArr, ok := b.(*ast.Array)
+ if !ok {
+ return nil, builtins.NewOperandTypeErr(2, b, "array")
+ }
+
+ args := make([]interface{}, astArr.Len())
+
+ for i := range args {
+ switch v := astArr.Elem(i).Value.(type) {
+ case ast.Number:
+ if n, ok := v.Int(); ok {
+ args[i] = n
+ } else if f, ok := v.Float64(); ok {
+ args[i] = f
+ } else {
+ args[i] = v.String()
+ }
+ case ast.String:
+ args[i] = string(v)
+ default:
+ args[i] = astArr.Elem(i).String()
+ }
+ }
+
+ return ast.String(fmt.Sprintf(string(s), args...)), nil
+}
+
+func init() {
+ RegisterFunctionalBuiltin2(ast.FormatInt.Name, builtinFormatInt)
+ RegisterFunctionalBuiltin2(ast.Concat.Name, builtinConcat)
+ RegisterFunctionalBuiltin2(ast.IndexOf.Name, builtinIndexOf)
+ RegisterFunctionalBuiltin3(ast.Substring.Name, builtinSubstring)
+ RegisterFunctionalBuiltin2(ast.Contains.Name, builtinContains)
+ RegisterFunctionalBuiltin2(ast.StartsWith.Name, builtinStartsWith)
+ RegisterFunctionalBuiltin2(ast.EndsWith.Name, builtinEndsWith)
+ RegisterFunctionalBuiltin1(ast.Upper.Name, builtinUpper)
+ RegisterFunctionalBuiltin1(ast.Lower.Name, builtinLower)
+ RegisterFunctionalBuiltin2(ast.Split.Name, builtinSplit)
+ RegisterFunctionalBuiltin3(ast.Replace.Name, builtinReplace)
+ RegisterFunctionalBuiltin2(ast.ReplaceN.Name, builtinReplaceN)
+ RegisterFunctionalBuiltin2(ast.Trim.Name, builtinTrim)
+ RegisterFunctionalBuiltin2(ast.TrimLeft.Name, builtinTrimLeft)
+ RegisterFunctionalBuiltin2(ast.TrimPrefix.Name, builtinTrimPrefix)
+ RegisterFunctionalBuiltin2(ast.TrimRight.Name, builtinTrimRight)
+ RegisterFunctionalBuiltin2(ast.TrimSuffix.Name, builtinTrimSuffix)
+ RegisterFunctionalBuiltin1(ast.TrimSpace.Name, builtinTrimSpace)
+ RegisterFunctionalBuiltin2(ast.Sprintf.Name, builtinSprintf)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/time.go b/vendor/github.com/open-policy-agent/opa/topdown/time.go
new file mode 100644
index 00000000..d8ed4e2a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/time.go
@@ -0,0 +1,213 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+var tzCache map[string]*time.Location
+var tzCacheMutex *sync.Mutex
+
+func builtinTimeNowNanos(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error {
+ return iter(bctx.Time)
+}
+
+func builtinTimeParseNanos(a, b ast.Value) (ast.Value, error) {
+
+ format, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err := time.Parse(string(format), string(value))
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.Number(int64ToJSONNumber(result.UnixNano())), nil
+}
+
+func builtinTimeParseRFC3339Nanos(a ast.Value) (ast.Value, error) {
+
+ value, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ result, err := time.Parse(time.RFC3339, string(value))
+ if err != nil {
+ return nil, err
+ }
+
+ return ast.Number(int64ToJSONNumber(result.UnixNano())), nil
+}
+func builtinParseDurationNanos(a ast.Value) (ast.Value, error) {
+
+ duration, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ value, err := time.ParseDuration(string(duration))
+ if err != nil {
+ return nil, err
+ }
+ return ast.Number(int64ToJSONNumber(int64(value))), nil
+}
+
+func builtinDate(a ast.Value) (ast.Value, error) {
+ t, err := tzTime(a)
+ if err != nil {
+ return nil, err
+ }
+ year, month, day := t.Date()
+ result := ast.NewArray(ast.IntNumberTerm(year), ast.IntNumberTerm(int(month)), ast.IntNumberTerm(day))
+ return result, nil
+}
+
+func builtinClock(a ast.Value) (ast.Value, error) {
+ t, err := tzTime(a)
+ if err != nil {
+ return nil, err
+ }
+ hour, minute, second := t.Clock()
+ result := ast.NewArray(ast.IntNumberTerm(hour), ast.IntNumberTerm(minute), ast.IntNumberTerm(second))
+ return result, nil
+}
+
+func builtinWeekday(a ast.Value) (ast.Value, error) {
+ t, err := tzTime(a)
+ if err != nil {
+ return nil, err
+ }
+ weekday := t.Weekday().String()
+ return ast.String(weekday), nil
+}
+
+func builtinAddDate(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
+ t, err := tzTime(operands[0].Value)
+ if err != nil {
+ return err
+ }
+
+ years, err := builtins.IntOperand(operands[1].Value, 2)
+ if err != nil {
+ return err
+ }
+
+ months, err := builtins.IntOperand(operands[2].Value, 3)
+ if err != nil {
+ return err
+ }
+
+ days, err := builtins.IntOperand(operands[3].Value, 4)
+ if err != nil {
+ return err
+ }
+
+ result := t.AddDate(years, months, days)
+ return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(result.UnixNano()))))
+}
+
+func tzTime(a ast.Value) (t time.Time, err error) {
+ var nVal ast.Value
+ loc := time.UTC
+
+ switch va := a.(type) {
+ case *ast.Array:
+ if va.Len() == 0 {
+ return time.Time{}, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]")
+ }
+
+ nVal, err = builtins.NumberOperand(va.Elem(0).Value, 1)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ if va.Len() > 1 {
+ tzVal, err := builtins.StringOperand(va.Elem(1).Value, 1)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ tzName := string(tzVal)
+
+ switch tzName {
+ case "", "UTC":
+ // loc is already UTC
+
+ case "Local":
+ loc = time.Local
+
+ default:
+ var ok bool
+
+ tzCacheMutex.Lock()
+ loc, ok = tzCache[tzName]
+
+ if !ok {
+ loc, err = time.LoadLocation(tzName)
+ if err != nil {
+ tzCacheMutex.Unlock()
+ return time.Time{}, err
+ }
+ tzCache[tzName] = loc
+ }
+ tzCacheMutex.Unlock()
+ }
+ }
+
+ case ast.Number:
+ nVal = a
+
+ default:
+ return time.Time{}, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]")
+ }
+
+ value, err := builtins.NumberOperand(nVal, 1)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ f := builtins.NumberToFloat(value)
+ i64, acc := f.Int64()
+ if acc != big.Exact {
+ return time.Time{}, fmt.Errorf("timestamp too big")
+ }
+
+ t = time.Unix(0, i64).In(loc)
+
+ return t, nil
+}
+
+func int64ToJSONNumber(i int64) json.Number {
+ return json.Number(strconv.FormatInt(i, 10))
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.NowNanos.Name, builtinTimeNowNanos)
+ RegisterFunctionalBuiltin1(ast.ParseRFC3339Nanos.Name, builtinTimeParseRFC3339Nanos)
+ RegisterFunctionalBuiltin2(ast.ParseNanos.Name, builtinTimeParseNanos)
+ RegisterFunctionalBuiltin1(ast.ParseDurationNanos.Name, builtinParseDurationNanos)
+ RegisterFunctionalBuiltin1(ast.Date.Name, builtinDate)
+ RegisterFunctionalBuiltin1(ast.Clock.Name, builtinClock)
+ RegisterFunctionalBuiltin1(ast.Weekday.Name, builtinWeekday)
+ RegisterBuiltinFunc(ast.AddDate.Name, builtinAddDate)
+ tzCacheMutex = &sync.Mutex{}
+ tzCache = make(map[string]*time.Location)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go
new file mode 100644
index 00000000..fd30869b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/tokens.go
@@ -0,0 +1,1127 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/hmac"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/x509"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "hash"
+ "math/big"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/jwx/jwk"
+ "github.com/open-policy-agent/opa/internal/jwx/jws"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
+var (
+ jwtEncKey = ast.StringTerm("enc")
+ jwtCtyKey = ast.StringTerm("cty")
+ jwtAlgKey = ast.StringTerm("alg")
+ jwtIssKey = ast.StringTerm("iss")
+ jwtExpKey = ast.StringTerm("exp")
+ jwtNbfKey = ast.StringTerm("nbf")
+ jwtAudKey = ast.StringTerm("aud")
+)
+
+// JSONWebToken represents the 3 parts (header, payload & signature) of
+// a JWT in Base64.
+type JSONWebToken struct {
+ header string
+ payload string
+ signature string
+ decodedHeader ast.Object
+}
+
+// decodeHeader populates the decodedHeader field.
+func (token *JSONWebToken) decodeHeader() (err error) {
+ var h ast.Value
+ if h, err = builtinBase64UrlDecode(ast.String(token.header)); err != nil {
+ return fmt.Errorf("JWT header had invalid encoding: %v", err)
+ }
+ if token.decodedHeader, err = validateJWTHeader(string(h.(ast.String))); err != nil {
+ return err
+ }
+ return
+}
+
+// Implements JWT decoding/validation based on RFC 7519 Section 7.2:
+// https://tools.ietf.org/html/rfc7519#section-7.2
+// It does no data validation; it merely checks that the given string
+// represents a structurally valid JWT. It supports JWTs using JWS compact
+// serialization.
+func builtinJWTDecode(a ast.Value) (ast.Value, error) {
+ token, err := decodeJWT(a)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = token.decodeHeader(); err != nil {
+ return nil, err
+ }
+
+ p, err := builtinBase64UrlDecode(ast.String(token.payload))
+ if err != nil {
+ return nil, fmt.Errorf("JWT payload had invalid encoding: %v", err)
+ }
+
+ if cty := token.decodedHeader.Get(jwtCtyKey); cty != nil {
+ ctyVal := string(cty.Value.(ast.String))
+ // It is possible for the contents of a token to be another
+ // token as a result of nested signing or encryption. To handle
+ // the case where we are given a token such as this, we check
+ // the content type and recurse on the payload if the content
+ // is "JWT".
+ // When the payload is itself another encoded JWT, then its
+ // contents are quoted (behavior of https://jwt.io/). To fix
+ // this, remove leading and trailing quotes.
+ if ctyVal == "JWT" {
+ p, err = builtinTrim(p, ast.String(`"'`))
+ if err != nil {
+ panic("not reached")
+ }
+ return builtinJWTDecode(p)
+ }
+ }
+
+ payload, err := extractJSONObject(string(p.(ast.String)))
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := builtinBase64UrlDecode(ast.String(token.signature))
+ if err != nil {
+ return nil, fmt.Errorf("JWT signature had invalid encoding: %v", err)
+ }
+ sign := hex.EncodeToString([]byte(s.(ast.String)))
+
+ arr := []*ast.Term{
+ ast.NewTerm(token.decodedHeader),
+ ast.NewTerm(payload),
+ ast.StringTerm(sign),
+ }
+
+ return ast.NewArray(arr...), nil
+}
+
+// Implements RS256 JWT signature verification
+func builtinJWTVerifyRS256(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(args[0].Value, args[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+ return rsa.VerifyPKCS1v15(
+ publicKey,
+ crypto.SHA256,
+ digest,
+ signature)
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements RS384 JWT signature verification
+func builtinJWTVerifyRS384(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(args[0].Value, args[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+ return rsa.VerifyPKCS1v15(
+ publicKey,
+ crypto.SHA384,
+ digest,
+ signature)
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements RS512 JWT signature verification
+func builtinJWTVerifyRS512(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(args[0].Value, args[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+ return rsa.VerifyPKCS1v15(
+ publicKey,
+ crypto.SHA512,
+ digest,
+ signature)
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements PS256 JWT signature verification
+func builtinJWTVerifyPS256(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(args[0].Value, args[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+ return rsa.VerifyPSS(
+ publicKey,
+ crypto.SHA256,
+ digest,
+ signature,
+ nil)
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements PS384 JWT signature verification
+func builtinJWTVerifyPS384(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(args[0].Value, args[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+ return rsa.VerifyPSS(
+ publicKey,
+ crypto.SHA384,
+ digest,
+ signature,
+ nil)
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements PS512 JWT signature verification
+func builtinJWTVerifyPS512(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerifyRSA(args[0].Value, args[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error {
+ return rsa.VerifyPSS(
+ publicKey,
+ crypto.SHA512,
+ digest,
+ signature,
+ nil)
+ })
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements RSA JWT signature verification.
+func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) {
+ return builtinJWTVerify(a, b, hasher, func(publicKey interface{}, digest []byte, signature []byte) error {
+ publicKeyRsa, ok := publicKey.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("incorrect public key type")
+ }
+ return verify(publicKeyRsa, digest, signature)
+ })
+}
+
+// Implements ES256 JWT signature verification.
+func builtinJWTVerifyES256(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(args[0].Value, args[1].Value, sha256.New, verifyES)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements ES384 JWT signature verification
+func builtinJWTVerifyES384(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(args[0].Value, args[1].Value, sha512.New384, verifyES)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+// Implements ES512 JWT signature verification
+func builtinJWTVerifyES512(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ result, err := builtinJWTVerify(args[0].Value, args[1].Value, sha512.New, verifyES)
+ if err == nil {
+ return iter(ast.NewTerm(result))
+ }
+ return err
+}
+
+func verifyES(publicKey interface{}, digest []byte, signature []byte) error {
+ publicKeyEcdsa, ok := publicKey.(*ecdsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("incorrect public key type")
+ }
+ r, s := &big.Int{}, &big.Int{}
+ n := len(signature) / 2
+ r.SetBytes(signature[:n])
+ s.SetBytes(signature[n:])
+ if ecdsa.Verify(publicKeyEcdsa, digest, r, s) {
+ return nil
+ }
+ return fmt.Errorf("ECDSA signature verification error")
+}
+
+// getKeyFromCertOrJWK returns the public key found in a X.509 certificate or JWK key(s).
+// A valid PEM block is never valid JSON (and vice versa), hence we can try parsing both.
+func getKeyFromCertOrJWK(certificate string) ([]interface{}, error) {
+ if block, rest := pem.Decode([]byte(certificate)); block != nil {
+ if len(rest) > 0 {
+ return nil, fmt.Errorf("extra data after a PEM certificate block")
+ }
+
+ if block.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse a PEM certificate")
+ }
+
+ return []interface{}{cert.PublicKey}, nil
+ }
+
+ if block.Type == "PUBLIC KEY" {
+ key, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse a PEM public key")
+ }
+
+ return []interface{}{key}, nil
+ }
+
+ return nil, fmt.Errorf("failed to extract a Key from the PEM certificate")
+ }
+
+ jwks, err := jwk.ParseString(certificate)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to parse a JWK key (set)")
+ }
+
+ var keys []interface{}
+ for _, k := range jwks.Keys {
+ key, err := k.Materialize()
+ if err != nil {
+ return nil, err
+ }
+ keys = append(keys, key)
+ }
+
+ return keys, nil
+}
+
+// Implements JWT signature verification.
+func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) {
+ token, err := decodeJWT(a)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err := builtins.StringOperand(b, 2)
+ if err != nil {
+ return nil, err
+ }
+
+ keys, err := getKeyFromCertOrJWK(string(s))
+ if err != nil {
+ return nil, err
+ }
+
+ signature, err := token.decodeSignature()
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate the JWT signature
+ for _, key := range keys {
+ err = verify(key,
+ getInputSHA([]byte(token.header+"."+token.payload), hasher),
+ []byte(signature))
+
+ if err == nil {
+ return ast.Boolean(true), nil
+ }
+ }
+
+ // None of the keys worked, return false
+ return ast.Boolean(false), nil
+}
+
+// Implements HS256 (secret) JWT signature verification
+func builtinJWTVerifyHS256(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ // Decode the JSON Web Token
+ token, err := decodeJWT(args[0].Value)
+ if err != nil {
+ return err
+ }
+
+ // Process Secret input
+ astSecret, err := builtins.StringOperand(args[1].Value, 2)
+ if err != nil {
+ return err
+ }
+ secret := string(astSecret)
+
+ mac := hmac.New(sha256.New, []byte(secret))
+ _, err = mac.Write([]byte(token.header + "." + token.payload))
+ if err != nil {
+ return err
+ }
+
+ signature, err := token.decodeSignature()
+ if err != nil {
+ return err
+ }
+
+ return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil)))))
+}
+
+// Implements HS384 JWT signature verification
+func builtinJWTVerifyHS384(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ // Decode the JSON Web Token
+ token, err := decodeJWT(args[0].Value)
+ if err != nil {
+ return err
+ }
+
+ // Process Secret input
+ astSecret, err := builtins.StringOperand(args[1].Value, 2)
+ if err != nil {
+ return err
+ }
+ secret := string(astSecret)
+
+ mac := hmac.New(sha512.New384, []byte(secret))
+ _, err = mac.Write([]byte(token.header + "." + token.payload))
+ if err != nil {
+ return err
+ }
+
+ signature, err := token.decodeSignature()
+ if err != nil {
+ return err
+ }
+
+ return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil)))))
+}
+
+// Implements HS512 JWT signature verification
+func builtinJWTVerifyHS512(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ // Decode the JSON Web Token
+ token, err := decodeJWT(args[0].Value)
+ if err != nil {
+ return err
+ }
+
+ // Process Secret input
+ astSecret, err := builtins.StringOperand(args[1].Value, 2)
+ if err != nil {
+ return err
+ }
+ secret := string(astSecret)
+
+ mac := hmac.New(sha512.New, []byte(secret))
+ _, err = mac.Write([]byte(token.header + "." + token.payload))
+ if err != nil {
+ return err
+ }
+
+ signature, err := token.decodeSignature()
+ if err != nil {
+ return err
+ }
+
+ return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil)))))
+}
+
// -- Full JWT verification and decoding --

// Verification constraints. See tokens_test.go for unit tests.

// tokenConstraints holds decoded JWT verification constraints.
// Exactly one of keys/secret must be populated; this is enforced by
// (*tokenConstraints).validate().
type tokenConstraints struct {
	// The set of asymmetric keys we can verify with.
	keys []interface{}

	// The single symmetric key we will verify with.
	secret string

	// The algorithm that must be used to verify.
	// If "", any algorithm is acceptable.
	alg string

	// The required issuer.
	// If "", any issuer is acceptable.
	iss string

	// The required audience.
	// If "", no audience is acceptable.
	aud string

	// The time to validate against, or -1 if no constraint set.
	// (If unset, the current time will be used.)
	time int64
}
+
// tokenConstraintHandler is the handler type for JWT verification constraints.
type tokenConstraintHandler func(value ast.Value, parameters *tokenConstraints) (err error)

// tokenConstraintTypes maps known JWT verification constraints to handlers.
// parseTokenConstraints rejects any constraint name not listed here.
var tokenConstraintTypes = map[string]tokenConstraintHandler{
	"cert": tokenConstraintCert,
	"secret": func(value ast.Value, constraints *tokenConstraints) (err error) {
		return tokenConstraintString("secret", value, &constraints.secret)
	},
	"alg": func(value ast.Value, constraints *tokenConstraints) (err error) {
		return tokenConstraintString("alg", value, &constraints.alg)
	},
	"iss": func(value ast.Value, constraints *tokenConstraints) (err error) {
		return tokenConstraintString("iss", value, &constraints.iss)
	},
	"aud": func(value ast.Value, constraints *tokenConstraints) (err error) {
		return tokenConstraintString("aud", value, &constraints.aud)
	},
	"time": tokenConstraintTime,
}
+
+// tokenConstraintCert handles the `cert` constraint.
+func tokenConstraintCert(value ast.Value, constraints *tokenConstraints) (err error) {
+ var s ast.String
+ var ok bool
+ if s, ok = value.(ast.String); !ok {
+ return fmt.Errorf("cert constraint: must be a string")
+ }
+
+ constraints.keys, err = getKeyFromCertOrJWK(string(s))
+ return
+}
+
+// tokenConstraintTime handles the `time` constraint.
+func tokenConstraintTime(value ast.Value, constraints *tokenConstraints) (err error) {
+ var time ast.Number
+ var ok bool
+ if time, ok = value.(ast.Number); !ok {
+ err = fmt.Errorf("token time constraint: must be a number")
+ return
+ }
+ var timeFloat float64
+ if timeFloat, err = strconv.ParseFloat(string(time), 64); err != nil {
+ err = fmt.Errorf("token time constraint: %v", err)
+ return
+ }
+ if timeFloat < 0 {
+ err = fmt.Errorf("token time constraint: must not be negative")
+ return
+ }
+ constraints.time = int64(timeFloat)
+ return
+}
+
+// tokenConstraintString handles string constraints.
+func tokenConstraintString(name string, value ast.Value, where *string) (err error) {
+ var av ast.String
+ var ok bool
+ if av, ok = value.(ast.String); !ok {
+ err = fmt.Errorf("%s constraint: must be a string", name)
+ return
+ }
+ *where = string(av)
+ return
+}
+
+// parseTokenConstraints parses the constraints argument.
+func parseTokenConstraints(a ast.Value) (constraints tokenConstraints, err error) {
+ constraints.time = -1
+ var o ast.Object
+ var ok bool
+ if o, ok = a.(ast.Object); !ok {
+ err = fmt.Errorf("token constraints must be object")
+ return
+ }
+ if err = o.Iter(func(k *ast.Term, v *ast.Term) (err error) {
+ var handler tokenConstraintHandler
+ var ok bool
+ name := string(k.Value.(ast.String))
+ if handler, ok = tokenConstraintTypes[name]; ok {
+ if err = handler(v.Value, &constraints); err != nil {
+ return
+ }
+ } else {
+ // Anything unknown is rejected.
+ err = fmt.Errorf("unknown token validation constraint: %s", name)
+ return
+ }
+ return
+ }); err != nil {
+ return
+ }
+ return
+}
+
+// validate validates the constraints argument.
+func (constraints *tokenConstraints) validate() (err error) {
+ keys := 0
+ if constraints.keys != nil {
+ keys++
+ }
+ if constraints.secret != "" {
+ keys++
+ }
+ if keys > 1 {
+ err = fmt.Errorf("duplicate key constraints")
+ return
+ }
+ if keys < 1 {
+ err = fmt.Errorf("no key constraint")
+ return
+ }
+ return
+}
+
// verify verifies a JWT using the constraints and the algorithm from the header
//
// The signed input is reconstructed as "<header>.<payload>" and checked
// against signature using the verifier registered for alg. Asymmetric keys,
// when configured, take precedence over the symmetric secret.
//
// NOTE(review): the kid parameter is currently unused — when several keys
// are configured, each one is tried in turn instead of selecting by kid.
func (constraints *tokenConstraints) verify(kid, alg, header, payload, signature string) error {
	// Construct the payload
	plaintext := []byte(header)
	plaintext = append(plaintext, []byte(".")...)
	plaintext = append(plaintext, payload...)
	// Look up the algorithm
	var ok bool
	var a tokenAlgorithm
	a, ok = tokenAlgorithms[alg]
	if !ok {
		return fmt.Errorf("unknown JWS algorithm: %s", alg)
	}
	// If we're configured with asymmetric key(s) then only trust that
	if constraints.keys != nil {
		verified := false
		for _, key := range constraints.keys {
			err := a.verify(key, a.hash, plaintext, []byte(signature))
			if err == nil {
				verified = true
				break
			}
		}
		if !verified {
			return errSignatureNotVerified
		}
		return nil
	}
	if constraints.secret != "" {
		return a.verify([]byte(constraints.secret), a.hash, plaintext, []byte(signature))
	}
	// (*tokenConstraints)validate() should prevent this happening
	return errors.New("unexpectedly found no keys to trust")
}
+
+// validAudience checks the audience of the JWT.
+// It returns true if it meets the constraints and false otherwise.
+func (constraints *tokenConstraints) validAudience(aud ast.Value) (valid bool) {
+ var ok bool
+ var s ast.String
+ if s, ok = aud.(ast.String); ok {
+ return string(s) == constraints.aud
+ }
+ var a *ast.Array
+ if a, ok = aud.(*ast.Array); ok {
+ for i := 0; i < a.Len(); i++ {
+ if s, ok = a.Elem(i).Value.(ast.String); ok {
+ if string(s) == constraints.aud {
+ return true
+ }
+ } else {
+ // Ill-formed aud claim
+ return false
+ }
+ }
+ }
+ return false
+}
+
// JWT algorithms

// tokenVerifyFunction verifies signature over the full signing input.
type tokenVerifyFunction func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) (err error)

// tokenVerifyAsymmetricFunction verifies signature over a precomputed digest.
type tokenVerifyAsymmetricFunction func(key interface{}, hash crypto.Hash, digest []byte, signature []byte) (err error)

// jwtAlgorithm describes a JWS 'alg' value
type tokenAlgorithm struct {
	hash   crypto.Hash         // digest used for the signing input
	verify tokenVerifyFunction // signature check for this algorithm family
}

// tokenAlgorithms is the known JWT algorithms
var tokenAlgorithms = map[string]tokenAlgorithm{
	"RS256": {crypto.SHA256, verifyAsymmetric(verifyRSAPKCS)},
	"RS384": {crypto.SHA384, verifyAsymmetric(verifyRSAPKCS)},
	"RS512": {crypto.SHA512, verifyAsymmetric(verifyRSAPKCS)},
	"PS256": {crypto.SHA256, verifyAsymmetric(verifyRSAPSS)},
	"PS384": {crypto.SHA384, verifyAsymmetric(verifyRSAPSS)},
	"PS512": {crypto.SHA512, verifyAsymmetric(verifyRSAPSS)},
	"ES256": {crypto.SHA256, verifyAsymmetric(verifyECDSA)},
	"ES384": {crypto.SHA384, verifyAsymmetric(verifyECDSA)},
	"ES512": {crypto.SHA512, verifyAsymmetric(verifyECDSA)},
	"HS256": {crypto.SHA256, verifyHMAC},
	"HS384": {crypto.SHA384, verifyHMAC},
	"HS512": {crypto.SHA512, verifyHMAC},
}
+
+// errSignatureNotVerified is returned when a signature cannot be verified.
+var errSignatureNotVerified = errors.New("signature not verified")
+
+func verifyHMAC(key interface{}, hash crypto.Hash, payload []byte, signature []byte) (err error) {
+ macKey, ok := key.([]byte)
+ if !ok {
+ return fmt.Errorf("incorrect symmetric key type")
+ }
+ mac := hmac.New(hash.New, macKey)
+ if _, err = mac.Write([]byte(payload)); err != nil {
+ return
+ }
+ if !hmac.Equal(signature, mac.Sum([]byte{})) {
+ err = errSignatureNotVerified
+ }
+ return
+}
+
+func verifyAsymmetric(verify tokenVerifyAsymmetricFunction) tokenVerifyFunction {
+ return func(key interface{}, hash crypto.Hash, payload []byte, signature []byte) (err error) {
+ h := hash.New()
+ h.Write(payload)
+ return verify(key, hash, h.Sum([]byte{}), signature)
+ }
+}
+
+func verifyRSAPKCS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) (err error) {
+ publicKeyRsa, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("incorrect public key type")
+ }
+ if err = rsa.VerifyPKCS1v15(publicKeyRsa, hash, digest, signature); err != nil {
+ err = errSignatureNotVerified
+ }
+ return
+}
+
+func verifyRSAPSS(key interface{}, hash crypto.Hash, digest []byte, signature []byte) (err error) {
+ publicKeyRsa, ok := key.(*rsa.PublicKey)
+ if !ok {
+ return fmt.Errorf("incorrect public key type")
+ }
+ if err = rsa.VerifyPSS(publicKeyRsa, hash, digest, signature, nil); err != nil {
+ err = errSignatureNotVerified
+ }
+ return
+}
+
// verifyECDSA checks an ECDSA signature over digest; the key must be an
// *ecdsa.PublicKey. The signature is expected as the raw concatenation of
// the big-endian r and s values, split at the midpoint (not ASN.1/DER).
func verifyECDSA(key interface{}, hash crypto.Hash, digest []byte, signature []byte) (err error) {
	publicKeyEcdsa, ok := key.(*ecdsa.PublicKey)
	if !ok {
		return fmt.Errorf("incorrect public key type")
	}
	// Split the signature into its two halves: r is the first half, s the second.
	r, s := &big.Int{}, &big.Int{}
	n := len(signature) / 2
	r.SetBytes(signature[:n])
	s.SetBytes(signature[n:])
	if ecdsa.Verify(publicKeyEcdsa, digest, r, s) {
		return nil
	}
	return errSignatureNotVerified
}
+
// JWT header parsing and parameters. See tokens_test.go for unit tests.

// tokenHeader is a parsed JWT header
type tokenHeader struct {
	alg     string          // "alg" parameter; required (see valid())
	kid     string          // "kid" parameter
	typ     string          // "typ" parameter
	cty     string          // "cty" parameter
	crit    map[string]bool // names listed in the "crit" parameter
	unknown []string        // parameter names with no registered handler
}
+
// tokenHeaderHandler handles a JWT header parameters
type tokenHeaderHandler func(header *tokenHeader, value ast.Value) (err error)

// tokenHeaderTypes maps known JWT header parameters to handlers; parameters
// not listed here are collected into tokenHeader.unknown by parseTokenHeader.
var tokenHeaderTypes = map[string]tokenHeaderHandler{
	"alg": func(header *tokenHeader, value ast.Value) (err error) {
		return tokenHeaderString("alg", &header.alg, value)
	},
	"kid": func(header *tokenHeader, value ast.Value) (err error) {
		return tokenHeaderString("kid", &header.kid, value)
	},
	"typ": func(header *tokenHeader, value ast.Value) (err error) {
		return tokenHeaderString("typ", &header.typ, value)
	},
	"cty": func(header *tokenHeader, value ast.Value) (err error) {
		return tokenHeaderString("cty", &header.cty, value)
	},
	"crit": tokenHeaderCrit,
}
+
+// tokenHeaderCrit handles the 'crit' header parameter
+func tokenHeaderCrit(header *tokenHeader, value ast.Value) (err error) {
+ var ok bool
+ var v *ast.Array
+ if v, ok = value.(*ast.Array); !ok {
+ err = fmt.Errorf("crit: must be a list")
+ return
+ }
+ header.crit = map[string]bool{}
+ for i := 0; i < v.Len(); i++ {
+ var tv ast.String
+ if tv, ok = v.Elem(i).Value.(ast.String); !ok {
+ err = fmt.Errorf("crit: must be a list of strings")
+ return
+ }
+ header.crit[string(tv)] = true
+ }
+ if len(header.crit) == 0 {
+ err = fmt.Errorf("crit: must be a nonempty list") // 'MUST NOT' use the empty list
+ return
+ }
+ return
+}
+
+// tokenHeaderString handles string-format JWT header parameters
+func tokenHeaderString(name string, where *string, value ast.Value) (err error) {
+ var ok bool
+ var v ast.String
+ if v, ok = value.(ast.String); !ok {
+ err = fmt.Errorf("%s: must be a string", name)
+ return
+ }
+ *where = string(v)
+ return
+}
+
+// parseTokenHeader parses the JWT header.
+func parseTokenHeader(token *JSONWebToken) (header tokenHeader, err error) {
+ header.unknown = []string{}
+ if err = token.decodedHeader.Iter(func(k *ast.Term, v *ast.Term) (err error) {
+ ks := string(k.Value.(ast.String))
+ var ok bool
+ var handler tokenHeaderHandler
+ if handler, ok = tokenHeaderTypes[ks]; ok {
+ if err = handler(&header, v.Value); err != nil {
+ return
+ }
+ } else {
+ header.unknown = append(header.unknown, ks)
+ }
+ return
+ }); err != nil {
+ return
+ }
+ return
+}
+
+// validTokenHeader returns true if the JOSE header is valid, otherwise false.
+func (header *tokenHeader) valid() bool {
+ // RFC7515 s4.1.1 alg MUST be present
+ if header.alg == "" {
+ return false
+ }
+ // RFC7515 4.1.11 JWS is invalid if there is a critical parameter that we did not recognize
+ for _, u := range header.unknown {
+ if header.crit[u] {
+ return false
+ }
+ }
+ return true
+}
+
+func commonBuiltinJWTEncodeSign(inputHeaders, jwsPayload, jwkSrc string) (v ast.Value, err error) {
+
+ keys, err := jwk.ParseString(jwkSrc)
+ if err != nil {
+ return nil, err
+ }
+ key, err := keys.Keys[0].Materialize()
+ if err != nil {
+ return nil, err
+ }
+ if jwk.GetKeyTypeFromKey(key) != keys.Keys[0].GetKeyType() {
+ return nil, fmt.Errorf("JWK derived key type and keyType parameter do not match")
+ }
+
+ standardHeaders := &jws.StandardHeaders{}
+ jwsHeaders := []byte(inputHeaders)
+ err = json.Unmarshal(jwsHeaders, standardHeaders)
+ if err != nil {
+ return nil, err
+ }
+ alg := standardHeaders.GetAlgorithm()
+
+ if (standardHeaders.Type == "" || standardHeaders.Type == "JWT") && !json.Valid([]byte(jwsPayload)) {
+ return nil, fmt.Errorf("type is JWT but payload is not JSON")
+ }
+
+ // process payload and sign
+ var jwsCompact []byte
+ jwsCompact, err = jws.SignLiteral([]byte(jwsPayload), alg, key, jwsHeaders)
+ if err != nil {
+ return nil, err
+ }
+ return ast.String(jwsCompact), nil
+
+}
+
+func builtinJWTEncodeSign(a ast.Value, b ast.Value, c ast.Value) (v ast.Value, err error) {
+
+ jwkSrc := c.String()
+
+ inputHeaders := a.String()
+
+ jwsPayload := b.String()
+
+ return commonBuiltinJWTEncodeSign(inputHeaders, jwsPayload, jwkSrc)
+
+}
+
+func builtinJWTEncodeSignRaw(a ast.Value, b ast.Value, c ast.Value) (v ast.Value, err error) {
+
+ jwkSrc, err := builtins.StringOperand(c, 1)
+ if err != nil {
+ return nil, err
+ }
+ inputHeaders, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+ jwsPayload, err := builtins.StringOperand(b, 1)
+ if err != nil {
+ return nil, err
+ }
+ return commonBuiltinJWTEncodeSign(string(inputHeaders), string(jwsPayload), string(jwkSrc))
+}
+
// Implements full JWT decoding, validation and verification.
func builtinJWTDecodeVerify(a ast.Value, b ast.Value) (v ast.Value, err error) {
	// io.jwt.decode_verify(string, constraints, [valid, header, payload])
	//
	// If valid is true then the signature verifies and all constraints are met.
	// If valid is false then either the signature did not verify or some constraint
	// was not met.
	//
	// Decoding errors etc are returned as errors.
	arr := []*ast.Term{
		ast.BooleanTerm(false), // by default, not verified
		ast.NewTerm(ast.NewObject()),
		ast.NewTerm(ast.NewObject()),
	}
	var constraints tokenConstraints
	if constraints, err = parseTokenConstraints(b); err != nil {
		return
	}
	if err = constraints.validate(); err != nil {
		return
	}
	var token *JSONWebToken
	var p ast.Value
	// Loop to unwrap nested JWTs (cty == "JWT"); each iteration decodes and
	// verifies one layer, replacing `a` with the inner token.
	for {
		// RFC7519 7.2 #1-2 split into parts
		if token, err = decodeJWT(a); err != nil {
			return
		}
		// RFC7519 7.2 #3, #4, #6
		if err = token.decodeHeader(); err != nil {
			return
		}
		// RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields
		var header tokenHeader
		if header, err = parseTokenHeader(token); err != nil {
			return
		}
		if !header.valid() {
			return ast.NewArray(arr...), nil
		}
		// Check constraints that impact signature verification.
		if constraints.alg != "" && constraints.alg != header.alg {
			return ast.NewArray(arr...), nil
		}
		// RFC7159 7.2 #7 verify the signature
		var signature string
		if signature, err = token.decodeSignature(); err != nil {
			return
		}
		if err = constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil {
			if err == errSignatureNotVerified {
				// Verification failure is reported via the result array, not an error.
				return ast.NewArray(arr...), nil
			}
			return
		}
		// RFC7159 7.2 #9-10 decode the payload
		if p, err = builtinBase64UrlDecode(ast.String(token.payload)); err != nil {
			return nil, fmt.Errorf("JWT payload had invalid encoding: %v", err)
		}
		// RFC7159 7.2 #8 and 5.2 cty
		if strings.ToUpper(header.cty) == "JWT" {
			// Nested JWT, go round again
			a = p
			continue
		} else {
			// Non-nested JWT (or we've reached the bottom of the nesting).
			break
		}
	}
	var payload ast.Object
	if payload, err = extractJSONObject(string(p.(ast.String))); err != nil {
		return
	}
	// Check registered claim names against constraints or environment
	// RFC7159 4.1.1 iss
	// NOTE(review): when an iss constraint is set but the token carries no
	// iss claim, the token is NOT rejected — confirm this is intended.
	if constraints.iss != "" {
		if iss := payload.Get(jwtIssKey); iss != nil {
			issVal := string(iss.Value.(ast.String))
			if constraints.iss != issVal {
				return ast.NewArray(arr...), nil
			}
		}
	}
	// RFC7159 4.1.3 aud
	// A token bearing an aud claim is rejected unless the constraint matches;
	// a token without one is rejected only when an aud constraint is set.
	if aud := payload.Get(jwtAudKey); aud != nil {
		if !constraints.validAudience(aud.Value) {
			return ast.NewArray(arr...), nil
		}
	} else {
		if constraints.aud != "" {
			return ast.NewArray(arr...), nil
		}
	}
	// RFC7159 4.1.4 exp
	if exp := payload.Get(jwtExpKey); exp != nil {
		if constraints.time < 0 {
			constraints.time = time.Now().UnixNano()
		}

		// constraints.time is in nanoseconds but exp Value is in seconds
		compareTime := ast.Number(strconv.FormatFloat(float64(constraints.time)/1000000000, 'g', -1, 64))

		if ast.Compare(compareTime, exp.Value.(ast.Number)) != -1 {
			return ast.NewArray(arr...), nil
		}
	}
	// RFC7159 4.1.5 nbf
	if nbf := payload.Get(jwtNbfKey); nbf != nil {
		if constraints.time < 0 {
			constraints.time = time.Now().UnixNano()
		}

		// constraints.time is in nanoseconds but nbf Value is in seconds
		compareTime := ast.Number(strconv.FormatFloat(float64(constraints.time)/1000000000, 'g', -1, 64))

		if ast.Compare(compareTime, nbf.Value.(ast.Number)) == -1 {
			return ast.NewArray(arr...), nil
		}
	}
	// Format the result
	arr[0] = ast.BooleanTerm(true)
	arr[1] = ast.NewTerm(token.decodedHeader)
	arr[2] = ast.NewTerm(payload)
	return ast.NewArray(arr...), nil
}
+
+// -- Utilities --
+
+func decodeJWT(a ast.Value) (*JSONWebToken, error) {
+ // Parse the JSON Web Token
+ astEncode, err := builtins.StringOperand(a, 1)
+ if err != nil {
+ return nil, err
+ }
+
+ encoding := string(astEncode)
+ if !strings.Contains(encoding, ".") {
+ return nil, errors.New("encoded JWT had no period separators")
+ }
+
+ parts := strings.Split(encoding, ".")
+ if len(parts) != 3 {
+ return nil, fmt.Errorf("encoded JWT must have 3 sections, found %d", len(parts))
+ }
+
+ return &JSONWebToken{header: parts[0], payload: parts[1], signature: parts[2]}, nil
+}
+
+func (token *JSONWebToken) decodeSignature() (string, error) {
+ decodedSignature, err := builtinBase64UrlDecode(ast.String(token.signature))
+ if err != nil {
+ return "", err
+ }
+
+ signatureAst, err := builtins.StringOperand(decodedSignature, 1)
+ if err != nil {
+ return "", err
+ }
+ return string(signatureAst), err
+}
+
// Extract, validate and return the JWT header as an ast.Object.
func validateJWTHeader(h string) (ast.Object, error) {
	header, err := extractJSONObject(h)
	if err != nil {
		return nil, fmt.Errorf("bad JWT header: %v", err)
	}

	// There are two kinds of JWT tokens, a JSON Web Signature (JWS) and
	// a JSON Web Encryption (JWE). The latter is very involved, and we
	// won't support it for now.
	// This code checks which kind of JWT we are dealing with according to
	// RFC 7516 Section 9: https://tools.ietf.org/html/rfc7516#section-9
	// (the presence of an "enc" header parameter marks a JWE).
	if header.Get(jwtEncKey) != nil {
		return nil, errors.New("JWT is a JWE object, which is not supported")
	}

	return header, nil
}
+
// extractJSONObject parses s as JSON and requires the result to be an
// object, which is returned as an ast.Object.
func extractJSONObject(s string) (ast.Object, error) {
	// XXX: This code relies on undocumented behavior of Go's
	// json.Unmarshal using the last occurrence of duplicate keys in a JSON
	// Object. If duplicate keys are present in a JWT, the last must be
	// used or the token rejected. Since detecting duplicates is tantamount
	// to parsing it ourselves, we're relying on the Go implementation
	// using the last occurring instance of the key, which is the behavior
	// as of Go 1.8.1.
	v, err := builtinJSONUnmarshal(ast.String(s))
	if err != nil {
		return nil, fmt.Errorf("invalid JSON: %v", err)
	}

	o, ok := v.(ast.Object)
	if !ok {
		return nil, errors.New("decoded JSON type was not an Object")
	}

	return o, nil
}
+
+// getInputSha returns the SHA checksum of the input
+func getInputSHA(input []byte, h func() hash.Hash) (hash []byte) {
+ hasher := h()
+ hasher.Write(input)
+ return hasher.Sum(nil)
+}
+
// init registers all JWT built-in functions with the topdown evaluator.
func init() {
	RegisterFunctionalBuiltin1(ast.JWTDecode.Name, builtinJWTDecode)
	RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256)
	RegisterBuiltinFunc(ast.JWTVerifyRS384.Name, builtinJWTVerifyRS384)
	RegisterBuiltinFunc(ast.JWTVerifyRS512.Name, builtinJWTVerifyRS512)
	RegisterBuiltinFunc(ast.JWTVerifyPS256.Name, builtinJWTVerifyPS256)
	RegisterBuiltinFunc(ast.JWTVerifyPS384.Name, builtinJWTVerifyPS384)
	RegisterBuiltinFunc(ast.JWTVerifyPS512.Name, builtinJWTVerifyPS512)
	RegisterBuiltinFunc(ast.JWTVerifyES256.Name, builtinJWTVerifyES256)
	RegisterBuiltinFunc(ast.JWTVerifyES384.Name, builtinJWTVerifyES384)
	RegisterBuiltinFunc(ast.JWTVerifyES512.Name, builtinJWTVerifyES512)
	RegisterBuiltinFunc(ast.JWTVerifyHS256.Name, builtinJWTVerifyHS256)
	RegisterBuiltinFunc(ast.JWTVerifyHS384.Name, builtinJWTVerifyHS384)
	RegisterBuiltinFunc(ast.JWTVerifyHS512.Name, builtinJWTVerifyHS512)
	RegisterFunctionalBuiltin2(ast.JWTDecodeVerify.Name, builtinJWTDecodeVerify)
	RegisterFunctionalBuiltin3(ast.JWTEncodeSignRaw.Name, builtinJWTEncodeSignRaw)
	RegisterFunctionalBuiltin3(ast.JWTEncodeSign.Name, builtinJWTEncodeSign)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/trace.go b/vendor/github.com/open-policy-agent/opa/topdown/trace.go
new file mode 100644
index 00000000..d2734c37
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/trace.go
@@ -0,0 +1,462 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+ "strings"
+
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/lcss"
+ "github.com/open-policy-agent/opa/topdown/builtins"
+)
+
// Column-layout constants used by PrettyTraceWithLocation.
const (
	minLocationWidth      = 5 // len("query")
	maxIdealLocationWidth = 64
	locationPadding       = 4
)
+
// Op defines the types of tracing events. The values are the strings
// emitted in trace output.
type Op string

const (
	// EnterOp is emitted when a new query is about to be evaluated.
	EnterOp Op = "Enter"

	// ExitOp is emitted when a query has evaluated to true.
	ExitOp Op = "Exit"

	// EvalOp is emitted when an expression is about to be evaluated.
	EvalOp Op = "Eval"

	// RedoOp is emitted when an expression, rule, or query is being re-evaluated.
	RedoOp Op = "Redo"

	// SaveOp is emitted when an expression is saved instead of evaluated
	// during partial evaluation.
	SaveOp Op = "Save"

	// FailOp is emitted when an expression evaluates to false.
	FailOp Op = "Fail"

	// DuplicateOp is emitted when a query has produced a duplicate value. The search
	// will stop at the point where the duplicate was emitted and backtrack.
	DuplicateOp Op = "Duplicate"

	// NoteOp is emitted when an expression invokes a tracing built-in function.
	NoteOp Op = "Note"

	// IndexOp is emitted during an expression evaluation to represent lookup
	// matches.
	IndexOp Op = "Index"

	// WasmOp is emitted when resolving a ref using an external
	// Resolver.
	WasmOp Op = "Wasm"
)
+
// VarMetadata provides some user facing information about
// a variable in some policy. It is used by rewrite() to restore
// user-facing variable names in trace output.
type VarMetadata struct {
	Name     ast.Var       `json:"name"`
	Location *ast.Location `json:"location"`
}
+
// Event contains state associated with a tracing event. One Event is
// emitted per evaluation step; see Op for the possible event types.
type Event struct {
	Op            Op                      // Identifies type of event.
	Node          ast.Node                // Contains AST node relevant to the event.
	Location      *ast.Location           // The location of the Node this event relates to.
	QueryID       uint64                  // Identifies the query this event belongs to.
	ParentID      uint64                  // Identifies the parent query this event belongs to.
	Locals        *ast.ValueMap           // Contains local variable bindings from the query context. Nil if variables were not included in the trace event.
	LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event.
	Message       string                  // Contains message for Note events.
	Ref           *ast.Ref                // Identifies the subject ref for the event. Only applies to Index and Wasm operations.
}
+
+// HasRule returns true if the Event contains an ast.Rule.
+func (evt *Event) HasRule() bool {
+ _, ok := evt.Node.(*ast.Rule)
+ return ok
+}
+
+// HasBody returns true if the Event contains an ast.Body.
+func (evt *Event) HasBody() bool {
+ _, ok := evt.Node.(ast.Body)
+ return ok
+}
+
+// HasExpr returns true if the Event contains an ast.Expr.
+func (evt *Event) HasExpr() bool {
+ _, ok := evt.Node.(*ast.Expr)
+ return ok
+}
+
+// Equal returns true if this event is equal to the other event.
+func (evt *Event) Equal(other *Event) bool {
+ if evt.Op != other.Op {
+ return false
+ }
+ if evt.QueryID != other.QueryID {
+ return false
+ }
+ if evt.ParentID != other.ParentID {
+ return false
+ }
+ if !evt.equalNodes(other) {
+ return false
+ }
+ return evt.Locals.Equal(other.Locals)
+}
+
// String returns a human-readable one-line summary of the event.
func (evt *Event) String() string {
	return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID)
}
+
+func (evt *Event) equalNodes(other *Event) bool {
+ switch a := evt.Node.(type) {
+ case ast.Body:
+ if b, ok := other.Node.(ast.Body); ok {
+ return a.Equal(b)
+ }
+ case *ast.Rule:
+ if b, ok := other.Node.(*ast.Rule); ok {
+ return a.Equal(b)
+ }
+ case *ast.Expr:
+ if b, ok := other.Node.(*ast.Expr); ok {
+ return a.Equal(b)
+ }
+ case nil:
+ return other.Node == nil
+ }
+ return false
+}
+
// Tracer defines the interface for tracing in the top-down evaluation engine.
// Deprecated: Use QueryTracer instead.
type Tracer interface {
	Enabled() bool
	Trace(*Event)
}

// QueryTracer defines the interface for tracing in the top-down evaluation engine.
// The implementation can provide additional configuration to modify the tracing
// behavior for query evaluations.
type QueryTracer interface {
	Enabled() bool
	TraceEvent(Event)
	Config() TraceConfig
}

// TraceConfig defines some common configuration for Tracer implementations
type TraceConfig struct {
	PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer.
}
+
+// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance.
+type legacyTracer struct {
+ t Tracer
+}
+
+func (l *legacyTracer) Enabled() bool {
+ return l.t.Enabled()
+}
+
+func (l *legacyTracer) Config() TraceConfig {
+ return TraceConfig{
+ PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables
+ }
+}
+
+func (l *legacyTracer) TraceEvent(evt Event) {
+ l.t.Trace(&evt)
+}
+
+// WrapLegacyTracer will create a new QueryTracer which wraps an
+// older Tracer instance.
+func WrapLegacyTracer(tracer Tracer) QueryTracer {
+ return &legacyTracer{t: tracer}
+}
+
+// BufferTracer implements the Tracer and QueryTracer interface by
+// simply buffering all events received.
+type BufferTracer []*Event
+
+// NewBufferTracer returns a new BufferTracer.
+func NewBufferTracer() *BufferTracer {
+ return &BufferTracer{}
+}
+
+// Enabled always returns true if the BufferTracer is instantiated.
+func (b *BufferTracer) Enabled() bool {
+ if b == nil {
+ return false
+ }
+ return true
+}
+
+// Trace adds the event to the buffer.
+// Deprecated: Use TraceEvent instead.
+func (b *BufferTracer) Trace(evt *Event) {
+ *b = append(*b, evt)
+}
+
+// TraceEvent adds the event to the buffer.
+func (b *BufferTracer) TraceEvent(evt Event) {
+ *b = append(*b, &evt)
+}
+
+// Config returns the Tracers standard configuration
+func (b *BufferTracer) Config() TraceConfig {
+ return TraceConfig{PlugLocalVars: true}
+}
+
+// PrettyTrace pretty prints the trace to the writer.
+func PrettyTrace(w io.Writer, trace []*Event) {
+ depths := depths{}
+ for _, event := range trace {
+ depth := depths.GetOrSet(event.QueryID, event.ParentID)
+ fmt.Fprintln(w, formatEvent(event, depth))
+ }
+}
+
+// PrettyTraceWithLocation prints the trace to the writer and includes location information
+func PrettyTraceWithLocation(w io.Writer, trace []*Event) {
+ depths := depths{}
+
+ filePathAliases, longest := getShortenedFileNames(trace)
+
+ // Always include some padding between the trace and location
+ locationWidth := longest + locationPadding
+
+ for _, event := range trace {
+ depth := depths.GetOrSet(event.QueryID, event.ParentID)
+ location := formatLocation(event, filePathAliases)
+ fmt.Fprintf(w, "%-*s %s\n", locationWidth, location, formatEvent(event, depth))
+ }
+}
+
+func formatEvent(event *Event, depth int) string {
+ padding := formatEventPadding(event, depth)
+ if event.Op == NoteOp {
+ return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message)
+ }
+
+ var details interface{}
+ if node, ok := event.Node.(*ast.Rule); ok {
+ details = node.Path()
+ } else if event.Ref != nil {
+ details = event.Ref
+ } else {
+ details = rewrite(event).Node
+ }
+
+ template := "%v%v %v"
+ opts := []interface{}{padding, event.Op, details}
+
+ if event.Message != "" {
+ template += " %v"
+ opts = append(opts, event.Message)
+ }
+
+ return fmt.Sprintf(template, opts...)
+}
+
+func formatEventPadding(event *Event, depth int) string {
+ spaces := formatEventSpaces(event, depth)
+ padding := ""
+ if spaces > 1 {
+ padding += strings.Repeat("| ", spaces-1)
+ }
+ return padding
+}
+
+func formatEventSpaces(event *Event, depth int) int {
+ switch event.Op {
+ case EnterOp:
+ return depth
+ case RedoOp:
+ if _, ok := event.Node.(*ast.Expr); !ok {
+ return depth
+ }
+ }
+ return depth + 1
+}
+
// getShortenedFileNames will return a map of file paths to shortened aliases
// that were found in the trace. It also returns the longest location expected
// (in characters, including the ":<row>" suffix) so callers can size the
// location column.
func getShortenedFileNames(trace []*Event) (map[string]string, int) {
	// Get a deduplicated list of all file paths
	// and the longest file path size
	fpAliases := map[string]string{}
	var canShorten [][]byte
	longestLocation := 0
	for _, event := range trace {
		if event.Location != nil {
			if event.Location.File != "" {
				// length of ":"
				curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1
				if curLen > longestLocation {
					longestLocation = curLen
				}

				if _, ok := fpAliases[event.Location.File]; ok {
					continue
				}

				// Only try and shorten the middle parts of paths, ex: bundle1/.../a/b/policy.rego
				path := filepath.Dir(event.Location.File)
				path = strings.TrimPrefix(path, string(filepath.Separator))
				firstSlash := strings.IndexRune(path, filepath.Separator)
				if firstSlash > 0 {
					path = path[firstSlash+1:]
				}
				canShorten = append(canShorten, []byte(path))

				// Default to just alias their full path
				fpAliases[event.Location.File] = event.Location.File
			} else {
				// File-less locations render as "query:<row>"; see formatLocation.
				// length of ":"
				curLen := minLocationWidth + numDigits10(event.Location.Row) + 1
				if curLen > longestLocation {
					longestLocation = curLen
				}
			}
		}
	}

	if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth {
		// Find the longest common path segment..
		var lcs string
		if len(canShorten) > 1 {
			lcs = string(lcss.LongestCommonSubstring(canShorten...))
		} else {
			lcs = string(canShorten[0])
		}

		// Don't just swap in the full LCSS, trim it down to be the least amount of
		// characters to reach our "ideal" width boundary giving as much
		// detail as possible without going too long.
		diff := maxIdealLocationWidth - (longestLocation - len(lcs) + 3)
		if diff > 0 {
			if diff > len(lcs) {
				lcs = ""
			} else {
				// Favor data on the right hand side of the path
				lcs = lcs[:len(lcs)-diff]
			}
		}

		// Swap in "..." for the longest common path, but if it makes things better
		if len(lcs) > 3 {
			for path := range fpAliases {
				fpAliases[path] = strings.Replace(path, lcs, "...", 1)
			}

			// Drop the overall length down to match our substitution
			longestLocation = longestLocation - (len(lcs) - 3)
		}
	}

	return fpAliases, longestLocation
}
+
// numDigits10 returns the number of base-10 digits needed to render n.
func numDigits10(n int) int {
	digits := 1
	for n >= 10 {
		n /= 10
		digits++
	}
	return digits
}
+
+func formatLocation(event *Event, fileAliases map[string]string) string {
+
+ location := event.Location
+ if location == nil {
+ return ""
+ }
+
+ if location.File == "" {
+ return fmt.Sprintf("query:%v", location.Row)
+ }
+
+ return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row)
+}
+
// depths is a helper for computing the depth of an event. Events within the
// same query all have the same depth. The depth of query is
// depth(parent(query))+1.
type depths map[uint64]int

// GetOrSet returns the cached depth for qid, computing it as the parent's
// depth plus one (and caching it) on first sight.
func (ds depths) GetOrSet(qid uint64, pqid uint64) int {
	if d := ds[qid]; d != 0 {
		return d
	}
	d := ds[pqid] + 1
	ds[qid] = d
	return d
}
+
+func builtinTrace(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+
+ str, err := builtins.StringOperand(args[0].Value, 1)
+ if err != nil {
+ return handleBuiltinErr(ast.Trace.Name, bctx.Location, err)
+ }
+
+ if !bctx.TraceEnabled {
+ return iter(ast.BooleanTerm(true))
+ }
+
+ evt := Event{
+ Op: NoteOp,
+ Location: bctx.Location,
+ QueryID: bctx.QueryID,
+ ParentID: bctx.ParentID,
+ Message: string(str),
+ }
+
+ for i := range bctx.QueryTracers {
+ bctx.QueryTracers[i].TraceEvent(evt)
+ }
+
+ return iter(ast.BooleanTerm(true))
+}
+
// rewrite returns a shallow copy of event whose Node is a copy with local
// variables replaced by their user-facing names from LocalMetadata. The
// original event is left untouched.
func rewrite(event *Event) *Event {

	cpy := *event

	var node ast.Node

	switch v := event.Node.(type) {
	case *ast.Expr:
		node = v.Copy()
	case ast.Body:
		node = v.Copy()
	case *ast.Rule:
		node = v.Copy()
	}

	// The transform callback below never returns an error, so the error
	// result of TransformVars is intentionally ignored.
	ast.TransformVars(node, func(v ast.Var) (ast.Value, error) {
		if meta, ok := cpy.LocalMetadata[v]; ok {
			return meta.Name, nil
		}
		return v, nil
	})

	cpy.Node = node

	return &cpy
}
+
// init registers the trace() built-in function.
func init() {
	RegisterBuiltinFunc(ast.Trace.Name, builtinTrace)
}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type.go b/vendor/github.com/open-policy-agent/opa/topdown/type.go
new file mode 100644
index 00000000..2fe7a39a
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/type.go
@@ -0,0 +1,82 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+)
+
+func builtinIsNumber(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.Number:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func builtinIsString(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.String:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func builtinIsBoolean(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.Boolean:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func builtinIsArray(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case *ast.Array:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func builtinIsSet(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.Set:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func builtinIsObject(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.Object:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func builtinIsNull(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.Null:
+ return ast.Boolean(true), nil
+ default:
+ return nil, BuiltinEmpty{}
+ }
+}
+
+func init() {
+ RegisterFunctionalBuiltin1(ast.IsNumber.Name, builtinIsNumber)
+ RegisterFunctionalBuiltin1(ast.IsString.Name, builtinIsString)
+ RegisterFunctionalBuiltin1(ast.IsBoolean.Name, builtinIsBoolean)
+ RegisterFunctionalBuiltin1(ast.IsArray.Name, builtinIsArray)
+ RegisterFunctionalBuiltin1(ast.IsSet.Name, builtinIsSet)
+ RegisterFunctionalBuiltin1(ast.IsObject.Name, builtinIsObject)
+ RegisterFunctionalBuiltin1(ast.IsNull.Name, builtinIsNull)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/type_name.go b/vendor/github.com/open-policy-agent/opa/topdown/type_name.go
new file mode 100644
index 00000000..2154abb5
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/type_name.go
@@ -0,0 +1,36 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "fmt"
+
+ "github.com/open-policy-agent/opa/ast"
+)
+
+func builtinTypeName(a ast.Value) (ast.Value, error) {
+ switch a.(type) {
+ case ast.Null:
+ return ast.String("null"), nil
+ case ast.Boolean:
+ return ast.String("boolean"), nil
+ case ast.Number:
+ return ast.String("number"), nil
+ case ast.String:
+ return ast.String("string"), nil
+ case *ast.Array:
+ return ast.String("array"), nil
+ case ast.Object:
+ return ast.String("object"), nil
+ case ast.Set:
+ return ast.String("set"), nil
+ }
+
+ return nil, fmt.Errorf("illegal value")
+}
+
+func init() {
+ RegisterFunctionalBuiltin1(ast.TypeNameBuiltin.Name, builtinTypeName)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/uuid.go b/vendor/github.com/open-policy-agent/opa/topdown/uuid.go
new file mode 100644
index 00000000..9e41c6d7
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/uuid.go
@@ -0,0 +1,37 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/internal/uuid"
+)
+
+type uuidCachingKey string
+
+func builtinUUIDRFC4122(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+
+ var result *ast.Term
+ var key = uuidCachingKey(args[0].Value.String())
+
+ if val, ok := bctx.Cache.Get(key); !ok {
+ s, err := uuid.New(bctx.Seed)
+ if err != nil {
+ return err
+ }
+
+ result = ast.NewTerm(ast.String(s))
+ bctx.Cache.Put(key, result)
+
+ } else {
+ result = val.(*ast.Term)
+ }
+
+ return iter(result)
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.UUIDRFC4122.Name, builtinUUIDRFC4122)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/topdown/walk.go b/vendor/github.com/open-policy-agent/opa/topdown/walk.go
new file mode 100644
index 00000000..520098f8
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/topdown/walk.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package topdown
+
+import (
+ "github.com/open-policy-agent/opa/ast"
+)
+
+func evalWalk(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
+ input := args[0]
+ filter := getOutputPath(args)
+ return walk(filter, nil, input, iter)
+}
+
+func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error {
+
+ if filter == nil || filter.Len() == 0 {
+ if path == nil {
+ path = ast.NewArray()
+ }
+
+ if err := iter(ast.ArrayTerm(ast.NewTerm(path), input)); err != nil {
+ return err
+ }
+ }
+
+ if filter != nil && filter.Len() > 0 {
+ key := filter.Elem(0)
+ filter = filter.Slice(1, -1)
+ if key.IsGround() {
+ if term := input.Get(key); term != nil {
+ path = pathAppend(path, key)
+ return walk(filter, path, term, iter)
+ }
+ return nil
+ }
+ }
+
+ switch v := input.Value.(type) {
+ case *ast.Array:
+ for i := 0; i < v.Len(); i++ {
+ path = pathAppend(path, ast.IntNumberTerm(i))
+ if err := walk(filter, path, v.Elem(i), iter); err != nil {
+ return err
+ }
+ path = path.Slice(0, path.Len()-1)
+ }
+ case ast.Object:
+ return v.Iter(func(k, v *ast.Term) error {
+ path = pathAppend(path, k)
+ if err := walk(filter, path, v, iter); err != nil {
+ return err
+ }
+ path = path.Slice(0, path.Len()-1)
+ return nil
+ })
+ case ast.Set:
+ return v.Iter(func(elem *ast.Term) error {
+ path = pathAppend(path, elem)
+ if err := walk(filter, path, elem, iter); err != nil {
+ return err
+ }
+ path = path.Slice(0, path.Len()-1)
+ return nil
+ })
+ }
+
+ return nil
+}
+
+func pathAppend(path *ast.Array, key *ast.Term) *ast.Array {
+ if path == nil {
+ return ast.NewArray(key)
+ }
+
+ return path.Append(key)
+}
+
+func getOutputPath(args []*ast.Term) *ast.Array {
+ if len(args) == 2 {
+ if arr, ok := args[1].Value.(*ast.Array); ok {
+ if arr.Len() == 2 {
+ if path, ok := arr.Elem(0).Value.(*ast.Array); ok {
+ return path
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func init() {
+ RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/types/decode.go b/vendor/github.com/open-policy-agent/opa/types/decode.go
new file mode 100644
index 00000000..e6eb540b
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/types/decode.go
@@ -0,0 +1,165 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+// Unmarshal deserializes bs and returns the resulting type.
+func Unmarshal(bs []byte) (result Type, err error) {
+
+ var hint rawtype
+
+ if err = util.UnmarshalJSON(bs, &hint); err == nil {
+ switch hint.Type {
+ case "null":
+ result = NewNull()
+ case "boolean":
+ result = NewBoolean()
+ case "number":
+ result = NewNumber()
+ case "string":
+ result = NewString()
+ case "array":
+ var arr rawarray
+ if err = util.UnmarshalJSON(bs, &arr); err == nil {
+ var err error
+ var static []Type
+ var dynamic Type
+ if static, err = unmarshalSlice(arr.Static); err != nil {
+ return nil, err
+ }
+ if len(arr.Dynamic) != 0 {
+ if dynamic, err = Unmarshal(arr.Dynamic); err != nil {
+ return nil, err
+ }
+ }
+ result = NewArray(static, dynamic)
+ }
+ case "object":
+ var obj rawobject
+ if err = util.UnmarshalJSON(bs, &obj); err == nil {
+ var err error
+ var static []*StaticProperty
+ var dynamic *DynamicProperty
+ if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil {
+ return nil, err
+ }
+ if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil {
+ return nil, err
+ }
+ result = NewObject(static, dynamic)
+ }
+ case "set":
+ var set rawset
+ if err = util.UnmarshalJSON(bs, &set); err == nil {
+ var of Type
+ if of, err = Unmarshal(set.Of); err == nil {
+ result = NewSet(of)
+ }
+ }
+ case "any":
+ var any rawunion
+ if err = util.UnmarshalJSON(bs, &any); err == nil {
+ var of []Type
+ if of, err = unmarshalSlice(any.Of); err == nil {
+ result = NewAny(of...)
+ }
+ }
+ case "function":
+ var decl rawdecl
+ if err = util.UnmarshalJSON(bs, &decl); err == nil {
+ var args []Type
+ if args, err = unmarshalSlice(decl.Args); err == nil {
+ var ret Type
+ if ret, err = Unmarshal(decl.Result); err == nil {
+ result = NewFunction(args, ret)
+ }
+ }
+ }
+ default:
+ err = fmt.Errorf("unsupported type '%v'", hint.Type)
+ }
+ }
+
+ return result, err
+}
+
+type rawtype struct {
+ Type string `json:"type"`
+}
+
+type rawarray struct {
+ Static []json.RawMessage `json:"static"`
+ Dynamic json.RawMessage `json:"dynamic"`
+}
+
+type rawobject struct {
+ Static []rawstaticproperty `json:"static"`
+ Dynamic rawdynamicproperty `json:"dynamic"`
+}
+
+type rawstaticproperty struct {
+ Key interface{} `json:"key"`
+ Value json.RawMessage `json:"value"`
+}
+
+type rawdynamicproperty struct {
+ Key json.RawMessage `json:"key"`
+ Value json.RawMessage `json:"value"`
+}
+
+type rawset struct {
+ Of json.RawMessage `json:"of"`
+}
+
+type rawunion struct {
+ Of []json.RawMessage `json:"of"`
+}
+
+type rawdecl struct {
+ Args []json.RawMessage `json:"args"`
+ Result json.RawMessage `json:"result"`
+}
+
+func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) {
+ result = make([]Type, len(elems))
+ for i := range elems {
+ if result[i], err = Unmarshal(elems[i]); err != nil {
+ return nil, err
+ }
+ }
+ return result, err
+}
+
+func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) {
+ result = make([]*StaticProperty, len(elems))
+ for i := range elems {
+ value, err := Unmarshal(elems[i].Value)
+ if err != nil {
+ return nil, err
+ }
+ result[i] = NewStaticProperty(elems[i].Key, value)
+ }
+ return result, err
+}
+
+func unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err error) {
+ if len(x.Key) == 0 {
+ return nil, nil
+ }
+ var key Type
+ if key, err = Unmarshal(x.Key); err == nil {
+ var value Type
+ if value, err = Unmarshal(x.Value); err == nil {
+ return NewDynamicProperty(key, value), nil
+ }
+ }
+ return nil, err
+}
diff --git a/vendor/github.com/open-policy-agent/opa/types/types.go b/vendor/github.com/open-policy-agent/opa/types/types.go
new file mode 100644
index 00000000..b364241e
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/types/types.go
@@ -0,0 +1,898 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package types declares data types for Rego values and helper functions to
+// operate on these types.
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/open-policy-agent/opa/util"
+)
+
+// Sprint returns the string representation of the type.
+func Sprint(x Type) string {
+ if x == nil {
+ return "???"
+ }
+ return x.String()
+}
+
+// Type represents a type of a term in the language.
+type Type interface {
+ String() string
+ typeMarker() string
+ json.Marshaler
+}
+
+func (Null) typeMarker() string { return "null" }
+func (Boolean) typeMarker() string { return "boolean" }
+func (Number) typeMarker() string { return "number" }
+func (String) typeMarker() string { return "string" }
+func (*Array) typeMarker() string { return "array" }
+func (*Object) typeMarker() string { return "object" }
+func (*Set) typeMarker() string { return "set" }
+func (Any) typeMarker() string { return "any" }
+func (Function) typeMarker() string { return "function" }
+
+// Null represents the null type.
+type Null struct{}
+
+// NewNull returns a new Null type.
+func NewNull() Null {
+ return Null{}
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t Null) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "type": t.typeMarker(),
+ })
+}
+
+func (t Null) String() string {
+ return "null"
+}
+
+// Boolean represents the boolean type.
+type Boolean struct{}
+
+// B represents an instance of the boolean type.
+var B = NewBoolean()
+
+// NewBoolean returns a new Boolean type.
+func NewBoolean() Boolean {
+ return Boolean{}
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t Boolean) MarshalJSON() ([]byte, error) {
+ repr := map[string]interface{}{
+ "type": t.typeMarker(),
+ }
+ return json.Marshal(repr)
+}
+
+func (t Boolean) String() string {
+ return t.typeMarker()
+}
+
+// String represents the string type.
+type String struct{}
+
+// S represents an instance of the string type.
+var S = NewString()
+
+// NewString returns a new String type.
+func NewString() String {
+ return String{}
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t String) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "type": t.typeMarker(),
+ })
+}
+
+func (t String) String() string {
+ return "string"
+}
+
+// Number represents the number type.
+type Number struct{}
+
+// N represents an instance of the number type.
+var N = NewNumber()
+
+// NewNumber returns a new Number type.
+func NewNumber() Number {
+ return Number{}
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t Number) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "type": t.typeMarker(),
+ })
+}
+
+func (Number) String() string {
+ return "number"
+}
+
+// Array represents the array type.
+type Array struct {
+ static []Type // static items
+ dynamic Type // dynamic items
+}
+
+// NewArray returns a new Array type.
+func NewArray(static []Type, dynamic Type) *Array {
+ return &Array{
+ static: static,
+ dynamic: dynamic,
+ }
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t *Array) MarshalJSON() ([]byte, error) {
+ repr := map[string]interface{}{
+ "type": t.typeMarker(),
+ }
+ if len(t.static) != 0 {
+ repr["static"] = t.static
+ }
+ if t.dynamic != nil {
+ repr["dynamic"] = t.dynamic
+ }
+ return json.Marshal(repr)
+}
+
+func (t *Array) String() string {
+ prefix := "array"
+ buf := []string{}
+ for _, tpe := range t.static {
+ buf = append(buf, Sprint(tpe))
+ }
+ var repr = prefix
+ if len(buf) > 0 {
+ repr += "<" + strings.Join(buf, ", ") + ">"
+ }
+ if t.dynamic != nil {
+ repr += "[" + t.dynamic.String() + "]"
+ }
+ return repr
+}
+
+// Dynamic returns the type of the array's dynamic elements.
+func (t *Array) Dynamic() Type {
+ return t.dynamic
+}
+
+// Len returns the number of static array elements.
+func (t *Array) Len() int {
+ return len(t.static)
+}
+
+// Select returns the type of element at the zero-based pos.
+func (t *Array) Select(pos int) Type {
+ if pos >= 0 {
+ if len(t.static) > pos {
+ return t.static[pos]
+ }
+ if t.dynamic != nil {
+ return t.dynamic
+ }
+ }
+ return nil
+}
+
+// Set represents the set type.
+type Set struct {
+ of Type
+}
+
+// NewSet returns a new Set type.
+func NewSet(of Type) *Set {
+ return &Set{
+ of: of,
+ }
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t *Set) MarshalJSON() ([]byte, error) {
+ repr := map[string]interface{}{
+ "type": t.typeMarker(),
+ }
+ if t.of != nil {
+ repr["of"] = t.of
+ }
+ return json.Marshal(repr)
+}
+
+func (t *Set) String() string {
+ prefix := "set"
+ return prefix + "[" + Sprint(t.of) + "]"
+}
+
+// StaticProperty represents a static object property.
+type StaticProperty struct {
+ Key interface{}
+ Value Type
+}
+
+// NewStaticProperty returns a new StaticProperty object.
+func NewStaticProperty(key interface{}, value Type) *StaticProperty {
+ return &StaticProperty{
+ Key: key,
+ Value: value,
+ }
+}
+
+// MarshalJSON returns the JSON encoding of p.
+func (p *StaticProperty) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "key": p.Key,
+ "value": p.Value,
+ })
+}
+
+// DynamicProperty represents a dynamic object property.
+type DynamicProperty struct {
+ Key Type
+ Value Type
+}
+
+// NewDynamicProperty returns a new DynamicProperty object.
+func NewDynamicProperty(key, value Type) *DynamicProperty {
+ return &DynamicProperty{
+ Key: key,
+ Value: value,
+ }
+}
+
+// MarshalJSON returns the JSON encoding of p.
+func (p *DynamicProperty) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "key": p.Key,
+ "value": p.Value,
+ })
+}
+
+func (p *DynamicProperty) String() string {
+ return fmt.Sprintf("%s: %s", Sprint(p.Key), Sprint(p.Value))
+}
+
+// Object represents the object type.
+type Object struct {
+ static []*StaticProperty // constant properties
+ dynamic *DynamicProperty // dynamic properties
+}
+
+// NewObject returns a new Object type.
+func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object {
+ sort.Slice(static, func(i, j int) bool {
+ cmp := util.Compare(static[i].Key, static[j].Key)
+ return cmp == -1
+ })
+ return &Object{
+ static: static,
+ dynamic: dynamic,
+ }
+}
+
+func (t *Object) String() string {
+ prefix := "object"
+ buf := make([]string, 0, len(t.static))
+ for _, p := range t.static {
+ buf = append(buf, fmt.Sprintf("%v: %v", p.Key, Sprint(p.Value)))
+ }
+ var repr = prefix
+ if len(buf) > 0 {
+ repr += "<" + strings.Join(buf, ", ") + ">"
+ }
+ if t.dynamic != nil {
+ repr += "[" + t.dynamic.String() + "]"
+ }
+ return repr
+}
+
+// DynamicValue returns the type of the object's dynamic elements.
+func (t *Object) DynamicValue() Type {
+ if t.dynamic == nil {
+ return nil
+ }
+ return t.dynamic.Value
+}
+
+// Keys returns the keys of the object's static elements.
+func (t *Object) Keys() []interface{} {
+ sl := make([]interface{}, 0, len(t.static))
+ for _, p := range t.static {
+ sl = append(sl, p.Key)
+ }
+ return sl
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t *Object) MarshalJSON() ([]byte, error) {
+ repr := map[string]interface{}{
+ "type": t.typeMarker(),
+ }
+ if len(t.static) != 0 {
+ repr["static"] = t.static
+ }
+ if t.dynamic != nil {
+ repr["dynamic"] = t.dynamic
+ }
+ return json.Marshal(repr)
+}
+
+// Select returns the type of the named property.
+func (t *Object) Select(name interface{}) Type {
+ for _, p := range t.static {
+ if util.Compare(p.Key, name) == 0 {
+ return p.Value
+ }
+ }
+ if t.dynamic != nil {
+ if Contains(t.dynamic.Key, TypeOf(name)) {
+ return t.dynamic.Value
+ }
+ }
+ return nil
+}
+
+// Any represents a dynamic type.
+type Any []Type
+
+// A represents the superset of all types.
+var A = NewAny()
+
+// NewAny returns a new Any type.
+func NewAny(of ...Type) Any {
+ sl := make(Any, len(of))
+ for i := range sl {
+ sl[i] = of[i]
+ }
+ return sl
+}
+
+// Contains returns true if t is a superset of other.
+func (t Any) Contains(other Type) bool {
+ if _, ok := other.(*Function); ok {
+ return false
+ }
+ for i := range t {
+ if Compare(t[i], other) == 0 {
+ return true
+ }
+ }
+ return len(t) == 0
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t Any) MarshalJSON() ([]byte, error) {
+ data := map[string]interface{}{
+ "type": t.typeMarker(),
+ }
+ if len(t) != 0 {
+ data["of"] = []Type(t)
+ }
+ return json.Marshal(data)
+}
+
+// Merge return a new Any type that is the superset of t and other.
+func (t Any) Merge(other Type) Any {
+ if otherAny, ok := other.(Any); ok {
+ return t.Union(otherAny)
+ }
+ if t.Contains(other) {
+ return t
+ }
+ return append(t, other)
+}
+
+// Union returns a new Any type that is the union of the two Any types.
+func (t Any) Union(other Any) Any {
+ if len(t) == 0 {
+ return t
+ }
+ if len(other) == 0 {
+ return other
+ }
+ cpy := make(Any, len(t))
+ for i := range cpy {
+ cpy[i] = t[i]
+ }
+ for i := range other {
+ if !cpy.Contains(other[i]) {
+ cpy = append(cpy, other[i])
+ }
+ }
+ return cpy
+}
+
+func (t Any) String() string {
+ prefix := "any"
+ if len(t) == 0 {
+ return prefix
+ }
+ buf := make([]string, len(t))
+ for i := range t {
+ buf[i] = Sprint(t[i])
+ }
+ return prefix + "<" + strings.Join(buf, ", ") + ">"
+}
+
+// Function represents a function type.
+type Function struct {
+ args []Type
+ result Type
+}
+
+// Args returns an argument list.
+func Args(x ...Type) []Type {
+ return x
+}
+
+// NewFunction returns a new Function object where xs[:len(xs)-1] are arguments
+// and xs[len(xs)-1] is the result type.
+func NewFunction(args []Type, result Type) *Function {
+ return &Function{
+ args: args,
+ result: result,
+ }
+}
+
+// Args returns the function's argument types.
+func (t *Function) Args() []Type {
+ return t.args
+}
+
+// Result returns the function's result type.
+func (t *Function) Result() Type {
+ return t.result
+}
+
+func (t *Function) String() string {
+ var args string
+ if len(t.args) != 1 {
+ args = "("
+ }
+ buf := []string{}
+ for _, a := range t.Args() {
+ buf = append(buf, Sprint(a))
+ }
+ args += strings.Join(buf, ", ")
+ if len(t.args) != 1 {
+ args += ")"
+ }
+ return fmt.Sprintf("%v => %v", args, Sprint(t.Result()))
+}
+
+// MarshalJSON returns the JSON encoding of t.
+func (t *Function) MarshalJSON() ([]byte, error) {
+ repr := map[string]interface{}{
+ "type": t.typeMarker(),
+ }
+ if len(t.args) > 0 {
+ repr["args"] = t.args
+ }
+ if t.result != nil {
+ repr["result"] = t.result
+ }
+ return json.Marshal(repr)
+}
+
+// UnmarshalJSON decodes the JSON serialized function declaration.
+func (t *Function) UnmarshalJSON(bs []byte) error {
+
+ tpe, err := Unmarshal(bs)
+ if err != nil {
+ return err
+ }
+
+ f, ok := tpe.(*Function)
+ if !ok {
+ return fmt.Errorf("invalid type")
+ }
+
+ *t = *f
+ return nil
+}
+
+// Union returns a new function representing the union of t and other. Functions
+// must have the same arity to be unioned.
+func (t *Function) Union(other *Function) *Function {
+ if other == nil {
+ return t
+ } else if t == nil {
+ return other
+ }
+ a := t.Args()
+ b := other.Args()
+ if len(a) != len(b) {
+ return nil
+ }
+ args := make([]Type, len(a))
+ for i := range a {
+ args[i] = Or(a[i], b[i])
+ }
+
+ return NewFunction(args, Or(t.Result(), other.Result()))
+}
+
+// Compare returns -1, 0, 1 based on comparison between a and b.
+func Compare(a, b Type) int {
+ x := typeOrder(a)
+ y := typeOrder(b)
+ if x > y {
+ return 1
+ } else if x < y {
+ return -1
+ }
+ switch a.(type) {
+ case nil, Null, Boolean, Number, String:
+ return 0
+ case *Array:
+ arrA := a.(*Array)
+ arrB := b.(*Array)
+ if arrA.dynamic != nil && arrB.dynamic == nil {
+ return 1
+ } else if arrB.dynamic != nil && arrA.dynamic == nil {
+ return -1
+ }
+ if arrB.dynamic != nil && arrA.dynamic != nil {
+ if cmp := Compare(arrA.dynamic, arrB.dynamic); cmp != 0 {
+ return cmp
+ }
+ }
+ return typeSliceCompare(arrA.static, arrB.static)
+ case *Object:
+ objA := a.(*Object)
+ objB := b.(*Object)
+ if objA.dynamic != nil && objB.dynamic == nil {
+ return 1
+ } else if objB.dynamic != nil && objA.dynamic == nil {
+ return -1
+ }
+ if objA.dynamic != nil && objB.dynamic != nil {
+ if cmp := Compare(objA.dynamic.Key, objB.dynamic.Key); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(objA.dynamic.Value, objB.dynamic.Value); cmp != 0 {
+ return cmp
+ }
+ }
+
+ lenStaticA := len(objA.static)
+ lenStaticB := len(objB.static)
+
+ minLen := lenStaticA
+ if lenStaticB < minLen {
+ minLen = lenStaticB
+ }
+
+ for i := 0; i < minLen; i++ {
+ if cmp := util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 {
+ return cmp
+ }
+ if cmp := Compare(objA.static[i].Value, objB.static[i].Value); cmp != 0 {
+ return cmp
+ }
+ }
+
+ if lenStaticA < lenStaticB {
+ return -1
+ } else if lenStaticB < lenStaticA {
+ return 1
+ }
+
+ return 0
+ case *Set:
+ setA := a.(*Set)
+ setB := b.(*Set)
+ if setA.of == nil && setB.of == nil {
+ return 0
+ } else if setA.of == nil {
+ return -1
+ } else if setB.of == nil {
+ return 1
+ }
+ return Compare(setA.of, setB.of)
+ case Any:
+ sl1 := typeSlice(a.(Any))
+ sl2 := typeSlice(b.(Any))
+ sort.Sort(sl1)
+ sort.Sort(sl2)
+ return typeSliceCompare(sl1, sl2)
+ case *Function:
+ fA := a.(*Function)
+ fB := b.(*Function)
+ if len(fA.args) < len(fB.args) {
+ return -1
+ } else if len(fA.args) > len(fB.args) {
+ return 1
+ }
+ for i := 0; i < len(fA.args); i++ {
+ if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ return Compare(fA.result, fB.result)
+ default:
+ panic("unreachable")
+ }
+}
+
+// Contains returns true if a is a superset or equal to b.
+func Contains(a, b Type) bool {
+ if any, ok := a.(Any); ok {
+ return any.Contains(b)
+ }
+ return Compare(a, b) == 0
+}
+
+// Or returns a type that represents the union of a and b. If one type is a
+// superset of the other, the superset is returned unchanged.
+func Or(a, b Type) Type {
+ if a == nil {
+ return b
+ } else if b == nil {
+ return a
+ }
+ fA, ok1 := a.(*Function)
+ fB, ok2 := b.(*Function)
+ if ok1 && ok2 {
+ return fA.Union(fB)
+ } else if ok1 || ok2 {
+ return nil
+ }
+ anyA, ok1 := a.(Any)
+ anyB, ok2 := b.(Any)
+ if ok1 {
+ return anyA.Merge(b)
+ }
+ if ok2 {
+ return anyB.Merge(a)
+ }
+ if Compare(a, b) == 0 {
+ return a
+ }
+ return NewAny(a, b)
+}
+
+// Select returns a property or item of a.
+func Select(a Type, x interface{}) Type {
+ switch a := a.(type) {
+ case *Array:
+ n, ok := x.(json.Number)
+ if !ok {
+ return nil
+ }
+ pos, err := n.Int64()
+ if err != nil {
+ return nil
+ }
+ return a.Select(int(pos))
+ case *Object:
+ return a.Select(x)
+ case *Set:
+ tpe := TypeOf(x)
+ if Compare(a.of, tpe) == 0 {
+ return a.of
+ }
+ if any, ok := a.of.(Any); ok {
+ if any.Contains(tpe) {
+ return tpe
+ }
+ }
+ return nil
+ case Any:
+ if Compare(a, A) == 0 {
+ return A
+ }
+ var tpe Type
+ for i := range a {
+ // TODO(tsandall): test nil/nil
+ tpe = Or(Select(a[i], x), tpe)
+ }
+ return tpe
+ default:
+ return nil
+ }
+}
+
+// Keys returns the type of keys that can be enumerated for a. For arrays, the
+// keys are always number types, for objects the keys are always string types,
+// and for sets the keys are always the type of the set element.
+func Keys(a Type) Type {
+ switch a := a.(type) {
+ case *Array:
+ return N
+ case *Object:
+ var tpe Type
+ for _, k := range a.Keys() {
+ tpe = Or(tpe, TypeOf(k))
+ }
+ if a.dynamic != nil {
+ tpe = Or(tpe, a.dynamic.Key)
+ }
+ return tpe
+ case *Set:
+ return a.of
+ case Any:
+ // TODO(tsandall): ditto test
+ if Compare(a, A) == 0 {
+ return A
+ }
+ var tpe Type
+ for i := range a {
+ tpe = Or(Keys(a[i]), tpe)
+ }
+ return tpe
+ }
+ return nil
+}
+
+// Values returns the type of values that can be enumerated for a.
+func Values(a Type) Type {
+ switch a := a.(type) {
+ case *Array:
+ var tpe Type
+ for i := range a.static {
+ tpe = Or(tpe, a.static[i])
+ }
+ return Or(tpe, a.dynamic)
+ case *Object:
+ var tpe Type
+ for _, v := range a.static {
+ tpe = Or(tpe, v.Value)
+ }
+ if a.dynamic != nil {
+ tpe = Or(tpe, a.dynamic.Value)
+ }
+ return tpe
+ case *Set:
+ return a.of
+ case Any:
+ if Compare(a, A) == 0 {
+ return A
+ }
+ var tpe Type
+ for i := range a {
+ tpe = Or(Values(a[i]), tpe)
+ }
+ return tpe
+ }
+ return nil
+}
+
+// Nil returns true if a's type is unknown.
+func Nil(a Type) bool {
+ switch a := a.(type) {
+ case nil:
+ return true
+ case *Function:
+ for i := range a.args {
+ if Nil(a.args[i]) {
+ return true
+ }
+ }
+ return Nil(a.result)
+ case *Array:
+ for i := range a.static {
+ if Nil(a.static[i]) {
+ return true
+ }
+ }
+ if a.dynamic != nil {
+ return Nil(a.dynamic)
+ }
+ case *Object:
+ for i := range a.static {
+ if Nil(a.static[i].Value) {
+ return true
+ }
+ }
+ if a.dynamic != nil {
+ return Nil(a.dynamic.Key) || Nil(a.dynamic.Value)
+ }
+ case *Set:
+ return Nil(a.of)
+ }
+ return false
+}
+
+// TypeOf returns the type of the Golang native value.
+func TypeOf(x interface{}) Type {
+ switch x := x.(type) {
+ case nil:
+ return NewNull()
+ case bool:
+ return B
+ case string:
+ return S
+ case json.Number:
+ return N
+ case map[string]interface{}:
+ // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{}
+ // so map[string]interface{} must be handled here because the type checker uses the value
+ // to interface conversion when inferring object types.
+ static := make([]*StaticProperty, 0, len(x))
+ for k, v := range x {
+ static = append(static, NewStaticProperty(k, TypeOf(v)))
+ }
+ return NewObject(static, nil)
+ case map[interface{}]interface{}:
+ static := make([]*StaticProperty, 0, len(x))
+ for k, v := range x {
+ static = append(static, NewStaticProperty(k, TypeOf(v)))
+ }
+ return NewObject(static, nil)
+ case []interface{}:
+ static := make([]Type, len(x))
+ for i := range x {
+ static[i] = TypeOf(x[i])
+ }
+ return NewArray(static, nil)
+ }
+ panic("unreachable")
+}
+
+type typeSlice []Type
+
+func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
+func (s typeSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x }
+func (s typeSlice) Len() int { return len(s) }
+
+func typeSliceCompare(a, b []Type) int {
+ minLen := len(a)
+ if len(b) < minLen {
+ minLen = len(b)
+ }
+ for i := 0; i < minLen; i++ {
+ if cmp := Compare(a[i], b[i]); cmp != 0 {
+ return cmp
+ }
+ }
+ if len(a) < len(b) {
+ return -1
+ } else if len(b) < len(a) {
+ return 1
+ }
+ return 0
+}
+
+func typeOrder(x Type) int {
+ switch x.(type) {
+ case Null:
+ return 0
+ case Boolean:
+ return 1
+ case Number:
+ return 2
+ case String:
+ return 3
+ case *Array:
+ return 4
+ case *Object:
+ return 5
+ case *Set:
+ return 6
+ case Any:
+ return 7
+ case *Function:
+ return 8
+ case nil:
+ return -1
+ }
+ panic("unreachable")
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/backoff.go b/vendor/github.com/open-policy-agent/opa/util/backoff.go
new file mode 100644
index 00000000..9e3a3725
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/backoff.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "math/rand"
+ "time"
+)
+
+// DefaultBackoff returns a delay with an exponential backoff based on the
+// number of retries.
+func DefaultBackoff(base, max float64, retries int) time.Duration {
+ return Backoff(base, max, .2, 1.6, retries)
+}
+
+// Backoff returns a delay with an exponential backoff based on the number of
+// retries. Same algorithm used in gRPC.
+func Backoff(base, max, jitter, factor float64, retries int) time.Duration {
+ if retries == 0 {
+ return 0
+ }
+
+ backoff, max := float64(base), float64(max)
+ for backoff < max && retries > 0 {
+ backoff *= factor
+ retries--
+ }
+ if backoff > max {
+ backoff = max
+ }
+
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= 1 + jitter*(rand.Float64()*2-1)
+ if backoff < 0 {
+ return 0
+ }
+
+ return time.Duration(backoff)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/close.go b/vendor/github.com/open-policy-agent/opa/util/close.go
new file mode 100644
index 00000000..97c91773
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/close.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// Close reads the remaining bytes from the response and then closes it to
+// ensure that the connection is freed. If the body is not read and closed, a
+// leak can occur.
+func Close(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ return
+ }
+ resp.Body.Close()
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/compare.go b/vendor/github.com/open-policy-agent/opa/util/compare.go
new file mode 100644
index 00000000..dd187590
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/compare.go
@@ -0,0 +1,161 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "sort"
+)
+
+// Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is less than a.
+//
+// For comparison between values of different types, the following ordering is used:
+// nil < bool < float64 < string < []interface{} < map[string]interface{}. Slices and maps
+// are compared recursively. If one slice or map is a subset of the other slice or map
+// it is considered "less than". Nil is always equal to nil.
+//
+func Compare(a, b interface{}) int {
+ aSortOrder := sortOrder(a)
+ bSortOrder := sortOrder(b)
+ if aSortOrder < bSortOrder {
+ return -1
+ } else if bSortOrder < aSortOrder {
+ return 1
+ }
+ switch a := a.(type) {
+ case nil:
+ return 0
+ case bool:
+ switch b := b.(type) {
+ case bool:
+ if a == b {
+ return 0
+ }
+ if !a {
+ return -1
+ }
+ return 1
+ }
+ case json.Number:
+ switch b := b.(type) {
+ case json.Number:
+ return compareJSONNumber(a, b)
+ }
+ case string:
+ switch b := b.(type) {
+ case string:
+ if a == b {
+ return 0
+ } else if a < b {
+ return -1
+ }
+ return 1
+ }
+ case []interface{}:
+ switch b := b.(type) {
+ case []interface{}:
+ bLen := len(b)
+ aLen := len(a)
+ minLen := aLen
+ if bLen < minLen {
+ minLen = bLen
+ }
+ for i := 0; i < minLen; i++ {
+ cmp := Compare(a[i], b[i])
+ if cmp != 0 {
+ return cmp
+ }
+ }
+ if aLen == bLen {
+ return 0
+ } else if aLen < bLen {
+ return -1
+ }
+ return 1
+ }
+ case map[string]interface{}:
+ switch b := b.(type) {
+ case map[string]interface{}:
+ var aKeys []string
+ for k := range a {
+ aKeys = append(aKeys, k)
+ }
+ var bKeys []string
+ for k := range b {
+ bKeys = append(bKeys, k)
+ }
+ sort.Strings(aKeys)
+ sort.Strings(bKeys)
+ aLen := len(aKeys)
+ bLen := len(bKeys)
+ minLen := aLen
+ if bLen < minLen {
+ minLen = bLen
+ }
+ for i := 0; i < minLen; i++ {
+ if aKeys[i] < bKeys[i] {
+ return -1
+ } else if bKeys[i] < aKeys[i] {
+ return 1
+ }
+ aVal := a[aKeys[i]]
+ bVal := b[bKeys[i]]
+ cmp := Compare(aVal, bVal)
+ if cmp != 0 {
+ return cmp
+ }
+ }
+ if aLen == bLen {
+ return 0
+ } else if aLen < bLen {
+ return -1
+ }
+ return 1
+ }
+ }
+
+ panic(fmt.Sprintf("illegal arguments of type %T and type %T", a, b))
+}
+
+const (
+ nilSort = iota
+ boolSort = iota
+ numberSort = iota
+ stringSort = iota
+ arraySort = iota
+ objectSort = iota
+)
+
+func compareJSONNumber(a, b json.Number) int {
+ bigA, ok := new(big.Float).SetString(string(a))
+ if !ok {
+ panic("illegal value")
+ }
+ bigB, ok := new(big.Float).SetString(string(b))
+ if !ok {
+ panic("illegal value")
+ }
+ return bigA.Cmp(bigB)
+}
+
+func sortOrder(v interface{}) int {
+ switch v.(type) {
+ case nil:
+ return nilSort
+ case bool:
+ return boolSort
+ case json.Number:
+ return numberSort
+ case string:
+ return stringSort
+ case []interface{}:
+ return arraySort
+ case map[string]interface{}:
+ return objectSort
+ }
+ panic(fmt.Sprintf("illegal argument of type %T", v))
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/doc.go b/vendor/github.com/open-policy-agent/opa/util/doc.go
new file mode 100644
index 00000000..900dff8c
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package util provides generic utilities used throughout the policy engine.
+package util
diff --git a/vendor/github.com/open-policy-agent/opa/util/enumflag.go b/vendor/github.com/open-policy-agent/opa/util/enumflag.go
new file mode 100644
index 00000000..4796f026
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/enumflag.go
@@ -0,0 +1,59 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
+// EnumFlag implements the pflag.Value interface to provide enumerated command
+// line parameter values.
+type EnumFlag struct {
+ defaultValue string
+ vs []string
+ i int
+}
+
+// NewEnumFlag returns a new EnumFlag that has a defaultValue and vs enumerated
+// values.
+func NewEnumFlag(defaultValue string, vs []string) *EnumFlag {
+ f := &EnumFlag{
+ i: -1,
+ vs: vs,
+ defaultValue: defaultValue,
+ }
+ return f
+}
+
+// Type returns the valid enumeration values.
+func (f *EnumFlag) Type() string {
+ return "{" + strings.Join(f.vs, ",") + "}"
+}
+
+// String returns the EnumValue's value as string.
+func (f *EnumFlag) String() string {
+ if f.i == -1 {
+ return f.defaultValue
+ }
+ return f.vs[f.i]
+}
+
+// IsSet will return true if the EnumFlag has been set.
+func (f *EnumFlag) IsSet() bool {
+ return f.i != -1
+}
+
+// Set sets the enum value. If s is not a valid enum value, an error is
+// returned.
+func (f *EnumFlag) Set(s string) error {
+ for i := range f.vs {
+ if f.vs[i] == s {
+ f.i = i
+ return nil
+ }
+ }
+ return fmt.Errorf("must be one of %v", f.Type())
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/graph.go b/vendor/github.com/open-policy-agent/opa/util/graph.go
new file mode 100644
index 00000000..f0e82424
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/graph.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+// Traversal defines a basic interface to perform traversals.
+type Traversal interface {
+
+ // Edges should return the neighbours of node "u".
+ Edges(u T) []T
+
+ // Visited should return true if node "u" has already been visited in this
+ // traversal. If the same traversal is used multiple times, the state that
+ // tracks visited nodes should be reset.
+ Visited(u T) bool
+}
+
+// Equals should return true if node "u" equals node "v".
+type Equals func(u T, v T) bool
+
+// Iter should return true to indicate stop.
+type Iter func(u T) bool
+
+// DFS performs a depth first traversal calling f for each node starting from u.
+// If f returns true, traversal stops and DFS returns true.
+func DFS(t Traversal, f Iter, u T) bool {
+ lifo := NewLIFO(u)
+ for lifo.Size() > 0 {
+ next, _ := lifo.Pop()
+ if t.Visited(next) {
+ continue
+ }
+ if f(next) {
+ return true
+ }
+ for _, v := range t.Edges(next) {
+ lifo.Push(v)
+ }
+ }
+ return false
+}
+
+// BFS performs a breadth first traversal calling f for each node starting from
+// u. If f returns true, traversal stops and BFS returns true.
+func BFS(t Traversal, f Iter, u T) bool {
+ fifo := NewFIFO(u)
+ for fifo.Size() > 0 {
+ next, _ := fifo.Pop()
+ if t.Visited(next) {
+ continue
+ }
+ if f(next) {
+ return true
+ }
+ for _, v := range t.Edges(next) {
+ fifo.Push(v)
+ }
+ }
+ return false
+}
+
+// DFSPath returns a path from node a to node z found by performing
+// a depth first traversal. If no path is found, an empty slice is returned.
+func DFSPath(t Traversal, eq Equals, a, z T) []T {
+ p := dfsRecursive(t, eq, a, z, []T{})
+ for i := len(p)/2 - 1; i >= 0; i-- {
+ o := len(p) - i - 1
+ p[i], p[o] = p[o], p[i]
+ }
+ return p
+}
+
+func dfsRecursive(t Traversal, eq Equals, u, z T, path []T) []T {
+ if t.Visited(u) {
+ return path
+ }
+ for _, v := range t.Edges(u) {
+ if eq(v, z) {
+ path = append(path, z)
+ path = append(path, u)
+ return path
+ }
+ if p := dfsRecursive(t, eq, v, z, path); len(p) > 0 {
+ path = append(p, u)
+ return path
+ }
+ }
+ return path
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/hashmap.go b/vendor/github.com/open-policy-agent/opa/util/hashmap.go
new file mode 100644
index 00000000..11e7dca4
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/hashmap.go
@@ -0,0 +1,157 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
+// T is a concise way to refer to T.
+type T interface{}
+
+type hashEntry struct {
+ k T
+ v T
+ next *hashEntry
+}
+
+// HashMap represents a key/value map.
+type HashMap struct {
+ eq func(T, T) bool
+ hash func(T) int
+ table map[int]*hashEntry
+ size int
+}
+
+// NewHashMap returns a new empty HashMap.
+func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap {
+ return &HashMap{
+ eq: eq,
+ hash: hash,
+ table: make(map[int]*hashEntry),
+ size: 0,
+ }
+}
+
+// Copy returns a shallow copy of this HashMap.
+func (h *HashMap) Copy() *HashMap {
+ cpy := NewHashMap(h.eq, h.hash)
+ h.Iter(func(k, v T) bool {
+ cpy.Put(k, v)
+ return false
+ })
+ return cpy
+}
+
+// Equal returns true if this HashMap equals the other HashMap.
+// Two hash maps are equal if they contain the same key/value pairs.
+func (h *HashMap) Equal(other *HashMap) bool {
+ if h.Len() != other.Len() {
+ return false
+ }
+ return !h.Iter(func(k, v T) bool {
+ ov, ok := other.Get(k)
+ if !ok {
+ return true
+ }
+ return !h.eq(v, ov)
+ })
+}
+
+// Get returns the value for k.
+func (h *HashMap) Get(k T) (T, bool) {
+ hash := h.hash(k)
+ for entry := h.table[hash]; entry != nil; entry = entry.next {
+ if h.eq(entry.k, k) {
+ return entry.v, true
+ }
+ }
+ return nil, false
+}
+
+// Delete removes the key k.
+func (h *HashMap) Delete(k T) {
+ hash := h.hash(k)
+ var prev *hashEntry
+ for entry := h.table[hash]; entry != nil; entry = entry.next {
+ if h.eq(entry.k, k) {
+ if prev != nil {
+ prev.next = entry.next
+ } else {
+ h.table[hash] = entry.next
+ }
+ h.size--
+ return
+ }
+ prev = entry
+ }
+}
+
+// Hash returns the hash code for this hash map.
+func (h *HashMap) Hash() int {
+ var hash int
+ h.Iter(func(k, v T) bool {
+ hash += h.hash(k) + h.hash(v)
+ return false
+ })
+ return hash
+}
+
+// Iter invokes the iter function for each element in the HashMap.
+// If the iter function returns true, iteration stops and the return value is true.
+// If the iter function never returns true, iteration proceeds through all elements
+// and the return value is false.
+func (h *HashMap) Iter(iter func(T, T) bool) bool {
+ for _, entry := range h.table {
+ for ; entry != nil; entry = entry.next {
+ if iter(entry.k, entry.v) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Len returns the current size of this HashMap.
+func (h *HashMap) Len() int {
+ return h.size
+}
+
+// Put inserts a key/value pair into this HashMap. If the key is already present, the existing
+// value is overwritten.
+func (h *HashMap) Put(k T, v T) {
+ hash := h.hash(k)
+ head := h.table[hash]
+ for entry := head; entry != nil; entry = entry.next {
+ if h.eq(entry.k, k) {
+ entry.v = v
+ return
+ }
+ }
+ h.table[hash] = &hashEntry{k: k, v: v, next: head}
+ h.size++
+}
+
+func (h *HashMap) String() string {
+ var buf []string
+ h.Iter(func(k T, v T) bool {
+ buf = append(buf, fmt.Sprintf("%v: %v", k, v))
+ return false
+ })
+ return "{" + strings.Join(buf, ", ") + "}"
+}
+
+// Update returns a new HashMap with elements from the other HashMap put into this HashMap.
+// If the other HashMap contains elements with the same key as this HashMap, the value
+// from the other HashMap overwrites the value from this HashMap.
+func (h *HashMap) Update(other *HashMap) *HashMap {
+ updated := h.Copy()
+ other.Iter(func(k, v T) bool {
+ updated.Put(k, v)
+ return false
+ })
+ return updated
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/json.go b/vendor/github.com/open-policy-agent/opa/util/json.go
new file mode 100644
index 00000000..cfc94e06
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/json.go
@@ -0,0 +1,113 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/ghodss/yaml"
+)
+
+// UnmarshalJSON parses the JSON encoded data and stores the result in the value
+// pointed to by x.
+//
+// This function is intended to be used in place of the standard json.Unmarshal
+// function when json.Number is required.
+func UnmarshalJSON(bs []byte, x interface{}) (err error) {
+ buf := bytes.NewBuffer(bs)
+ decoder := NewJSONDecoder(buf)
+ if err := decoder.Decode(x); err != nil {
+ return err
+ }
+
+ // Since decoder.Decode validates only the first json structure in bytes,
+ // check if decoder has more bytes to consume to validate whole input bytes.
+ tok, err := decoder.Token()
+ if tok != nil {
+ return fmt.Errorf("error: invalid character '%s' after top-level value", tok)
+ }
+ if err != nil && err != io.EOF {
+ return err
+ }
+ return nil
+}
+
+// NewJSONDecoder returns a new decoder that reads from r.
+//
+// This function is intended to be used in place of the standard json.NewDecoder
+// when json.Number is required.
+func NewJSONDecoder(r io.Reader) *json.Decoder {
+ decoder := json.NewDecoder(r)
+ decoder.UseNumber()
+ return decoder
+}
+
+// MustUnmarshalJSON parses the JSON encoded data and returns the result.
+//
+// If the data cannot be decoded, this function will panic. This function is for
+// test purposes.
+func MustUnmarshalJSON(bs []byte) interface{} {
+ var x interface{}
+ if err := UnmarshalJSON(bs, &x); err != nil {
+ panic(err)
+ }
+ return x
+}
+
+// MustMarshalJSON returns the JSON encoding of x
+//
+// If the data cannot be encoded, this function will panic. This function is for
+// test purposes.
+func MustMarshalJSON(x interface{}) []byte {
+ bs, err := json.Marshal(x)
+ if err != nil {
+ panic(err)
+ }
+ return bs
+}
+
+// RoundTrip encodes to JSON, and decodes the result again.
+//
+// Thereby, it is converting its argument to the representation expected by
+// rego.Input and inmem's Write operations. Works with both references and
+// values.
+func RoundTrip(x *interface{}) error {
+ bs, err := json.Marshal(x)
+ if err != nil {
+ return err
+ }
+ return UnmarshalJSON(bs, x)
+}
+
+// Reference returns a pointer to its argument unless the argument already is
+// a pointer. If the argument is **t, or ***t, etc, it will return *t.
+//
+// Used for preparing Go types (including pointers to structs) into values to be
+// put through util.RoundTrip().
+func Reference(x interface{}) *interface{} {
+ var y interface{}
+ rv := reflect.ValueOf(x)
+ if rv.Kind() == reflect.Ptr {
+ return Reference(rv.Elem().Interface())
+ }
+ if rv.Kind() != reflect.Invalid {
+ y = rv.Interface()
+ return &y
+ }
+ return &x
+}
+
+// Unmarshal decodes a YAML or JSON value into the specified type.
+func Unmarshal(bs []byte, v interface{}) error {
+ bs, err := yaml.YAMLToJSON(bs)
+ if err != nil {
+ return err
+ }
+ return UnmarshalJSON(bs, v)
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/queue.go b/vendor/github.com/open-policy-agent/opa/util/queue.go
new file mode 100644
index 00000000..63a2ffc1
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/queue.go
@@ -0,0 +1,113 @@
+// Copyright 2017 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+// LIFO represents a simple LIFO queue.
+type LIFO struct {
+ top *queueNode
+ size int
+}
+
+type queueNode struct {
+ v T
+ next *queueNode
+}
+
+// NewLIFO returns a new LIFO queue containing elements ts starting with the
+// left-most argument at the bottom.
+func NewLIFO(ts ...T) *LIFO {
+ s := &LIFO{}
+ for i := range ts {
+ s.Push(ts[i])
+ }
+ return s
+}
+
+// Push adds a new element onto the LIFO.
+func (s *LIFO) Push(t T) {
+ node := &queueNode{v: t, next: s.top}
+ s.top = node
+ s.size++
+}
+
+// Peek returns the top of the LIFO. If LIFO is empty, returns nil, false.
+func (s *LIFO) Peek() (T, bool) {
+ if s.top == nil {
+ return nil, false
+ }
+ return s.top.v, true
+}
+
+// Pop returns the top of the LIFO and removes it. If LIFO is empty returns
+// nil, false.
+func (s *LIFO) Pop() (T, bool) {
+ if s.top == nil {
+ return nil, false
+ }
+ node := s.top
+ s.top = node.next
+ s.size--
+ return node.v, true
+}
+
+// Size returns the size of the LIFO.
+func (s *LIFO) Size() int {
+ return s.size
+}
+
+// FIFO represents a simple FIFO queue.
+type FIFO struct {
+ front *queueNode
+ back *queueNode
+ size int
+}
+
+// NewFIFO returns a new FIFO queue containing elements ts starting with the
+// left-most argument at the front.
+func NewFIFO(ts ...T) *FIFO {
+ s := &FIFO{}
+ for i := range ts {
+ s.Push(ts[i])
+ }
+ return s
+}
+
+// Push adds a new element onto the FIFO.
+func (s *FIFO) Push(t T) {
+ node := &queueNode{v: t, next: nil}
+ if s.front == nil {
+ s.front = node
+ s.back = node
+ } else {
+ s.back.next = node
+ s.back = node
+ }
+ s.size++
+}
+
+// Peek returns the front of the FIFO. If FIFO is empty, returns nil, false.
+func (s *FIFO) Peek() (T, bool) {
+ if s.front == nil {
+ return nil, false
+ }
+ return s.front.v, true
+}
+
+// Pop returns the front of the FIFO and removes it. If FIFO is empty returns
+// nil, false.
+func (s *FIFO) Pop() (T, bool) {
+ if s.front == nil {
+ return nil, false
+ }
+ node := s.front
+ s.front = node.next
+ s.size--
+ return node.v, true
+}
+
+// Size returns the size of the FIFO.
+func (s *FIFO) Size() int {
+ return s.size
+}
diff --git a/vendor/github.com/open-policy-agent/opa/util/wait.go b/vendor/github.com/open-policy-agent/opa/util/wait.go
new file mode 100644
index 00000000..b70ab6fc
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/util/wait.go
@@ -0,0 +1,34 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "time"
+)
+
+// WaitFunc will call passed function at an interval and return nil
+// as soon this function returns true.
+// If timeout is reached before the passed in function returns true
+// an error is returned.
+func WaitFunc(fun func() bool, interval, timeout time.Duration) error {
+ if fun() {
+ return nil
+ }
+ ticker := time.NewTicker(interval)
+ timer := time.NewTimer(timeout)
+ defer ticker.Stop()
+ defer timer.Stop()
+ for {
+ select {
+ case <-timer.C:
+ return fmt.Errorf("timeout")
+ case <-ticker.C:
+ if fun() {
+ return nil
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/open-policy-agent/opa/version/version.go b/vendor/github.com/open-policy-agent/opa/version/version.go
new file mode 100644
index 00000000..e2cd9f32
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/version/version.go
@@ -0,0 +1,24 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package version contains version information that is set at build time.
+package version
+
+import (
+ "runtime"
+)
+
+// Version is the canonical version of OPA.
+var Version = "0.26.0"
+
+// GoVersion is the version of Go this was built with
+var GoVersion = runtime.Version()
+
+// Additional version information that is displayed by the "version" command and used to
+// identify the version of running instances of OPA.
+var (
+ Vcs = ""
+ Timestamp = ""
+ Hostname = ""
+)
diff --git a/vendor/github.com/open-policy-agent/opa/version/wasm.go b/vendor/github.com/open-policy-agent/opa/version/wasm.go
new file mode 100644
index 00000000..d33b4770
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/version/wasm.go
@@ -0,0 +1,10 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// +build opa_wasm
+
+package version
+
+// WasmRuntimeAvailable indicates if a wasm runtime is available in this OPA.
+const WasmRuntimeAvailable = true
diff --git a/vendor/github.com/open-policy-agent/opa/version/wasm_nop.go b/vendor/github.com/open-policy-agent/opa/version/wasm_nop.go
new file mode 100644
index 00000000..7668f06f
--- /dev/null
+++ b/vendor/github.com/open-policy-agent/opa/version/wasm_nop.go
@@ -0,0 +1,10 @@
+// Copyright 2020 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// +build !opa_wasm
+
+package version
+
+// WasmRuntimeAvailable indicates if a wasm runtime is available in this OPA.
+const WasmRuntimeAvailable = false
diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE
new file mode 100644
index 00000000..27448585
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE
new file mode 100644
index 00000000..5c97abce
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/NOTICE
@@ -0,0 +1,17 @@
+runc
+
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (http://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see http://www.bis.doc.gov
+
+See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
new file mode 100644
index 00000000..a16a68e9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
@@ -0,0 +1,51 @@
+// +build linux
+
+package cgroups
+
+import (
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+type Manager interface {
+ // Applies cgroup configuration to the process with the specified pid
+ Apply(pid int) error
+
+ // Returns the PIDs inside the cgroup set
+ GetPids() ([]int, error)
+
+ // Returns the PIDs inside the cgroup set & all sub-cgroups
+ GetAllPids() ([]int, error)
+
+ // Returns statistics for the cgroup set
+ GetStats() (*Stats, error)
+
+ // Toggles the freezer cgroup according with specified state
+ Freeze(state configs.FreezerState) error
+
+ // Destroys the cgroup set
+ Destroy() error
+
+ // Path returns a cgroup path to the specified controller/subsystem.
+ // For cgroupv2, the argument is unused and can be empty.
+ Path(string) string
+
+ // Sets the cgroup as configured.
+ Set(container *configs.Config) error
+
+ // GetPaths returns cgroup path(s) to save in a state file in order to restore later.
+ //
+ // For cgroup v1, a key is cgroup subsystem name, and the value is the path
+ // to the cgroup for this subsystem.
+ //
+ // For cgroup v2 unified hierarchy, a key is "", and the value is the unified path.
+ GetPaths() map[string]string
+
+ // GetCgroups returns the cgroup data as configured.
+ GetCgroups() (*configs.Cgroup, error)
+
+ // GetFreezerState retrieves the current FreezerState of the cgroup.
+ GetFreezerState() (configs.FreezerState, error)
+
+ // Whether the cgroup path exists or not
+ Exists() bool
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
new file mode 100644
index 00000000..278d507e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux
+
+package cgroups
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go
new file mode 100644
index 00000000..d19673f7
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/fscommon.go
@@ -0,0 +1,50 @@
+// +build linux
+
+package fscommon
+
+import (
+ "bytes"
+ "os"
+
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+// WriteFile writes data to a cgroup file in dir.
+// It is supposed to be used for cgroup files only.
+func WriteFile(dir, file, data string) error {
+ fd, err := OpenFile(dir, file, unix.O_WRONLY)
+ if err != nil {
+ return err
+ }
+ defer fd.Close()
+ if err := retryingWriteFile(fd, data); err != nil {
+ return errors.Wrapf(err, "failed to write %q", data)
+ }
+ return nil
+}
+
+// ReadFile reads data from a cgroup file in dir.
+// It is supposed to be used for cgroup files only.
+func ReadFile(dir, file string) (string, error) {
+ fd, err := OpenFile(dir, file, unix.O_RDONLY)
+ if err != nil {
+ return "", err
+ }
+ var buf bytes.Buffer
+
+ _, err = buf.ReadFrom(fd)
+ return buf.String(), err
+}
+
+func retryingWriteFile(fd *os.File, data string) error {
+ for {
+ _, err := fd.Write([]byte(data))
+ if errors.Is(err, unix.EINTR) {
+ logrus.Infof("interrupted while writing %s to %s", data, fd.Name())
+ continue
+ }
+ return err
+ }
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go
new file mode 100644
index 00000000..90560f54
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/open.go
@@ -0,0 +1,33 @@
+package fscommon
+
+import (
+ "os"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "github.com/pkg/errors"
+)
+
+var (
+ // Set to true by fs unit tests
+ TestMode bool
+)
+
+// OpenFile opens a cgroup file in a given dir with given flags.
+// It is supposed to be used for cgroup files only.
+func OpenFile(dir, file string, flags int) (*os.File, error) {
+ if dir == "" {
+ return nil, errors.Errorf("no directory specified for %s", file)
+ }
+ mode := os.FileMode(0)
+ if TestMode && flags&os.O_WRONLY != 0 {
+ // "emulate" cgroup fs for unit tests
+ flags |= os.O_TRUNC | os.O_CREATE
+ mode = 0o600
+ }
+ path, err := securejoin.SecureJoin(dir, file)
+ if err != nil {
+ return nil, err
+ }
+
+ return os.OpenFile(path, flags, mode)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go
new file mode 100644
index 00000000..7c387a8b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fscommon/utils.go
@@ -0,0 +1,83 @@
+// +build linux
+
+package fscommon
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+)
+
+var (
+ ErrNotValidFormat = errors.New("line is not a valid key value format")
+)
+
+// ParseUint converts a string to an uint64 integer.
+// Negative values are returned as zero because, due to kernel bugs,
+// some of the memory cgroup stats can be negative.
+func ParseUint(s string, base, bitSize int) (uint64, error) {
+ value, err := strconv.ParseUint(s, base, bitSize)
+ if err != nil {
+ intValue, intErr := strconv.ParseInt(s, base, bitSize)
+ // 1. Handle negative values greater than MinInt64 (and)
+ // 2. Handle negative values lesser than MinInt64
+ if intErr == nil && intValue < 0 {
+ return 0, nil
+ } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 {
+ return 0, nil
+ }
+
+ return value, err
+ }
+
+ return value, nil
+}
+
+// GetCgroupParamKeyValue parses a space-separated "name value" kind of cgroup
+// parameter and returns its components. For example, "io_service_bytes 1234"
+// will return as "io_service_bytes", 1234.
+func GetCgroupParamKeyValue(t string) (string, uint64, error) {
+ parts := strings.Fields(t)
+ switch len(parts) {
+ case 2:
+ value, err := ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return "", 0, fmt.Errorf("unable to convert to uint64: %v", err)
+ }
+
+ return parts[0], value, nil
+ default:
+ return "", 0, ErrNotValidFormat
+ }
+}
+
+// GetCgroupParamUint reads a single uint64 value from the specified cgroup file.
+// If the value read is "max", the math.MaxUint64 is returned.
+func GetCgroupParamUint(path, file string) (uint64, error) {
+ contents, err := GetCgroupParamString(path, file)
+ if err != nil {
+ return 0, err
+ }
+ contents = strings.TrimSpace(contents)
+ if contents == "max" {
+ return math.MaxUint64, nil
+ }
+
+ res, err := ParseUint(contents, 10, 64)
+ if err != nil {
+ return res, fmt.Errorf("unable to parse file %q", path+"/"+file)
+ }
+ return res, nil
+}
+
+// GetCgroupParamString reads a string from the specified cgroup file.
+func GetCgroupParamString(path, file string) (string, error) {
+ contents, err := ReadFile(path, file)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(contents), nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
new file mode 100644
index 00000000..7ac81660
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
@@ -0,0 +1,135 @@
+// +build linux
+
+package cgroups
+
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods,omitempty"`
+ // Number of periods when the container hit its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+}
+
+// CpuUsage denotes the usage of a CPU.
+// All CPU stats are aggregate since container inception.
+type CpuUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds.
+ TotalUsage uint64 `json:"total_usage,omitempty"`
+ // Total CPU time consumed per core.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+ // CPU time consumed per core in kernel mode
+ // Units: nanoseconds.
+ PercpuUsageInKernelmode []uint64 `json:"percpu_usage_in_kernelmode"`
+ // CPU time consumed per core in user mode
+ // Units: nanoseconds.
+ PercpuUsageInUsermode []uint64 `json:"percpu_usage_in_usermode"`
+ // Time spent by tasks of the cgroup in kernel mode.
+ // Units: nanoseconds.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+ // Time spent by tasks of the cgroup in user mode.
+ // Units: nanoseconds.
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+type CpuStats struct {
+ CpuUsage CpuUsage `json:"cpu_usage,omitempty"`
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+type MemoryData struct {
+ Usage uint64 `json:"usage,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ Failcnt uint64 `json:"failcnt"`
+ Limit uint64 `json:"limit"`
+}
+
+type MemoryStats struct {
+ // memory used for cache
+ Cache uint64 `json:"cache,omitempty"`
+ // usage of memory
+ Usage MemoryData `json:"usage,omitempty"`
+ // usage of memory + swap
+ SwapUsage MemoryData `json:"swap_usage,omitempty"`
+ // usage of kernel memory
+ KernelUsage MemoryData `json:"kernel_usage,omitempty"`
+ // usage of kernel TCP memory
+ KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"`
+ // usage of memory pages by NUMA node
+ // see chapter 5.6 of memory controller documentation
+ PageUsageByNUMA PageUsageByNUMA `json:"page_usage_by_numa,omitempty"`
+ // if true, memory usage is accounted for throughout a hierarchy of cgroups.
+ UseHierarchy bool `json:"use_hierarchy"`
+
+ Stats map[string]uint64 `json:"stats,omitempty"`
+}
+
+type PageUsageByNUMA struct {
+ // Embedding is used as types can't be recursive.
+ PageUsageByNUMAInner
+ Hierarchical PageUsageByNUMAInner `json:"hierarchical,omitempty"`
+}
+
+type PageUsageByNUMAInner struct {
+ Total PageStats `json:"total,omitempty"`
+ File PageStats `json:"file,omitempty"`
+ Anon PageStats `json:"anon,omitempty"`
+ Unevictable PageStats `json:"unevictable,omitempty"`
+}
+
+type PageStats struct {
+ Total uint64 `json:"total,omitempty"`
+ Nodes map[uint8]uint64 `json:"nodes,omitempty"`
+}
+
+type PidsStats struct {
+ // number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // active pids hard limit
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+type BlkioStatEntry struct {
+ Major uint64 `json:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty"`
+ Op string `json:"op,omitempty"`
+ Value uint64 `json:"value,omitempty"`
+}
+
+type BlkioStats struct {
+	// number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"`
+}
+
+type HugetlbStats struct {
+ // current res_counter usage for hugetlb
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+	// number of times hugetlb usage allocation failed.
+ Failcnt uint64 `json:"failcnt"`
+}
+
+type Stats struct {
+ CpuStats CpuStats `json:"cpu_stats,omitempty"`
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+ // the map is in the format "size of hugepage: stats of the hugepage"
+ HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
+}
+
+func NewStats() *Stats {
+ memoryStats := MemoryStats{Stats: make(map[string]uint64)}
+ hugetlbStats := make(map[string]HugetlbStats)
+ return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats}
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
new file mode 100644
index 00000000..5303f0fb
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -0,0 +1,452 @@
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/unix"
+)
+
+const (
+ CgroupProcesses = "cgroup.procs"
+ unifiedMountpoint = "/sys/fs/cgroup"
+)
+
+var (
+ isUnifiedOnce sync.Once
+ isUnified bool
+)
+
+// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode.
+func IsCgroup2UnifiedMode() bool {
+ isUnifiedOnce.Do(func() {
+ var st unix.Statfs_t
+ err := unix.Statfs(unifiedMountpoint, &st)
+ if err != nil {
+ if os.IsNotExist(err) && system.RunningInUserNS() {
+ // ignore the "not found" error if running in userns
+ logrus.WithError(err).Debugf("%s missing, assuming cgroup v1", unifiedMountpoint)
+ isUnified = false
+ return
+ }
+ panic(fmt.Sprintf("cannot statfs cgroup root: %s", err))
+ }
+ isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC
+ })
+ return isUnified
+}
+
+type Mount struct {
+ Mountpoint string
+ Root string
+ Subsystems []string
+}
+
+// GetCgroupMounts returns the mounts for the cgroup subsystems.
+// all indicates whether to return just the first instance or all the mounts.
+// This function should not be used from cgroupv2 code, as in this case
+// all the controllers are available under the constant unifiedMountpoint.
+func GetCgroupMounts(all bool) ([]Mount, error) {
+ if IsCgroup2UnifiedMode() {
+ // TODO: remove cgroupv2 case once all external users are converted
+ availableControllers, err := GetAllSubsystems()
+ if err != nil {
+ return nil, err
+ }
+ m := Mount{
+ Mountpoint: unifiedMountpoint,
+ Root: unifiedMountpoint,
+ Subsystems: availableControllers,
+ }
+ return []Mount{m}, nil
+ }
+
+ return getCgroupMountsV1(all)
+}
+
+// GetAllSubsystems returns all the cgroup subsystems supported by the kernel
+func GetAllSubsystems() ([]string, error) {
+ // /proc/cgroups is meaningless for v2
+ // https://github.com/torvalds/linux/blob/v5.3/Documentation/admin-guide/cgroup-v2.rst#deprecated-v1-core-features
+ if IsCgroup2UnifiedMode() {
+ // "pseudo" controllers do not appear in /sys/fs/cgroup/cgroup.controllers.
+ // - devices: implemented in kernel 4.15
+ // - freezer: implemented in kernel 5.2
+ // We assume these are always available, as it is hard to detect availability.
+ pseudo := []string{"devices", "freezer"}
+ data, err := fscommon.ReadFile("/sys/fs/cgroup", "cgroup.controllers")
+ if err != nil {
+ return nil, err
+ }
+ subsystems := append(pseudo, strings.Fields(data)...)
+ return subsystems, nil
+ }
+ f, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ subsystems := []string{}
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ text := s.Text()
+ if text[0] != '#' {
+ parts := strings.Fields(text)
+ if len(parts) >= 4 && parts[3] != "0" {
+ subsystems = append(subsystems, parts[0])
+ }
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return subsystems, nil
+}
+
+func readProcsFile(file string) ([]int, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var (
+ s = bufio.NewScanner(f)
+ out = []int{}
+ )
+
+ for s.Scan() {
+ if t := s.Text(); t != "" {
+ pid, err := strconv.Atoi(t)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, pid)
+ }
+ }
+ return out, s.Err()
+}
+
+// ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup
+// or /proc/[pid]/cgroup, into a map of subsystems to cgroup paths, e.g.
+// "cpu": "/user.slice/user-1000.slice"
+// "pids": "/user.slice/user-1000.slice"
+// etc.
+//
+// Note that for cgroup v2 unified hierarchy, there are no per-controller
+// cgroup paths, so the resulting map will have a single element where the key
+// is empty string ("") and the value is the cgroup path the process is in.
+func ParseCgroupFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseCgroupFromReader(f)
+}
+
+// helper function for ParseCgroupFile to make testing easier
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+ s := bufio.NewScanner(r)
+ cgroups := make(map[string]string)
+
+ for s.Scan() {
+ text := s.Text()
+ // from cgroups(7):
+ // /proc/[pid]/cgroup
+ // ...
+ // For each cgroup hierarchy ... there is one entry
+ // containing three colon-separated fields of the form:
+ // hierarchy-ID:subsystem-list:cgroup-path
+ parts := strings.SplitN(text, ":", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
+ }
+
+ for _, subs := range strings.Split(parts[1], ",") {
+ cgroups[subs] = parts[2]
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return cgroups, nil
+}
+
+func PathExists(path string) bool {
+ if _, err := os.Stat(path); err != nil {
+ return false
+ }
+ return true
+}
+
+func EnterPid(cgroupPaths map[string]string, pid int) error {
+ for _, path := range cgroupPaths {
+ if PathExists(path) {
+ if err := WriteCgroupProc(path, pid); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func rmdir(path string) error {
+ err := unix.Rmdir(path)
+ if err == nil || err == unix.ENOENT {
+ return nil
+ }
+ return &os.PathError{Op: "rmdir", Path: path, Err: err}
+}
+
+// RemovePath aims to remove cgroup path. It does so recursively,
+// by removing any subdirectories (sub-cgroups) first.
+func RemovePath(path string) error {
+ // try the fast path first
+ if err := rmdir(path); err == nil {
+ return nil
+ }
+
+ infos, err := ioutil.ReadDir(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil
+ }
+ return err
+ }
+ for _, info := range infos {
+ if info.IsDir() {
+ // We should remove subcgroups dir first
+ if err = RemovePath(filepath.Join(path, info.Name())); err != nil {
+ break
+ }
+ }
+ }
+ if err == nil {
+ err = rmdir(path)
+ }
+ return err
+}
+
+// RemovePaths iterates over the provided paths removing them.
+// We try to remove all paths five times with increasing delay between tries.
+// If some cgroups are still not removed after all retries, an appropriate
+// error will be returned.
+func RemovePaths(paths map[string]string) (err error) {
+ const retries = 5
+ delay := 10 * time.Millisecond
+ for i := 0; i < retries; i++ {
+ if i != 0 {
+ time.Sleep(delay)
+ delay *= 2
+ }
+ for s, p := range paths {
+ if err := RemovePath(p); err != nil {
+ // do not log intermediate iterations
+ switch i {
+ case 0:
+ logrus.WithError(err).Warnf("Failed to remove cgroup (will retry)")
+ case retries - 1:
+ logrus.WithError(err).Error("Failed to remove cgroup")
+ }
+
+ }
+ _, err := os.Stat(p)
+ // We need this strange way of checking cgroups existence because
+ // RemoveAll almost always returns error, even on already removed
+ // cgroups
+ if os.IsNotExist(err) {
+ delete(paths, s)
+ }
+ }
+ if len(paths) == 0 {
+ //nolint:ineffassign // done to help garbage collecting: opencontainers/runc#2506
+ paths = make(map[string]string)
+ return nil
+ }
+ }
+ return fmt.Errorf("Failed to remove paths: %v", paths)
+}
+
+func GetHugePageSize() ([]string, error) {
+ dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ files, err := dir.Readdirnames(0)
+ dir.Close()
+ if err != nil {
+ return nil, err
+ }
+
+ return getHugePageSizeFromFilenames(files)
+}
+
+func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) {
+ pageSizes := make([]string, 0, len(fileNames))
+
+ for _, file := range fileNames {
+ // example: hugepages-1048576kB
+ val := strings.TrimPrefix(file, "hugepages-")
+ if len(val) == len(file) {
+ // unexpected file name: no prefix found
+ continue
+ }
+ // The suffix is always "kB" (as of Linux 5.9)
+ eLen := len(val) - 2
+ val = strings.TrimSuffix(val, "kB")
+ if len(val) != eLen {
+ logrus.Warnf("GetHugePageSize: %s: invalid filename suffix (expected \"kB\")", file)
+ continue
+ }
+ size, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, err
+ }
+ // Model after https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/hugetlb_cgroup.c?id=eff48ddeab782e35e58ccc8853f7386bbae9dec4#n574
+ // but in our case the size is in KB already.
+ if size >= (1 << 20) {
+ val = strconv.Itoa(size>>20) + "GB"
+ } else if size >= (1 << 10) {
+ val = strconv.Itoa(size>>10) + "MB"
+ } else {
+ val += "KB"
+ }
+ pageSizes = append(pageSizes, val)
+ }
+
+ return pageSizes, nil
+}
+
+// GetPids returns all pids, that were added to cgroup at path.
+func GetPids(dir string) ([]int, error) {
+ return readProcsFile(filepath.Join(dir, CgroupProcesses))
+}
+
+// GetAllPids returns all pids, that were added to cgroup at path and to all its
+// subcgroups.
+func GetAllPids(path string) ([]int, error) {
+ var pids []int
+ // collect pids from all sub-cgroups
+ err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {
+ if iErr != nil {
+ return iErr
+ }
+ if info.IsDir() || info.Name() != CgroupProcesses {
+ return nil
+ }
+ cPids, err := readProcsFile(p)
+ if err != nil {
+ return err
+ }
+ pids = append(pids, cPids...)
+ return nil
+ })
+ return pids, err
+}
+
+// WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file
+func WriteCgroupProc(dir string, pid int) error {
+ // Normally dir should not be empty, one case is that cgroup subsystem
+ // is not mounted, we will get empty dir, and we want it fail here.
+ if dir == "" {
+ return fmt.Errorf("no such directory for %s", CgroupProcesses)
+ }
+
+	// Don't attach any pid to the cgroup if -1 is specified as a pid
+ if pid == -1 {
+ return nil
+ }
+
+ file, err := fscommon.OpenFile(dir, CgroupProcesses, os.O_WRONLY)
+ if err != nil {
+ return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
+ }
+ defer file.Close()
+
+ for i := 0; i < 5; i++ {
+ _, err = file.WriteString(strconv.Itoa(pid))
+ if err == nil {
+ return nil
+ }
+
+ // EINVAL might mean that the task being added to cgroup.procs is in state
+ // TASK_NEW. We should attempt to do so again.
+ if errors.Is(err, unix.EINVAL) {
+ time.Sleep(30 * time.Millisecond)
+ continue
+ }
+
+ return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err)
+ }
+ return err
+}
+
+// Since the OCI spec is designed for cgroup v1, in some cases
+// there is need to convert from the cgroup v1 configuration to cgroup v2
+// the formula for BlkIOWeight is y = (1 + (x - 10) * 9999 / 990)
+// convert linearly from [10-1000] to [1-10000]
+func ConvertBlkIOToCgroupV2Value(blkIoWeight uint16) uint64 {
+ if blkIoWeight == 0 {
+ return 0
+ }
+ return uint64(1 + (uint64(blkIoWeight)-10)*9999/990)
+}
+
+// Since the OCI spec is designed for cgroup v1, in some cases
+// there is need to convert from the cgroup v1 configuration to cgroup v2
+// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142)
+// convert from [2-262144] to [1-10000]
+// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)"
+func ConvertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 {
+ if cpuShares == 0 {
+ return 0
+ }
+ return (1 + ((cpuShares-2)*9999)/262142)
+}
+
+// ConvertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec
+// for use by cgroup v2 drivers. A conversion is needed since Resources.MemorySwap
+// is defined as memory+swap combined, while in cgroup v2 swap is a separate value.
+func ConvertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) {
+ // for compatibility with cgroup1 controller, set swap to unlimited in
+ // case the memory is set to unlimited, and swap is not explicitly set,
+ // treating the request as "set both memory and swap to unlimited".
+ if memory == -1 && memorySwap == 0 {
+ return -1, nil
+ }
+ if memorySwap == -1 || memorySwap == 0 {
+ // -1 is "max", 0 is "unset", so treat as is
+ return memorySwap, nil
+ }
+ // sanity checks
+ if memory == 0 || memory == -1 {
+ return 0, errors.New("unable to set swap limit without memory limit")
+ }
+ if memory < 0 {
+ return 0, fmt.Errorf("invalid memory value: %d", memory)
+ }
+ if memorySwap < memory {
+ return 0, errors.New("memory+swap limit should be >= memory limit")
+ }
+
+ return memorySwap - memory, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go
new file mode 100644
index 00000000..8b9275fb
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go
@@ -0,0 +1,302 @@
+package cgroups
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ securejoin "github.com/cyphar/filepath-securejoin"
+ "golang.org/x/sys/unix"
+)
+
+// Code in this source file are specific to cgroup v1,
+// and must not be used from any cgroup v2 code.
+
+const (
+ CgroupNamePrefix = "name="
+ defaultPrefix = "/sys/fs/cgroup"
+)
+
+var (
+ errUnified = errors.New("not implemented for cgroup v2 unified hierarchy")
+ ErrV1NoUnified = errors.New("invalid configuration: cannot use unified on cgroup v1")
+)
+
+type NotFoundError struct {
+ Subsystem string
+}
+
+func (e *NotFoundError) Error() string {
+ return fmt.Sprintf("mountpoint for %s not found", e.Subsystem)
+}
+
+func NewNotFoundError(sub string) error {
+ return &NotFoundError{
+ Subsystem: sub,
+ }
+}
+
+func IsNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, ok := err.(*NotFoundError)
+ return ok
+}
+
+func tryDefaultPath(cgroupPath, subsystem string) string {
+ if !strings.HasPrefix(defaultPrefix, cgroupPath) {
+ return ""
+ }
+
+ // remove possible prefix
+ subsystem = strings.TrimPrefix(subsystem, CgroupNamePrefix)
+
+ // Make sure we're still under defaultPrefix, and resolve
+ // a possible symlink (like cpu -> cpu,cpuacct).
+ path, err := securejoin.SecureJoin(defaultPrefix, subsystem)
+ if err != nil {
+ return ""
+ }
+
+ // (1) path should be a directory.
+ st, err := os.Lstat(path)
+ if err != nil || !st.IsDir() {
+ return ""
+ }
+
+ // (2) path should be a mount point.
+ pst, err := os.Lstat(filepath.Dir(path))
+ if err != nil {
+ return ""
+ }
+
+ if st.Sys().(*syscall.Stat_t).Dev == pst.Sys().(*syscall.Stat_t).Dev {
+ // parent dir has the same dev -- path is not a mount point
+ return ""
+ }
+
+ // (3) path should have 'cgroup' fs type.
+ fst := unix.Statfs_t{}
+ err = unix.Statfs(path, &fst)
+ if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC {
+ return ""
+ }
+
+ return path
+}
+
+// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt
+func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) {
+ if IsCgroup2UnifiedMode() {
+ return "", errUnified
+ }
+
+ // Avoid parsing mountinfo by trying the default path first, if possible.
+ if path := tryDefaultPath(cgroupPath, subsystem); path != "" {
+ return path, nil
+ }
+
+ mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem)
+ return mnt, err
+}
+
+func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) {
+ if IsCgroup2UnifiedMode() {
+ return "", "", errUnified
+ }
+
+ // Avoid parsing mountinfo by checking if subsystem is valid/available.
+ if !isSubsystemAvailable(subsystem) {
+ return "", "", NewNotFoundError(subsystem)
+ }
+
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return "", "", err
+ }
+ defer f.Close()
+
+ return findCgroupMountpointAndRootFromReader(f, cgroupPath, subsystem)
+}
+
+func findCgroupMountpointAndRootFromReader(reader io.Reader, cgroupPath, subsystem string) (string, string, error) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ fields := strings.Fields(txt)
+ if len(fields) < 9 {
+ continue
+ }
+ if strings.HasPrefix(fields[4], cgroupPath) {
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ if opt == subsystem {
+ return fields[4], fields[3], nil
+ }
+ }
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return "", "", err
+ }
+
+ return "", "", NewNotFoundError(subsystem)
+}
+
+func isSubsystemAvailable(subsystem string) bool {
+ if IsCgroup2UnifiedMode() {
+ panic("don't call isSubsystemAvailable from cgroupv2 code")
+ }
+
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return false
+ }
+ _, avail := cgroups[subsystem]
+ return avail
+}
+
+func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) {
+ if len(m.Subsystems) == 0 {
+ return "", fmt.Errorf("no subsystem for mount")
+ }
+
+ return getControllerPath(m.Subsystems[0], cgroups)
+}
+
+func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) {
+ res := make([]Mount, 0, len(ss))
+ scanner := bufio.NewScanner(mi)
+ numFound := 0
+ for scanner.Scan() && numFound < len(ss) {
+ txt := scanner.Text()
+ sepIdx := strings.Index(txt, " - ")
+ if sepIdx == -1 {
+ return nil, fmt.Errorf("invalid mountinfo format")
+ }
+ if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" {
+ continue
+ }
+ fields := strings.Split(txt, " ")
+ m := Mount{
+ Mountpoint: fields[4],
+ Root: fields[3],
+ }
+ for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+ seen, known := ss[opt]
+ if !known || (!all && seen) {
+ continue
+ }
+ ss[opt] = true
+ opt = strings.TrimPrefix(opt, CgroupNamePrefix)
+ m.Subsystems = append(m.Subsystems, opt)
+ numFound++
+ }
+ if len(m.Subsystems) > 0 || all {
+ res = append(res, m)
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+func getCgroupMountsV1(all bool) ([]Mount, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ allSubsystems, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return nil, err
+ }
+
+ allMap := make(map[string]bool)
+ for s := range allSubsystems {
+ allMap[s] = false
+ }
+ return getCgroupMountsHelper(allMap, f, all)
+}
+
+// GetOwnCgroup returns the relative path to the cgroup docker is running in.
+func GetOwnCgroup(subsystem string) (string, error) {
+ if IsCgroup2UnifiedMode() {
+ return "", errUnified
+ }
+ cgroups, err := ParseCgroupFile("/proc/self/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetOwnCgroupPath(subsystem string) (string, error) {
+ cgroup, err := GetOwnCgroup(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ return getCgroupPathHelper(subsystem, cgroup)
+}
+
+func GetInitCgroup(subsystem string) (string, error) {
+ if IsCgroup2UnifiedMode() {
+ return "", errUnified
+ }
+ cgroups, err := ParseCgroupFile("/proc/1/cgroup")
+ if err != nil {
+ return "", err
+ }
+
+ return getControllerPath(subsystem, cgroups)
+}
+
+func GetInitCgroupPath(subsystem string) (string, error) {
+ cgroup, err := GetInitCgroup(subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ return getCgroupPathHelper(subsystem, cgroup)
+}
+
+func getCgroupPathHelper(subsystem, cgroup string) (string, error) {
+ mnt, root, err := FindCgroupMountpointAndRoot("", subsystem)
+ if err != nil {
+ return "", err
+ }
+
+ // This is needed for nested containers, because in /proc/self/cgroup we
+ // see paths from host, which don't exist in container.
+ relCgroup, err := filepath.Rel(root, cgroup)
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(mnt, relCgroup), nil
+}
+
+func getControllerPath(subsystem string, cgroups map[string]string) (string, error) {
+ if IsCgroup2UnifiedMode() {
+ return "", errUnified
+ }
+
+ if p, ok := cgroups[subsystem]; ok {
+ return p, nil
+ }
+
+ if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok {
+ return p, nil
+ }
+
+ return "", NewNotFoundError(subsystem)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
new file mode 100644
index 00000000..fa195bf9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
@@ -0,0 +1,66 @@
+package configs
+
+import "fmt"
+
+// blockIODevice holds major:minor format supported in blkio cgroup
+type blockIODevice struct {
+ // Major is the device's major number
+ Major int64 `json:"major"`
+ // Minor is the device's minor number
+ Minor int64 `json:"minor"`
+}
+
+// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair
+type WeightDevice struct {
+ blockIODevice
+ // Weight is the bandwidth rate for the device, range is from 10 to 1000
+ Weight uint16 `json:"weight"`
+ // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ LeafWeight uint16 `json:"leafWeight"`
+}
+
+// NewWeightDevice returns a configured WeightDevice pointer
+func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice {
+ wd := &WeightDevice{}
+ wd.Major = major
+ wd.Minor = minor
+ wd.Weight = weight
+ wd.LeafWeight = leafWeight
+ return wd
+}
+
+// WeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) WeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight)
+}
+
+// LeafWeightString formats the struct to be writable to the cgroup specific file
+func (wd *WeightDevice) LeafWeightString() string {
+ return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight)
+}
+
+// ThrottleDevice struct holds a `major:minor rate_per_second` pair
+type ThrottleDevice struct {
+ blockIODevice
+ // Rate is the IO rate limit per cgroup per device
+ Rate uint64 `json:"rate"`
+}
+
+// NewThrottleDevice returns a configured ThrottleDevice pointer
+func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice {
+ td := &ThrottleDevice{}
+ td.Major = major
+ td.Minor = minor
+ td.Rate = rate
+ return td
+}
+
+// String formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) String() string {
+ return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)
+}
+
+// StringName formats the struct to be writable to the cgroup specific file
+func (td *ThrottleDevice) StringName(name string) string {
+ return fmt.Sprintf("%d:%d %s=%d", td.Major, td.Minor, name, td.Rate)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
new file mode 100644
index 00000000..dcc29c61
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go
@@ -0,0 +1,139 @@
+package configs
+
+import (
+ systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+)
+
+type FreezerState string
+
+const (
+ Undefined FreezerState = ""
+ Frozen FreezerState = "FROZEN"
+ Thawed FreezerState = "THAWED"
+)
+
+type Cgroup struct {
+ // Deprecated, use Path instead
+ Name string `json:"name,omitempty"`
+
+ // name of parent of cgroup or slice
+ // Deprecated, use Path instead
+ Parent string `json:"parent,omitempty"`
+
+ // Path specifies the path to cgroups that are created and/or joined by the container.
+ // The path is assumed to be relative to the host system cgroup mountpoint.
+ Path string `json:"path"`
+
+ // ScopePrefix describes prefix for the scope name
+ ScopePrefix string `json:"scope_prefix"`
+
+ // Paths represent the absolute cgroups paths to join.
+ // This takes precedence over Path.
+ Paths map[string]string
+
+ // Resources contains various cgroups settings to apply
+ *Resources
+
+ // SystemdProps are any additional properties for systemd,
+ // derived from org.systemd.property.xxx annotations.
+ // Ignored unless systemd is used for managing cgroups.
+ SystemdProps []systemdDbus.Property `json:"-"`
+}
+
+type Resources struct {
+ // Devices is the set of access rules for devices in the container.
+ Devices []*DeviceRule `json:"devices"`
+
+ // Memory limit (in bytes)
+ Memory int64 `json:"memory"`
+
+ // Memory reservation or soft_limit (in bytes)
+ MemoryReservation int64 `json:"memory_reservation"`
+
+ // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwap int64 `json:"memory_swap"`
+
+ // Kernel memory limit (in bytes)
+ KernelMemory int64 `json:"kernel_memory"`
+
+ // Kernel memory limit for TCP use (in bytes)
+ KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
+
+ // CPU shares (relative weight vs. other containers)
+ CpuShares uint64 `json:"cpu_shares"`
+
+ // CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+ CpuQuota int64 `json:"cpu_quota"`
+
+ // CPU period to be used for hardcapping (in usecs). 0 to use system default.
+ CpuPeriod uint64 `json:"cpu_period"`
+
+ // How many time CPU will use in realtime scheduling (in usecs).
+ CpuRtRuntime int64 `json:"cpu_rt_quota"`
+
+ // CPU period to be used for realtime scheduling (in usecs).
+ CpuRtPeriod uint64 `json:"cpu_rt_period"`
+
+ // CPU to use
+ CpusetCpus string `json:"cpuset_cpus"`
+
+ // MEM to use
+ CpusetMems string `json:"cpuset_mems"`
+
+ // Process limit; set <= `0' to disable limit.
+ PidsLimit int64 `json:"pids_limit"`
+
+ // Specifies per cgroup weight, range is from 10 to 1000.
+ BlkioWeight uint16 `json:"blkio_weight"`
+
+ // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
+ BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
+
+ // Weight per cgroup per device, can override BlkioWeight.
+ BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
+
+ // IO read rate limit per cgroup per device, bytes per second.
+ BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
+
+ // IO write rate limit per cgroup per device, bytes per second.
+ BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
+
+ // IO read rate limit per cgroup per device, IO per second.
+ BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
+
+ // IO write rate limit per cgroup per device, IO per second.
+ BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
+
+ // set the freeze value for the process
+ Freezer FreezerState `json:"freezer"`
+
+ // Hugetlb limit (in bytes)
+ HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
+
+ // Whether to disable OOM Killer
+ OomKillDisable bool `json:"oom_kill_disable"`
+
+ // Tuning swappiness behaviour per cgroup
+ MemorySwappiness *uint64 `json:"memory_swappiness"`
+
+ // Set priority of network traffic for container
+ NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
+
+ // Set class identifier for container's network packets
+ NetClsClassid uint32 `json:"net_cls_classid_u"`
+
+ // Used on cgroups v2:
+
+ // CpuWeight sets a proportional bandwidth limit.
+ CpuWeight uint64 `json:"cpu_weight"`
+
+ // Unified is cgroupv2-only key-value map.
+ Unified map[string]string `json:"unified"`
+
+ // SkipDevices allows to skip configuring device permissions.
+ // Used by e.g. kubelet while creating a parent cgroup (kubepods)
+ // common for many containers.
+ //
+ // NOTE it is impossible to start a container which has this flag set.
+ SkipDevices bool `json:"skip_devices"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
new file mode 100644
index 00000000..c0c23d70
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux
+
+package configs
+
+// TODO Windows: This can ultimately be entirely factored out on Windows as
+// cgroups are a Unix-specific construct.
+type Cgroup struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
new file mode 100644
index 00000000..82e91b82
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -0,0 +1,395 @@
+package configs
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type Rlimit struct {
+ Type int `json:"type"`
+ Hard uint64 `json:"hard"`
+ Soft uint64 `json:"soft"`
+}
+
+// IDMap represents UID/GID Mappings for User Namespaces.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+// Seccomp represents syscall restrictions
+// By default, only the native architecture of the kernel is allowed to be used
+// for syscalls. Additional architectures can be added by specifying them in
+// Architectures.
+type Seccomp struct {
+ DefaultAction Action `json:"default_action"`
+ Architectures []string `json:"architectures"`
+ Syscalls []*Syscall `json:"syscalls"`
+ ListenerPath string `json:"listenerPath,omitempty"`
+ ListenerMetadata string `json:"listenerMetadata,omitempty"`
+}
+
+// Action is taken upon rule match in Seccomp
+type Action int
+
+const (
+ Kill Action = iota + 1
+ Errno
+ Trap
+ Allow
+ Trace
+ Log
+ Notify
+)
+
+// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
+type Operator int
+
+const (
+ EqualTo Operator = iota + 1
+ NotEqualTo
+ GreaterThan
+ GreaterThanOrEqualTo
+ LessThan
+ LessThanOrEqualTo
+ MaskEqualTo
+)
+
+// Arg is a rule to match a specific syscall argument in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"value_two"`
+ Op Operator `json:"op"`
+}
+
+// Syscall is a rule to match a syscall in Seccomp
+type Syscall struct {
+ Name string `json:"name"`
+ Action Action `json:"action"`
+ ErrnoRet *uint `json:"errnoRet"`
+ Args []*Arg `json:"args"`
+}
+
+// TODO Windows. Many of these fields should be factored out into those parts
+// which are common across platforms, and those which are platform specific.
+
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+ // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
+ // This is a common option when the container is running in ramdisk
+ NoPivotRoot bool `json:"no_pivot_root"`
+
+ // ParentDeathSignal specifies the signal that is sent to the container's process in the case
+ // that the parent process dies.
+ ParentDeathSignal int `json:"parent_death_signal"`
+
+ // Path to a directory containing the container's root filesystem.
+ Rootfs string `json:"rootfs"`
+
+ // Umask is the umask to use inside of the container.
+ Umask *uint32 `json:"umask"`
+
+ // Readonlyfs will remount the container's rootfs as readonly where only externally mounted
+ // bind mounts are writtable.
+ Readonlyfs bool `json:"readonlyfs"`
+
+ // Specifies the mount propagation flags to be applied to /.
+ RootPropagation int `json:"rootPropagation"`
+
+ // Mounts specify additional source and destination paths that will be mounted inside the container's
+ // rootfs and mount namespace if specified
+ Mounts []*Mount `json:"mounts"`
+
+ // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+ Devices []*Device `json:"devices"`
+
+ MountLabel string `json:"mount_label"`
+
+ // Hostname optionally sets the container's hostname if provided
+ Hostname string `json:"hostname"`
+
+ // Namespaces specifies the container's namespaces that it should setup when cloning the init process
+ // If a namespace is not provided that namespace is shared from the container's parent process
+ Namespaces Namespaces `json:"namespaces"`
+
+ // Capabilities specify the capabilities to keep when executing the process inside the container
+ // All capabilities not specified will be dropped from the processes capability mask
+ Capabilities *Capabilities `json:"capabilities"`
+
+ // Networks specifies the container's network setup to be created
+ Networks []*Network `json:"networks"`
+
+ // Routes can be specified to create entries in the route table as the container is started
+ Routes []*Route `json:"routes"`
+
+ // Cgroups specifies specific cgroup settings for the various subsystems that the container is
+ // placed into to limit the resources the container has available
+ Cgroups *Cgroup `json:"cgroups"`
+
+ // AppArmorProfile specifies the profile to apply to the process running in the container and is
+ // changed at the time the process is execed
+ AppArmorProfile string `json:"apparmor_profile,omitempty"`
+
+ // ProcessLabel specifies the label to apply to the process running in the container. It is
+ // commonly used by selinux
+ ProcessLabel string `json:"process_label,omitempty"`
+
+ // Rlimits specifies the resource limits, such as max open files, to set in the container
+ // If Rlimits are not set, the container will inherit rlimits from the parent process
+ Rlimits []Rlimit `json:"rlimits,omitempty"`
+
+ // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
+ // for a process. Valid values are between the range [-1000, 1000], where processes with
+ // higher scores are preferred for being killed. If it is unset then we don't touch the current
+ // value.
+ // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
+ OomScoreAdj *int `json:"oom_score_adj,omitempty"`
+
+ // UidMappings is an array of User ID mappings for User Namespaces
+ UidMappings []IDMap `json:"uid_mappings"`
+
+ // GidMappings is an array of Group ID mappings for User Namespaces
+ GidMappings []IDMap `json:"gid_mappings"`
+
+ // MaskPaths specifies paths within the container's rootfs to mask over with a bind
+ // mount pointing to /dev/null as to prevent reads of the file.
+ MaskPaths []string `json:"mask_paths"`
+
+ // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
+ // so that these files prevent any writes.
+ ReadonlyPaths []string `json:"readonly_paths"`
+
+ // Sysctl is a map of properties and their values. It is the equivalent of using
+ // sysctl -w my.property.name value in Linux.
+ Sysctl map[string]string `json:"sysctl"`
+
+ // Seccomp allows actions to be taken whenever a syscall is made within the container.
+ // A number of rules are given, each having an action to be taken if a syscall matches it.
+ // A default action to be taken if no rules match is also given.
+ Seccomp *Seccomp `json:"seccomp"`
+
+ // NoNewPrivileges controls whether processes in the container can gain additional privileges.
+ NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
+
+ // Hooks are a collection of actions to perform at various container lifecycle events.
+ // CommandHooks are serialized to JSON, but other hooks are not.
+ Hooks Hooks
+
+ // Version is the version of opencontainer specification that is supported.
+ Version string `json:"version"`
+
+ // Labels are user defined metadata that is stored in the config and populated on the state
+ Labels []string `json:"labels"`
+
+ // NoNewKeyring will not allocate a new session keyring for the container. It will use the
+ // caller's keyring in this case.
+ NoNewKeyring bool `json:"no_new_keyring"`
+
+ // IntelRdt specifies settings for Intel RDT group that the container is placed into
+ // to limit the resources (e.g., L3 cache, memory bandwidth) the container has available
+ IntelRdt *IntelRdt `json:"intel_rdt,omitempty"`
+
+ // RootlessEUID is set when the runc was launched with non-zero EUID.
+ // Note that RootlessEUID is set to false when launched with EUID=0 in userns.
+ // When RootlessEUID is set, runc creates a new userns for the container.
+ // (config.json needs to contain userns settings)
+ RootlessEUID bool `json:"rootless_euid,omitempty"`
+
+ // RootlessCgroups is set when unlikely to have the full access to cgroups.
+ // When RootlessCgroups is set, cgroups errors are ignored.
+ RootlessCgroups bool `json:"rootless_cgroups,omitempty"`
+}
+
+type HookName string
+type HookList []Hook
+type Hooks map[HookName]HookList
+
+const (
+ // Prestart commands are executed after the container namespaces are created,
+ // but before the user supplied command is executed from init.
+ // Note: This hook is now deprecated
+ // Prestart commands are called in the Runtime namespace.
+ Prestart HookName = "prestart"
+
+ // CreateRuntime commands MUST be called as part of the create operation after
+ // the runtime environment has been created but before the pivot_root has been executed.
+ // CreateRuntime is called immediately after the deprecated Prestart hook.
+ // CreateRuntime commands are called in the Runtime Namespace.
+ CreateRuntime = "createRuntime"
+
+ // CreateContainer commands MUST be called as part of the create operation after
+ // the runtime environment has been created but before the pivot_root has been executed.
+ // CreateContainer commands are called in the Container namespace.
+ CreateContainer = "createContainer"
+
+ // StartContainer commands MUST be called as part of the start operation and before
+ // the container process is started.
+ // StartContainer commands are called in the Container namespace.
+ StartContainer = "startContainer"
+
+ // Poststart commands are executed after the container init process starts.
+ // Poststart commands are called in the Runtime Namespace.
+ Poststart = "poststart"
+
+ // Poststop commands are executed after the container init process exits.
+ // Poststop commands are called in the Runtime Namespace.
+ Poststop = "poststop"
+)
+
+type Capabilities struct {
+ // Bounding is the set of capabilities checked by the kernel.
+ Bounding []string
+ // Effective is the set of capabilities checked by the kernel.
+ Effective []string
+ // Inheritable is the capabilities preserved across execve.
+ Inheritable []string
+ // Permitted is the limiting superset for effective capabilities.
+ Permitted []string
+ // Ambient is the ambient set of capabilities that are kept.
+ Ambient []string
+}
+
+func (hooks HookList) RunHooks(state *specs.State) error {
+ for i, h := range hooks {
+ if err := h.Run(state); err != nil {
+ return errors.Wrapf(err, "Running hook #%d:", i)
+ }
+ }
+
+ return nil
+}
+
+func (hooks *Hooks) UnmarshalJSON(b []byte) error {
+ var state map[HookName][]CommandHook
+
+ if err := json.Unmarshal(b, &state); err != nil {
+ return err
+ }
+
+ *hooks = Hooks{}
+ for n, commandHooks := range state {
+ if len(commandHooks) == 0 {
+ continue
+ }
+
+ (*hooks)[n] = HookList{}
+ for _, h := range commandHooks {
+ (*hooks)[n] = append((*hooks)[n], h)
+ }
+ }
+
+ return nil
+}
+
+func (hooks *Hooks) MarshalJSON() ([]byte, error) {
+ serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
+ for _, hook := range hooks {
+ switch chook := hook.(type) {
+ case CommandHook:
+ serializableHooks = append(serializableHooks, chook)
+ default:
+ logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
+ }
+ }
+
+ return serializableHooks
+ }
+
+ return json.Marshal(map[string]interface{}{
+ "prestart": serialize((*hooks)[Prestart]),
+ "createRuntime": serialize((*hooks)[CreateRuntime]),
+ "createContainer": serialize((*hooks)[CreateContainer]),
+ "startContainer": serialize((*hooks)[StartContainer]),
+ "poststart": serialize((*hooks)[Poststart]),
+ "poststop": serialize((*hooks)[Poststop]),
+ })
+}
+
+type Hook interface {
+ // Run executes the hook with the provided state.
+ Run(*specs.State) error
+}
+
+// NewFunctionHook will call the provided function when the hook is run.
+func NewFunctionHook(f func(*specs.State) error) FuncHook {
+ return FuncHook{
+ run: f,
+ }
+}
+
+type FuncHook struct {
+ run func(*specs.State) error
+}
+
+func (f FuncHook) Run(s *specs.State) error {
+ return f.run(s)
+}
+
+type Command struct {
+ Path string `json:"path"`
+ Args []string `json:"args"`
+ Env []string `json:"env"`
+ Dir string `json:"dir"`
+ Timeout *time.Duration `json:"timeout"`
+}
+
+// NewCommandHook will execute the provided command when the hook is run.
+func NewCommandHook(cmd Command) CommandHook {
+ return CommandHook{
+ Command: cmd,
+ }
+}
+
+type CommandHook struct {
+ Command
+}
+
+func (c Command) Run(s *specs.State) error {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ var stdout, stderr bytes.Buffer
+ cmd := exec.Cmd{
+ Path: c.Path,
+ Args: c.Args,
+ Env: c.Env,
+ Stdin: bytes.NewReader(b),
+ Stdout: &stdout,
+ Stderr: &stderr,
+ }
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ errC := make(chan error, 1)
+ go func() {
+ err := cmd.Wait()
+ if err != nil {
+ err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String())
+ }
+ errC <- err
+ }()
+ var timerCh <-chan time.Time
+ if c.Timeout != nil {
+ timer := time.NewTimer(*c.Timeout)
+ defer timer.Stop()
+ timerCh = timer.C
+ }
+ select {
+ case err := <-errC:
+ return err
+ case <-timerCh:
+ cmd.Process.Kill()
+ cmd.Wait()
+ return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
+ }
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go
new file mode 100644
index 00000000..07da1080
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go
@@ -0,0 +1,61 @@
+package configs
+
+import "fmt"
+
+// HostUID gets the translated uid for the process on host which could be
+// different when user namespaces are enabled.
+func (c Config) HostUID(containerId int) (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.UidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no uid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(containerId, c.UidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no user mapping found.")
+ }
+ return id, nil
+ }
+ // Return unchanged id.
+ return containerId, nil
+}
+
+// HostRootUID gets the root uid for the process on host which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostRootUID() (int, error) {
+ return c.HostUID(0)
+}
+
+// HostGID gets the translated gid for the process on host which could be
+// different when user namespaces are enabled.
+func (c Config) HostGID(containerId int) (int, error) {
+ if c.Namespaces.Contains(NEWUSER) {
+ if c.GidMappings == nil {
+ return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.")
+ }
+ id, found := c.hostIDFromMapping(containerId, c.GidMappings)
+ if !found {
+ return -1, fmt.Errorf("User namespaces enabled, but no group mapping found.")
+ }
+ return id, nil
+ }
+ // Return unchanged id.
+ return containerId, nil
+}
+
+// HostRootGID gets the root gid for the process on host which could be non-zero
+// when user namespaces are enabled.
+func (c Config) HostRootGID() (int, error) {
+ return c.HostGID(0)
+}
+
+// Utility function that gets a host ID for a container ID from user namespace map
+// if that ID is present in the map.
+func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) {
+ for _, m := range uMap {
+ if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (containerID - m.ContainerID)
+ return hostID, true
+ }
+ }
+ return -1, false
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
new file mode 100644
index 00000000..632bf6ac
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go
@@ -0,0 +1,170 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+const (
+ Wildcard = -1
+)
+
+type Device struct {
+ DeviceRule
+
+ // Path to the device.
+ Path string `json:"path"`
+
+ // FileMode permission bits for the device.
+ FileMode os.FileMode `json:"file_mode"`
+
+ // Uid of the device.
+ Uid uint32 `json:"uid"`
+
+ // Gid of the device.
+ Gid uint32 `json:"gid"`
+}
+
+// DevicePermissions is a cgroupv1-style string to represent device access. It
+// has to be a string for backward compatibility reasons, hence why it has
+// methods to do set operations.
+type DevicePermissions string
+
+const (
+ deviceRead uint = (1 << iota)
+ deviceWrite
+ deviceMknod
+)
+
+func (p DevicePermissions) toSet() uint {
+ var set uint
+ for _, perm := range p {
+ switch perm {
+ case 'r':
+ set |= deviceRead
+ case 'w':
+ set |= deviceWrite
+ case 'm':
+ set |= deviceMknod
+ }
+ }
+ return set
+}
+
+func fromSet(set uint) DevicePermissions {
+ var perm string
+ if set&deviceRead == deviceRead {
+ perm += "r"
+ }
+ if set&deviceWrite == deviceWrite {
+ perm += "w"
+ }
+ if set&deviceMknod == deviceMknod {
+ perm += "m"
+ }
+ return DevicePermissions(perm)
+}
+
+// Union returns the union of the two sets of DevicePermissions.
+func (p DevicePermissions) Union(o DevicePermissions) DevicePermissions {
+ lhs := p.toSet()
+ rhs := o.toSet()
+ return fromSet(lhs | rhs)
+}
+
+// Difference returns the set difference of the two sets of DevicePermissions.
+// In set notation, A.Difference(B) gives you A\B.
+func (p DevicePermissions) Difference(o DevicePermissions) DevicePermissions {
+ lhs := p.toSet()
+ rhs := o.toSet()
+ return fromSet(lhs &^ rhs)
+}
+
+// Intersection computes the intersection of the two sets of DevicePermissions.
+func (p DevicePermissions) Intersection(o DevicePermissions) DevicePermissions {
+ lhs := p.toSet()
+ rhs := o.toSet()
+ return fromSet(lhs & rhs)
+}
+
+// IsEmpty returns whether the set of permissions in a DevicePermissions is
+// empty.
+func (p DevicePermissions) IsEmpty() bool {
+ return p == DevicePermissions("")
+}
+
+// IsValid returns whether the set of permissions is a subset of valid
+// permissions (namely, {r,w,m}).
+func (p DevicePermissions) IsValid() bool {
+ return p == fromSet(p.toSet())
+}
+
+type DeviceType rune
+
+const (
+ WildcardDevice DeviceType = 'a'
+ BlockDevice DeviceType = 'b'
+ CharDevice DeviceType = 'c' // or 'u'
+ FifoDevice DeviceType = 'p'
+)
+
+func (t DeviceType) IsValid() bool {
+ switch t {
+ case WildcardDevice, BlockDevice, CharDevice, FifoDevice:
+ return true
+ default:
+ return false
+ }
+}
+
+func (t DeviceType) CanMknod() bool {
+ switch t {
+ case BlockDevice, CharDevice, FifoDevice:
+ return true
+ default:
+ return false
+ }
+}
+
+func (t DeviceType) CanCgroup() bool {
+ switch t {
+ case WildcardDevice, BlockDevice, CharDevice:
+ return true
+ default:
+ return false
+ }
+}
+
+type DeviceRule struct {
+ // Type of device ('c' for char, 'b' for block). If set to 'a', this rule
+ // acts as a wildcard and all fields other than Allow are ignored.
+ Type DeviceType `json:"type"`
+
+ // Major is the device's major number.
+ Major int64 `json:"major"`
+
+ // Minor is the device's minor number.
+ Minor int64 `json:"minor"`
+
+ // Permissions is the set of permissions that this rule applies to (in the
+ // cgroupv1 format -- any combination of "rwm").
+ Permissions DevicePermissions `json:"permissions"`
+
+ // Allow specifies whether this rule is allowed.
+ Allow bool `json:"allow"`
+}
+
+func (d *DeviceRule) CgroupString() string {
+ var (
+ major = strconv.FormatInt(d.Major, 10)
+ minor = strconv.FormatInt(d.Minor, 10)
+ )
+ if d.Major == Wildcard {
+ major = "*"
+ }
+ if d.Minor == Wildcard {
+ minor = "*"
+ }
+ return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go
new file mode 100644
index 00000000..650c4684
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_unix.go
@@ -0,0 +1,16 @@
+// +build !windows
+
+package configs
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+func (d *DeviceRule) Mkdev() (uint64, error) {
+ if d.Major == Wildcard || d.Minor == Wildcard {
+ return 0, errors.New("cannot mkdev() device with wildcards")
+ }
+ return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go
new file mode 100644
index 00000000..72928939
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_windows.go
@@ -0,0 +1,5 @@
+package configs
+
+func (d *DeviceRule) Mkdev() (uint64, error) {
+ return 0, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
new file mode 100644
index 00000000..d3021638
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
@@ -0,0 +1,9 @@
+package configs
+
+type HugepageLimit struct {
+ // which type of hugepage to limit.
+ Pagesize string `json:"page_size"`
+
+ // usage limit for hugepage.
+ Limit uint64 `json:"limit"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
new file mode 100644
index 00000000..57e9f037
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
@@ -0,0 +1,13 @@
+package configs
+
+type IntelRdt struct {
+ // The schema for L3 cache id and capacity bitmask (CBM)
+ // Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+ L3CacheSchema string `json:"l3_cache_schema,omitempty"`
+
+ // The schema of memory bandwidth per L3 cache id
+ // Format: "MB:<schema_id0>=bandwidth0;<schema_id1>=bandwidth1;..."
+ // The unit of memory bandwidth is specified in "percentages" by
+ // default, and in "MBps" if MBA Software Controller is enabled.
+ MemBwSchema string `json:"memBwSchema,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
new file mode 100644
index 00000000..9a0395ea
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
@@ -0,0 +1,14 @@
+package configs
+
+import (
+ "fmt"
+)
+
+type IfPrioMap struct {
+ Interface string `json:"interface"`
+ Priority int64 `json:"priority"`
+}
+
+func (i *IfPrioMap) CgroupString() string {
+ return fmt.Sprintf("%s %d", i.Interface, i.Priority)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
new file mode 100644
index 00000000..670757dd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go
@@ -0,0 +1,39 @@
+package configs
+
+const (
+ // EXT_COPYUP is a directive to copy up the contents of a directory when
+ // a tmpfs is mounted over it.
+ EXT_COPYUP = 1 << iota
+)
+
+type Mount struct {
+ // Source path for the mount.
+ Source string `json:"source"`
+
+ // Destination path for the mount inside the container.
+ Destination string `json:"destination"`
+
+ // Device the mount is for.
+ Device string `json:"device"`
+
+ // Mount flags.
+ Flags int `json:"flags"`
+
+ // Propagation Flags
+ PropagationFlags []int `json:"propagation_flags"`
+
+ // Mount data applied to the mount.
+ Data string `json:"data"`
+
+ // Relabel source if set, "z" indicates shared, "Z" indicates unshared.
+ Relabel string `json:"relabel"`
+
+ // Extensions are additional flags that are specific to runc.
+ Extensions int `json:"extensions"`
+
+ // Optional Command to be run before Source is mounted.
+ PremountCmds []Command `json:"premount_cmds"`
+
+ // Optional Command to be run after Source is mounted.
+ PostmountCmds []Command `json:"postmount_cmds"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
new file mode 100644
index 00000000..a3329a31
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
@@ -0,0 +1,5 @@
+package configs
+
+type NamespaceType string
+
+type Namespaces []Namespace
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
new file mode 100644
index 00000000..d52d6fcd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go
@@ -0,0 +1,126 @@
+package configs
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+const (
+ NEWNET NamespaceType = "NEWNET"
+ NEWPID NamespaceType = "NEWPID"
+ NEWNS NamespaceType = "NEWNS"
+ NEWUTS NamespaceType = "NEWUTS"
+ NEWIPC NamespaceType = "NEWIPC"
+ NEWUSER NamespaceType = "NEWUSER"
+ NEWCGROUP NamespaceType = "NEWCGROUP"
+)
+
+var (
+ nsLock sync.Mutex
+ supportedNamespaces = make(map[NamespaceType]bool)
+)
+
+// NsName converts the namespace type to its filename
+func NsName(ns NamespaceType) string {
+ switch ns {
+ case NEWNET:
+ return "net"
+ case NEWNS:
+ return "mnt"
+ case NEWPID:
+ return "pid"
+ case NEWIPC:
+ return "ipc"
+ case NEWUSER:
+ return "user"
+ case NEWUTS:
+ return "uts"
+ case NEWCGROUP:
+ return "cgroup"
+ }
+ return ""
+}
+
+// IsNamespaceSupported returns whether a namespace is available or
+// not
+func IsNamespaceSupported(ns NamespaceType) bool {
+ nsLock.Lock()
+ defer nsLock.Unlock()
+ supported, ok := supportedNamespaces[ns]
+ if ok {
+ return supported
+ }
+ nsFile := NsName(ns)
+ // if the namespace type is unknown, just return false
+ if nsFile == "" {
+ return false
+ }
+ _, err := os.Stat("/proc/self/ns/" + nsFile)
+ // a namespace is supported if it exists and we have permissions to read it
+ supported = err == nil
+ supportedNamespaces[ns] = supported
+ return supported
+}
+
+func NamespaceTypes() []NamespaceType {
+ return []NamespaceType{
+ NEWUSER, // Keep user NS always first, don't move it.
+ NEWIPC,
+ NEWUTS,
+ NEWNET,
+ NEWPID,
+ NEWNS,
+ NEWCGROUP,
+ }
+}
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+ Type NamespaceType `json:"type"`
+ Path string `json:"path"`
+}
+
+func (n *Namespace) GetPath(pid int) string {
+ return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type))
+}
+
+func (n *Namespaces) Remove(t NamespaceType) bool {
+ i := n.index(t)
+ if i == -1 {
+ return false
+ }
+ *n = append((*n)[:i], (*n)[i+1:]...)
+ return true
+}
+
+func (n *Namespaces) Add(t NamespaceType, path string) {
+ i := n.index(t)
+ if i == -1 {
+ *n = append(*n, Namespace{Type: t, Path: path})
+ return
+ }
+ (*n)[i].Path = path
+}
+
+func (n *Namespaces) index(t NamespaceType) int {
+ for i, ns := range *n {
+ if ns.Type == t {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n *Namespaces) Contains(t NamespaceType) bool {
+ return n.index(t) != -1
+}
+
+func (n *Namespaces) PathOf(t NamespaceType) string {
+ i := n.index(t)
+ if i == -1 {
+ return ""
+ }
+ return (*n)[i].Path
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
new file mode 100644
index 00000000..2dc7adfc
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
@@ -0,0 +1,32 @@
+// +build linux
+
+package configs
+
+import "golang.org/x/sys/unix"
+
+func (n *Namespace) Syscall() int {
+ return namespaceInfo[n.Type]
+}
+
+var namespaceInfo = map[NamespaceType]int{
+ NEWNET: unix.CLONE_NEWNET,
+ NEWNS: unix.CLONE_NEWNS,
+ NEWUSER: unix.CLONE_NEWUSER,
+ NEWIPC: unix.CLONE_NEWIPC,
+ NEWUTS: unix.CLONE_NEWUTS,
+ NEWPID: unix.CLONE_NEWPID,
+ NEWCGROUP: unix.CLONE_NEWCGROUP,
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ var flag int
+ for _, v := range *n {
+ if v.Path != "" {
+ continue
+ }
+ flag |= namespaceInfo[v.Type]
+ }
+ return uintptr(flag)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
new file mode 100644
index 00000000..5d9a5c81
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!windows
+
+package configs
+
+func (n *Namespace) Syscall() int {
+ panic("No namespace syscall support")
+}
+
+// CloneFlags parses the container's Namespaces options to set the correct
+// flags on clone, unshare. This function returns flags only for new namespaces.
+func (n *Namespaces) CloneFlags() uintptr {
+ panic("No namespace syscall support")
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
new file mode 100644
index 00000000..19bf713d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux
+
+package configs
+
+// Namespace defines configuration for each namespace. It specifies an
+// alternate path that is able to be joined via setns.
+type Namespace struct {
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
new file mode 100644
index 00000000..ccdb228e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go
@@ -0,0 +1,72 @@
+package configs
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container causing the
+// container to be setup with the host's networking stack
+type Network struct {
+ // Type sets the networks type, commonly veth and loopback
+ Type string `json:"type"`
+
+ // Name of the network interface
+ Name string `json:"name"`
+
+ // The bridge to use.
+ Bridge string `json:"bridge"`
+
+ // MacAddress contains the MAC address to set on the network interface
+ MacAddress string `json:"mac_address"`
+
+ // Address contains the IPv4 and mask to set on the network interface
+ Address string `json:"address"`
+
+ // Gateway sets the gateway address that is used as the default for the interface
+ Gateway string `json:"gateway"`
+
+ // IPv6Address contains the IPv6 and mask to set on the network interface
+ IPv6Address string `json:"ipv6_address"`
+
+ // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface
+ IPv6Gateway string `json:"ipv6_gateway"`
+
+ // Mtu sets the mtu value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ Mtu int `json:"mtu"`
+
+ // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ // Note: This does not apply to loopback interfaces.
+ TxQueueLen int `json:"txqueuelen"`
+
+ // HostInterfaceName is a unique name of a veth pair that resides on in the host interface of the
+ // container.
+ HostInterfaceName string `json:"host_interface_name"`
+
+ // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ // bridge port in the case of type veth
+ // Note: This is unsupported on some systems.
+ // Note: This does not apply to loopback interfaces.
+ HairpinMode bool `json:"hairpin_mode"`
+}
+
+// Routes can be specified to create entries in the route table as the container is started
+//
+// All of destination, source, and gateway should be either IPv4 or IPv6.
+// One of the three options must be present, and omitted entries will use their
+// IP family default for the route table. For IPv4 for example, setting the
+// gateway to 1.2.3.4 and the interface to eth0 will set up a standard
+// destination of 0.0.0.0(or *) when viewed in the route table.
+type Route struct {
+ // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Destination string `json:"destination"`
+
+ // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6
+ Source string `json:"source"`
+
+ // Sets the gateway. Accepts IPv4 and IPv6
+ Gateway string `json:"gateway"`
+
+ // The device to set this route up for, for example: eth0
+ InterfaceName string `json:"interface_name"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go
new file mode 100644
index 00000000..1c393861
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go
@@ -0,0 +1,78 @@
+package seccomp
+
+import (
+ "fmt"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var operators = map[string]configs.Operator{
+ "SCMP_CMP_NE": configs.NotEqualTo,
+ "SCMP_CMP_LT": configs.LessThan,
+ "SCMP_CMP_LE": configs.LessThanOrEqualTo,
+ "SCMP_CMP_EQ": configs.EqualTo,
+ "SCMP_CMP_GE": configs.GreaterThanOrEqualTo,
+ "SCMP_CMP_GT": configs.GreaterThan,
+ "SCMP_CMP_MASKED_EQ": configs.MaskEqualTo,
+}
+
+var actions = map[string]configs.Action{
+ "SCMP_ACT_KILL": configs.Kill,
+ "SCMP_ACT_ERRNO": configs.Errno,
+ "SCMP_ACT_TRAP": configs.Trap,
+ "SCMP_ACT_ALLOW": configs.Allow,
+ "SCMP_ACT_TRACE": configs.Trace,
+ "SCMP_ACT_LOG": configs.Log,
+ "SCMP_ACT_NOTIFY": configs.Notify,
+}
+
+var archs = map[string]string{
+ "SCMP_ARCH_X86": "x86",
+ "SCMP_ARCH_X86_64": "amd64",
+ "SCMP_ARCH_X32": "x32",
+ "SCMP_ARCH_ARM": "arm",
+ "SCMP_ARCH_AARCH64": "arm64",
+ "SCMP_ARCH_MIPS": "mips",
+ "SCMP_ARCH_MIPS64": "mips64",
+ "SCMP_ARCH_MIPS64N32": "mips64n32",
+ "SCMP_ARCH_MIPSEL": "mipsel",
+ "SCMP_ARCH_MIPSEL64": "mipsel64",
+ "SCMP_ARCH_MIPSEL64N32": "mipsel64n32",
+ "SCMP_ARCH_PPC": "ppc",
+ "SCMP_ARCH_PPC64": "ppc64",
+ "SCMP_ARCH_PPC64LE": "ppc64le",
+ "SCMP_ARCH_S390": "s390",
+ "SCMP_ARCH_S390X": "s390x",
+}
+
+// ConvertStringToOperator converts a string into a Seccomp comparison operator.
+// Comparison operators use the names they are assigned by Libseccomp's header.
+// Attempting to convert a string that is not a valid operator results in an
+// error.
+func ConvertStringToOperator(in string) (configs.Operator, error) {
+ if op, ok := operators[in]; ok {
+ return op, nil
+ }
+ return 0, fmt.Errorf("string %s is not a valid operator for seccomp", in)
+}
+
+// ConvertStringToAction converts a string into a Seccomp rule match action.
+// Actions use the names they are assigned in Libseccomp's header, though some
+// (notable, SCMP_ACT_TRACE) are not available in this implementation and will
+// return errors.
+// Attempting to convert a string that is not a valid action results in an
+// error.
+func ConvertStringToAction(in string) (configs.Action, error) {
+ if act, ok := actions[in]; ok {
+ return act, nil
+ }
+ return 0, fmt.Errorf("string %s is not a valid action for seccomp", in)
+}
+
+// ConvertStringToArch converts a string into a Seccomp comparison arch.
+func ConvertStringToArch(in string) (string, error) {
+ if arch, ok := archs[in]; ok {
+ return arch, nil
+ }
+ return "", fmt.Errorf("string %s is not a valid arch for seccomp", in)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
new file mode 100644
index 00000000..fd82ff48
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go
@@ -0,0 +1,314 @@
+// +build linux,cgo,seccomp
+
+package seccomp
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+ libseccomp "github.com/seccomp/libseccomp-golang"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ actAllow = libseccomp.ActAllow
+ actTrap = libseccomp.ActTrap
+ actKill = libseccomp.ActKill
+ actTrace = libseccomp.ActTrace.SetReturnCode(int16(unix.EPERM))
+ actLog = libseccomp.ActLog
+ actErrno = libseccomp.ActErrno.SetReturnCode(int16(unix.EPERM))
+ actNotify = libseccomp.ActNotify
+)
+
+const (
+ // Linux system calls can have at most 6 arguments
+ syscallMaxArguments int = 6
+)
+
+// Filters given syscalls in a container, preventing them from being used
+// Started in the container init process, and carried over to all child processes
+// Setns calls, however, require a separate invocation, as they are not children
+// of the init until they join the namespace
+func InitSeccomp(config *configs.Seccomp) (int, error) {
+ if config == nil {
+ return -1, errors.New("cannot initialize Seccomp - nil config passed")
+ }
+
+ defaultAction, err := getAction(config.DefaultAction, nil)
+ if err != nil {
+ return -1, errors.New("error initializing seccomp - invalid default action")
+ }
+
+ if defaultAction == actNotify {
+ return -1, fmt.Errorf("SCMP_ACT_NOTIFY cannot be used as default action")
+ }
+
+ filter, err := libseccomp.NewFilter(defaultAction)
+ if err != nil {
+ return -1, fmt.Errorf("error creating filter: %s", err)
+ }
+
+ // TODO: config.Flags defines the options to pass to seccomp(2) but
+ // it's not taken into consideration.
+ notifyFeatureRequired := false
+ for _, call := range config.Syscalls {
+ if call.Action == configs.Notify {
+ if call.Name == "write" {
+ return -1, fmt.Errorf("SCMP_ACT_NOTIFY cannot be used for the write syscall")
+ }
+ notifyFeatureRequired = true
+ }
+ }
+ // Ignore GetAPI() error: if API level operations are not supported, it
+ // means seccompAPILevel == 0.
+ seccompAPILevel, _ := libseccomp.GetAPI()
+ if notifyFeatureRequired {
+ if seccompAPILevel < 5 {
+ return -1, fmt.Errorf("seccomp notify unsupported: API level: got %d, want at least 6. Please try with libseccomp >= 2.5.0 and Linux >= 5.7", seccompAPILevel)
+ } else if seccompAPILevel == 5 {
+ // The current Linux kernel does not provide support for Tsync + Notify.
+ // Users should update to Linux >= 5.7 or backport this patch:
+ // https://github.com/torvalds/linux/commit/51891498f2da78ee64dfad88fa53c9e85fb50abf
+
+ // As a workaround, disable Tsync. This is not ideal because seccomp will
+ // not be applied to all threads.
+ filter.SetTsync(false)
+ }
+ }
+
+ // Add extra architectures
+ for _, arch := range config.Architectures {
+ scmpArch, err := libseccomp.GetArchFromString(arch)
+ if err != nil {
+ return -1, fmt.Errorf("error validating Seccomp architecture: %s", err)
+ }
+
+ if err := filter.AddArch(scmpArch); err != nil {
+ return -1, fmt.Errorf("error adding architecture to seccomp filter: %s", err)
+ }
+ }
+
+ // Unset no new privs bit
+ if err := filter.SetNoNewPrivsBit(false); err != nil {
+ return -1, fmt.Errorf("error setting no new privileges: %s", err)
+ }
+
+ // Add a rule for each syscall
+ for _, call := range config.Syscalls {
+ if call == nil {
+ return -1, errors.New("encountered nil syscall while initializing Seccomp")
+ }
+
+ if err = matchCall(filter, call); err != nil {
+ return -1, err
+ }
+ }
+
+ if err = filter.Load(); err != nil {
+ return -1, fmt.Errorf("error loading seccomp filter into kernel: %s", err)
+ }
+ if !notifyFeatureRequired {
+ return -1, nil
+ }
+ seccompFd, err := filter.GetNotifFd()
+ if err != nil {
+ return -1, fmt.Errorf("error getting seccomp notify fd: %s", err)
+ }
+ return int(seccompFd), nil
+}
+
+// IsEnabled returns if the kernel has been configured to support seccomp.
+func IsEnabled() bool {
+ // Try to read from /proc/self/status for kernels > 3.8
+ s, err := parseStatusFile("/proc/self/status")
+ if err != nil {
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
+ return true
+ }
+ }
+ return false
+ }
+ _, ok := s["Seccomp"]
+ return ok
+}
+
+// Convert Libcontainer Action to Libseccomp ScmpAction
+func getAction(act configs.Action, errnoRet *uint) (libseccomp.ScmpAction, error) {
+ switch act {
+ case configs.Kill:
+ return actKill, nil
+ case configs.Errno:
+ if errnoRet != nil {
+ return libseccomp.ActErrno.SetReturnCode(int16(*errnoRet)), nil
+ }
+ return actErrno, nil
+ case configs.Trap:
+ return actTrap, nil
+ case configs.Allow:
+ return actAllow, nil
+ case configs.Trace:
+ if errnoRet != nil {
+ return libseccomp.ActTrace.SetReturnCode(int16(*errnoRet)), nil
+ }
+ return actTrace, nil
+ case configs.Log:
+ return actLog, nil
+ case configs.Notify:
+ return actNotify, nil
+ default:
+ return libseccomp.ActInvalid, errors.New("invalid action, cannot use in rule")
+ }
+}
+
+// Convert Libcontainer Operator to Libseccomp ScmpCompareOp
+func getOperator(op configs.Operator) (libseccomp.ScmpCompareOp, error) {
+ switch op {
+ case configs.EqualTo:
+ return libseccomp.CompareEqual, nil
+ case configs.NotEqualTo:
+ return libseccomp.CompareNotEqual, nil
+ case configs.GreaterThan:
+ return libseccomp.CompareGreater, nil
+ case configs.GreaterThanOrEqualTo:
+ return libseccomp.CompareGreaterEqual, nil
+ case configs.LessThan:
+ return libseccomp.CompareLess, nil
+ case configs.LessThanOrEqualTo:
+ return libseccomp.CompareLessOrEqual, nil
+ case configs.MaskEqualTo:
+ return libseccomp.CompareMaskedEqual, nil
+ default:
+ return libseccomp.CompareInvalid, errors.New("invalid operator, cannot use in rule")
+ }
+}
+
+// Convert Libcontainer Arg to Libseccomp ScmpCondition
+func getCondition(arg *configs.Arg) (libseccomp.ScmpCondition, error) {
+ cond := libseccomp.ScmpCondition{}
+
+ if arg == nil {
+ return cond, errors.New("cannot convert nil to syscall condition")
+ }
+
+ op, err := getOperator(arg.Op)
+ if err != nil {
+ return cond, err
+ }
+
+ return libseccomp.MakeCondition(arg.Index, op, arg.Value, arg.ValueTwo)
+}
+
+// Add a rule to match a single syscall
+func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
+ if call == nil || filter == nil {
+ return errors.New("cannot use nil as syscall to block")
+ }
+
+ if len(call.Name) == 0 {
+ return errors.New("empty string is not a valid syscall")
+ }
+
+ // If we can't resolve the syscall, assume it's not supported on this kernel
+ // Ignore it, don't error out
+ callNum, err := libseccomp.GetSyscallFromName(call.Name)
+ if err != nil {
+ return nil
+ }
+
+ // Convert the call's action to the libseccomp equivalent
+ callAct, err := getAction(call.Action, call.ErrnoRet)
+ if err != nil {
+ return fmt.Errorf("action in seccomp profile is invalid: %s", err)
+ }
+
+ // Unconditional match - just add the rule
+ if len(call.Args) == 0 {
+ if err = filter.AddRule(callNum, callAct); err != nil {
+ return fmt.Errorf("error adding seccomp filter rule for syscall %s: %s", call.Name, err)
+ }
+ } else {
+ // If two or more arguments have the same condition,
+ // Revert to old behavior, adding each condition as a separate rule
+ argCounts := make([]uint, syscallMaxArguments)
+ conditions := []libseccomp.ScmpCondition{}
+
+ for _, cond := range call.Args {
+ newCond, err := getCondition(cond)
+ if err != nil {
+ return fmt.Errorf("error creating seccomp syscall condition for syscall %s: %s", call.Name, err)
+ }
+
+ argCounts[cond.Index] += 1
+
+ conditions = append(conditions, newCond)
+ }
+
+ hasMultipleArgs := false
+ for _, count := range argCounts {
+ if count > 1 {
+ hasMultipleArgs = true
+ break
+ }
+ }
+
+ if hasMultipleArgs {
+ // Revert to old behavior
+ // Add each condition attached to a separate rule
+ for _, cond := range conditions {
+ condArr := []libseccomp.ScmpCondition{cond}
+
+ if err = filter.AddRuleConditional(callNum, callAct, condArr); err != nil {
+ return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err)
+ }
+ }
+ } else {
+ // No conditions share same argument
+ // Use new, proper behavior
+ if err = filter.AddRuleConditional(callNum, callAct, conditions); err != nil {
+ return fmt.Errorf("error adding seccomp rule for syscall %s: %s", call.Name, err)
+ }
+ }
+ }
+
+ return nil
+}
+
+func parseStatusFile(path string) (map[string]string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ status := make(map[string]string)
+
+ for s.Scan() {
+ text := s.Text()
+ parts := strings.Split(text, ":")
+
+ if len(parts) <= 1 {
+ continue
+ }
+
+ status[parts[0]] = parts[1]
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+// Version returns major, minor, and micro.
+func Version() (uint, uint, uint) {
+ return libseccomp.GetLibraryVersion()
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
new file mode 100644
index 00000000..d15383f0
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go
@@ -0,0 +1,29 @@
+// +build !linux !cgo !seccomp
+
+package seccomp
+
+import (
+ "errors"
+
+ "github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var ErrSeccompNotEnabled = errors.New("seccomp: config provided but seccomp not supported")
+
+// InitSeccomp does nothing because seccomp is not supported.
+func InitSeccomp(config *configs.Seccomp) (int, error) {
+ if config != nil {
+ return -1, ErrSeccompNotEnabled
+ }
+ return -1, nil
+}
+
+// IsEnabled returns false, because it is not supported.
+func IsEnabled() bool {
+ return false
+}
+
+// Version returns major, minor, and micro.
+func Version() (uint, uint, uint) {
+ return 0, 0, 0
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/specconv/example.go b/vendor/github.com/opencontainers/runc/libcontainer/specconv/example.go
new file mode 100644
index 00000000..8a201bc7
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/specconv/example.go
@@ -0,0 +1,230 @@
+package specconv
+
+import (
+ "os"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Example returns an example spec file, with many options set so a user can
+// see what a standard spec file looks like.
+func Example() *specs.Spec {
+ spec := &specs.Spec{
+ Version: specs.Version,
+ Root: &specs.Root{
+ Path: "rootfs",
+ Readonly: true,
+ },
+ Process: &specs.Process{
+ Terminal: true,
+ User: specs.User{},
+ Args: []string{
+ "sh",
+ },
+ Env: []string{
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "TERM=xterm",
+ },
+ Cwd: "/",
+ NoNewPrivileges: true,
+ Capabilities: &specs.LinuxCapabilities{
+ Bounding: []string{
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE",
+ },
+ Permitted: []string{
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE",
+ },
+ Inheritable: []string{
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE",
+ },
+ Ambient: []string{
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE",
+ },
+ Effective: []string{
+ "CAP_AUDIT_WRITE",
+ "CAP_KILL",
+ "CAP_NET_BIND_SERVICE",
+ },
+ },
+ Rlimits: []specs.POSIXRlimit{
+ {
+ Type: "RLIMIT_NOFILE",
+ Hard: uint64(1024),
+ Soft: uint64(1024),
+ },
+ },
+ },
+ Hostname: "runc",
+ Mounts: []specs.Mount{
+ {
+ Destination: "/proc",
+ Type: "proc",
+ Source: "proc",
+ Options: nil,
+ },
+ {
+ Destination: "/dev",
+ Type: "tmpfs",
+ Source: "tmpfs",
+ Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+ },
+ {
+ Destination: "/dev/pts",
+ Type: "devpts",
+ Source: "devpts",
+ Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+ },
+ {
+ Destination: "/dev/shm",
+ Type: "tmpfs",
+ Source: "shm",
+ Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
+ },
+ {
+ Destination: "/dev/mqueue",
+ Type: "mqueue",
+ Source: "mqueue",
+ Options: []string{"nosuid", "noexec", "nodev"},
+ },
+ {
+ Destination: "/sys",
+ Type: "sysfs",
+ Source: "sysfs",
+ Options: []string{"nosuid", "noexec", "nodev", "ro"},
+ },
+ {
+ Destination: "/sys/fs/cgroup",
+ Type: "cgroup",
+ Source: "cgroup",
+ Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
+ },
+ },
+ Linux: &specs.Linux{
+ MaskedPaths: []string{
+ "/proc/acpi",
+ "/proc/asound",
+ "/proc/kcore",
+ "/proc/keys",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/sys/firmware",
+ "/proc/scsi",
+ },
+ ReadonlyPaths: []string{
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger",
+ },
+ Resources: &specs.LinuxResources{
+ Devices: []specs.LinuxDeviceCgroup{
+ {
+ Allow: false,
+ Access: "rwm",
+ },
+ },
+ },
+ Namespaces: []specs.LinuxNamespace{
+ {
+ Type: specs.PIDNamespace,
+ },
+ {
+ Type: specs.NetworkNamespace,
+ },
+ {
+ Type: specs.IPCNamespace,
+ },
+ {
+ Type: specs.UTSNamespace,
+ },
+ {
+ Type: specs.MountNamespace,
+ },
+ },
+ },
+ }
+ if cgroups.IsCgroup2UnifiedMode() {
+ spec.Linux.Namespaces = append(spec.Linux.Namespaces, specs.LinuxNamespace{
+ Type: specs.CgroupNamespace,
+ })
+ }
+ return spec
+}
+
+// ToRootless converts the given spec file into one that should work with
+// rootless containers (euid != 0), by removing incompatible options and adding others that
+// are needed.
+func ToRootless(spec *specs.Spec) {
+ var namespaces []specs.LinuxNamespace
+
+ // Remove networkns from the spec.
+ for _, ns := range spec.Linux.Namespaces {
+ switch ns.Type {
+ case specs.NetworkNamespace, specs.UserNamespace:
+ // Do nothing.
+ default:
+ namespaces = append(namespaces, ns)
+ }
+ }
+ // Add userns to the spec.
+ namespaces = append(namespaces, specs.LinuxNamespace{
+ Type: specs.UserNamespace,
+ })
+ spec.Linux.Namespaces = namespaces
+
+ // Add mappings for the current user.
+ spec.Linux.UIDMappings = []specs.LinuxIDMapping{{
+ HostID: uint32(os.Geteuid()),
+ ContainerID: 0,
+ Size: 1,
+ }}
+ spec.Linux.GIDMappings = []specs.LinuxIDMapping{{
+ HostID: uint32(os.Getegid()),
+ ContainerID: 0,
+ Size: 1,
+ }}
+
+ // Fix up mounts.
+ var mounts []specs.Mount
+ for _, mount := range spec.Mounts {
+ // Ignore all mounts that are under /sys.
+ if strings.HasPrefix(mount.Destination, "/sys") {
+ continue
+ }
+
+ // Remove all gid= and uid= mappings.
+ var options []string
+ for _, option := range mount.Options {
+ if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") {
+ options = append(options, option)
+ }
+ }
+
+ mount.Options = options
+ mounts = append(mounts, mount)
+ }
+ // Add the sysfs mount as an rbind.
+ mounts = append(mounts, specs.Mount{
+ Source: "/sys",
+ Destination: "/sys",
+ Type: "none",
+ Options: []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
+ })
+ spec.Mounts = mounts
+
+ // Remove cgroup settings.
+ spec.Linux.Resources = nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go
new file mode 100644
index 00000000..d3551b1b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go
@@ -0,0 +1,951 @@
+// +build linux
+
+// Package specconv implements conversion of specifications to libcontainer
+// configurations
+package specconv
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ systemdDbus "github.com/coreos/go-systemd/v22/dbus"
+ dbus "github.com/godbus/dbus/v5"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+ "github.com/opencontainers/runc/libcontainer/configs"
+ "github.com/opencontainers/runc/libcontainer/seccomp"
+ libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
+ "github.com/opencontainers/runtime-spec/specs-go"
+
+ "golang.org/x/sys/unix"
+)
+
+var namespaceMapping = map[specs.LinuxNamespaceType]configs.NamespaceType{
+ specs.PIDNamespace: configs.NEWPID,
+ specs.NetworkNamespace: configs.NEWNET,
+ specs.MountNamespace: configs.NEWNS,
+ specs.UserNamespace: configs.NEWUSER,
+ specs.IPCNamespace: configs.NEWIPC,
+ specs.UTSNamespace: configs.NEWUTS,
+ specs.CgroupNamespace: configs.NEWCGROUP,
+}
+
+var mountPropagationMapping = map[string]int{
+ "rprivate": unix.MS_PRIVATE | unix.MS_REC,
+ "private": unix.MS_PRIVATE,
+ "rslave": unix.MS_SLAVE | unix.MS_REC,
+ "slave": unix.MS_SLAVE,
+ "rshared": unix.MS_SHARED | unix.MS_REC,
+ "shared": unix.MS_SHARED,
+ "runbindable": unix.MS_UNBINDABLE | unix.MS_REC,
+ "unbindable": unix.MS_UNBINDABLE,
+ "": 0,
+}
+
+// AllowedDevices is the set of devices which are automatically included for
+// all containers.
+//
+// XXX (cyphar)
+// This behaviour is at the very least "questionable" (if not outright
+// wrong) according to the runtime-spec.
+//
+// Yes, we have to include certain devices other than the ones the user
+// specifies, but several devices listed here are not part of the spec
+// (including "mknod for any device"?!). In addition, these rules are
+// appended to the user-provided set which means that users *cannot disable
+// this behaviour*.
+//
+// ... unfortunately I'm too scared to change this now because who knows how
+// many people depend on this (incorrect and arguably insecure) behaviour.
+var AllowedDevices = []*configs.Device{
+ // allow mknod for any device
+ {
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: configs.Wildcard,
+ Minor: configs.Wildcard,
+ Permissions: "m",
+ Allow: true,
+ },
+ },
+ {
+ DeviceRule: configs.DeviceRule{
+ Type: configs.BlockDevice,
+ Major: configs.Wildcard,
+ Minor: configs.Wildcard,
+ Permissions: "m",
+ Allow: true,
+ },
+ },
+ {
+ Path: "/dev/null",
+ FileMode: 0666,
+ Uid: 0,
+ Gid: 0,
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 1,
+ Minor: 3,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ {
+ Path: "/dev/random",
+ FileMode: 0666,
+ Uid: 0,
+ Gid: 0,
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 1,
+ Minor: 8,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ {
+ Path: "/dev/full",
+ FileMode: 0666,
+ Uid: 0,
+ Gid: 0,
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 1,
+ Minor: 7,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ {
+ Path: "/dev/tty",
+ FileMode: 0666,
+ Uid: 0,
+ Gid: 0,
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 5,
+ Minor: 0,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ {
+ Path: "/dev/zero",
+ FileMode: 0666,
+ Uid: 0,
+ Gid: 0,
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 1,
+ Minor: 5,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ {
+ Path: "/dev/urandom",
+ FileMode: 0666,
+ Uid: 0,
+ Gid: 0,
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 1,
+ Minor: 9,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ // /dev/pts/ - pts namespaces are "coming soon"
+ {
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 136,
+ Minor: configs.Wildcard,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ {
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 5,
+ Minor: 2,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+ // tuntap
+ {
+ DeviceRule: configs.DeviceRule{
+ Type: configs.CharDevice,
+ Major: 10,
+ Minor: 200,
+ Permissions: "rwm",
+ Allow: true,
+ },
+ },
+}
+
+type CreateOpts struct {
+ CgroupName string
+ UseSystemdCgroup bool
+ NoPivotRoot bool
+ NoNewKeyring bool
+ Spec *specs.Spec
+ RootlessEUID bool
+ RootlessCgroups bool
+}
+
+// CreateLibcontainerConfig creates a new libcontainer configuration from a
+// given specification and a cgroup name
+func CreateLibcontainerConfig(opts *CreateOpts) (*configs.Config, error) {
+ // runc's cwd will always be the bundle path
+ rcwd, err := os.Getwd()
+ if err != nil {
+ return nil, err
+ }
+ cwd, err := filepath.Abs(rcwd)
+ if err != nil {
+ return nil, err
+ }
+ spec := opts.Spec
+ if spec.Root == nil {
+ return nil, fmt.Errorf("Root must be specified")
+ }
+ rootfsPath := spec.Root.Path
+ if !filepath.IsAbs(rootfsPath) {
+ rootfsPath = filepath.Join(cwd, rootfsPath)
+ }
+ labels := []string{}
+ for k, v := range spec.Annotations {
+ labels = append(labels, k+"="+v)
+ }
+ config := &configs.Config{
+ Rootfs: rootfsPath,
+ NoPivotRoot: opts.NoPivotRoot,
+ Readonlyfs: spec.Root.Readonly,
+ Hostname: spec.Hostname,
+ Labels: append(labels, "bundle="+cwd),
+ NoNewKeyring: opts.NoNewKeyring,
+ RootlessEUID: opts.RootlessEUID,
+ RootlessCgroups: opts.RootlessCgroups,
+ }
+
+ for _, m := range spec.Mounts {
+ config.Mounts = append(config.Mounts, createLibcontainerMount(cwd, m))
+ }
+
+ defaultDevs, err := createDevices(spec, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := CreateCgroupConfig(opts, defaultDevs)
+ if err != nil {
+ return nil, err
+ }
+
+ config.Cgroups = c
+ // set linux-specific config
+ if spec.Linux != nil {
+ var exists bool
+ if config.RootPropagation, exists = mountPropagationMapping[spec.Linux.RootfsPropagation]; !exists {
+ return nil, fmt.Errorf("rootfsPropagation=%v is not supported", spec.Linux.RootfsPropagation)
+ }
+ if config.NoPivotRoot && (config.RootPropagation&unix.MS_PRIVATE != 0) {
+ return nil, fmt.Errorf("rootfsPropagation of [r]private is not safe without pivot_root")
+ }
+
+ for _, ns := range spec.Linux.Namespaces {
+ t, exists := namespaceMapping[ns.Type]
+ if !exists {
+ return nil, fmt.Errorf("namespace %q does not exist", ns)
+ }
+ if config.Namespaces.Contains(t) {
+ return nil, fmt.Errorf("malformed spec file: duplicated ns %q", ns)
+ }
+ config.Namespaces.Add(t, ns.Path)
+ }
+ if config.Namespaces.Contains(configs.NEWNET) && config.Namespaces.PathOf(configs.NEWNET) == "" {
+ config.Networks = []*configs.Network{
+ {
+ Type: "loopback",
+ },
+ }
+ }
+ if config.Namespaces.Contains(configs.NEWUSER) {
+ if err := setupUserNamespace(spec, config); err != nil {
+ return nil, err
+ }
+ }
+ config.MaskPaths = spec.Linux.MaskedPaths
+ config.ReadonlyPaths = spec.Linux.ReadonlyPaths
+ config.MountLabel = spec.Linux.MountLabel
+ config.Sysctl = spec.Linux.Sysctl
+ if spec.Linux.Seccomp != nil {
+ seccomp, err := SetupSeccomp(spec.Linux.Seccomp)
+ if err != nil {
+ return nil, err
+ }
+ config.Seccomp = seccomp
+ }
+ if spec.Linux.IntelRdt != nil {
+ config.IntelRdt = &configs.IntelRdt{}
+ if spec.Linux.IntelRdt.L3CacheSchema != "" {
+ config.IntelRdt.L3CacheSchema = spec.Linux.IntelRdt.L3CacheSchema
+ }
+ if spec.Linux.IntelRdt.MemBwSchema != "" {
+ config.IntelRdt.MemBwSchema = spec.Linux.IntelRdt.MemBwSchema
+ }
+ }
+ }
+ if spec.Process != nil {
+ config.OomScoreAdj = spec.Process.OOMScoreAdj
+ config.NoNewPrivileges = spec.Process.NoNewPrivileges
+ config.Umask = spec.Process.User.Umask
+ if spec.Process.SelinuxLabel != "" {
+ config.ProcessLabel = spec.Process.SelinuxLabel
+ }
+ if spec.Process.Capabilities != nil {
+ config.Capabilities = &configs.Capabilities{
+ Bounding: spec.Process.Capabilities.Bounding,
+ Effective: spec.Process.Capabilities.Effective,
+ Permitted: spec.Process.Capabilities.Permitted,
+ Inheritable: spec.Process.Capabilities.Inheritable,
+ Ambient: spec.Process.Capabilities.Ambient,
+ }
+ }
+ }
+ createHooks(spec, config)
+ config.Version = specs.Version
+ return config, nil
+}
+
+func createLibcontainerMount(cwd string, m specs.Mount) *configs.Mount {
+ flags, pgflags, data, ext := parseMountOptions(m.Options)
+ source := m.Source
+ device := m.Type
+ if flags&unix.MS_BIND != 0 {
+ // Any "type" the user specified is meaningless (and ignored) for
+ // bind-mounts -- so we set it to "bind" because rootfs_linux.go
+ // (incorrectly) relies on this for some checks.
+ device = "bind"
+ if !filepath.IsAbs(source) {
+ source = filepath.Join(cwd, m.Source)
+ }
+ }
+ return &configs.Mount{
+ Device: device,
+ Source: source,
+ Destination: m.Destination,
+ Data: data,
+ Flags: flags,
+ PropagationFlags: pgflags,
+ Extensions: ext,
+ }
+}
+
+// systemd property name check: latin letters only, at least 3 of them
+var isValidName = regexp.MustCompile(`^[a-zA-Z]{3,}$`).MatchString
+
+var isSecSuffix = regexp.MustCompile(`[a-z]Sec$`).MatchString
+
+// Some systemd properties are documented as having "Sec" suffix
+// (e.g. TimeoutStopSec) but are expected to have "USec" suffix
+// here, so let's provide conversion to improve compatibility.
+func convertSecToUSec(value dbus.Variant) (dbus.Variant, error) {
+ var sec uint64
+ const M = 1000000
+ vi := value.Value()
+ switch value.Signature().String() {
+ case "y":
+ sec = uint64(vi.(byte)) * M
+ case "n":
+ sec = uint64(vi.(int16)) * M
+ case "q":
+ sec = uint64(vi.(uint16)) * M
+ case "i":
+ sec = uint64(vi.(int32)) * M
+ case "u":
+ sec = uint64(vi.(uint32)) * M
+ case "x":
+ sec = uint64(vi.(int64)) * M
+ case "t":
+ sec = vi.(uint64) * M
+ case "d":
+ sec = uint64(vi.(float64) * M)
+ default:
+ return value, errors.New("not a number")
+ }
+ return dbus.MakeVariant(sec), nil
+}
+
+func initSystemdProps(spec *specs.Spec) ([]systemdDbus.Property, error) {
+ const keyPrefix = "org.systemd.property."
+ var sp []systemdDbus.Property
+
+ for k, v := range spec.Annotations {
+ name := strings.TrimPrefix(k, keyPrefix)
+ if len(name) == len(k) { // prefix not there
+ continue
+ }
+ if !isValidName(name) {
+ return nil, fmt.Errorf("Annotation %s name incorrect: %s", k, name)
+ }
+ value, err := dbus.ParseVariant(v, dbus.Signature{})
+ if err != nil {
+ return nil, fmt.Errorf("Annotation %s=%s value parse error: %v", k, v, err)
+ }
+ if isSecSuffix(name) {
+ name = strings.TrimSuffix(name, "Sec") + "USec"
+ value, err = convertSecToUSec(value)
+ if err != nil {
+ return nil, fmt.Errorf("Annotation %s=%s value parse error: %v", k, v, err)
+ }
+ }
+ sp = append(sp, systemdDbus.Property{Name: name, Value: value})
+ }
+
+ return sp, nil
+}
+
+func CreateCgroupConfig(opts *CreateOpts, defaultDevs []*configs.Device) (*configs.Cgroup, error) {
+ var (
+ myCgroupPath string
+
+ spec = opts.Spec
+ useSystemdCgroup = opts.UseSystemdCgroup
+ name = opts.CgroupName
+ )
+
+ c := &configs.Cgroup{
+ Resources: &configs.Resources{},
+ }
+
+ if useSystemdCgroup {
+ sp, err := initSystemdProps(spec)
+ if err != nil {
+ return nil, err
+ }
+ c.SystemdProps = sp
+ }
+
+ if spec.Linux != nil && spec.Linux.CgroupsPath != "" {
+ myCgroupPath = libcontainerUtils.CleanPath(spec.Linux.CgroupsPath)
+ if useSystemdCgroup {
+ myCgroupPath = spec.Linux.CgroupsPath
+ }
+ }
+
+ if useSystemdCgroup {
+ if myCgroupPath == "" {
+ c.Parent = "system.slice"
+ c.ScopePrefix = "runc"
+ c.Name = name
+ } else {
+ // Parse the path from expected "slice:prefix:name"
+ // for e.g. "system.slice:docker:1234"
+ parts := strings.Split(myCgroupPath, ":")
+ if len(parts) != 3 {
+ return nil, fmt.Errorf("expected cgroupsPath to be of format \"slice:prefix:name\" for systemd cgroups, got %q instead", myCgroupPath)
+ }
+ c.Parent = parts[0]
+ c.ScopePrefix = parts[1]
+ c.Name = parts[2]
+ }
+ } else {
+ if myCgroupPath == "" {
+ c.Name = name
+ }
+ c.Path = myCgroupPath
+ }
+
+ // In rootless containers, any attempt to make cgroup changes is likely to fail.
+ // libcontainer will validate this but ignores the error.
+ if spec.Linux != nil {
+ r := spec.Linux.Resources
+ if r != nil {
+ for i, d := range spec.Linux.Resources.Devices {
+ var (
+ t = "a"
+ major = int64(-1)
+ minor = int64(-1)
+ )
+ if d.Type != "" {
+ t = d.Type
+ }
+ if d.Major != nil {
+ major = *d.Major
+ }
+ if d.Minor != nil {
+ minor = *d.Minor
+ }
+ if d.Access == "" {
+ return nil, fmt.Errorf("device access at %d field cannot be empty", i)
+ }
+ dt, err := stringToCgroupDeviceRune(t)
+ if err != nil {
+ return nil, err
+ }
+ c.Resources.Devices = append(c.Resources.Devices, &configs.DeviceRule{
+ Type: dt,
+ Major: major,
+ Minor: minor,
+ Permissions: configs.DevicePermissions(d.Access),
+ Allow: d.Allow,
+ })
+ }
+ if r.Memory != nil {
+ if r.Memory.Limit != nil {
+ c.Resources.Memory = *r.Memory.Limit
+ }
+ if r.Memory.Reservation != nil {
+ c.Resources.MemoryReservation = *r.Memory.Reservation
+ }
+ if r.Memory.Swap != nil {
+ c.Resources.MemorySwap = *r.Memory.Swap
+ }
+ if r.Memory.Kernel != nil {
+ c.Resources.KernelMemory = *r.Memory.Kernel
+ }
+ if r.Memory.KernelTCP != nil {
+ c.Resources.KernelMemoryTCP = *r.Memory.KernelTCP
+ }
+ if r.Memory.Swappiness != nil {
+ c.Resources.MemorySwappiness = r.Memory.Swappiness
+ }
+ if r.Memory.DisableOOMKiller != nil {
+ c.Resources.OomKillDisable = *r.Memory.DisableOOMKiller
+ }
+ }
+ if r.CPU != nil {
+ if r.CPU.Shares != nil {
+ c.Resources.CpuShares = *r.CPU.Shares
+
+ //CpuWeight is used for cgroupv2 and should be converted
+ c.Resources.CpuWeight = cgroups.ConvertCPUSharesToCgroupV2Value(c.Resources.CpuShares)
+ }
+ if r.CPU.Quota != nil {
+ c.Resources.CpuQuota = *r.CPU.Quota
+ }
+ if r.CPU.Period != nil {
+ c.Resources.CpuPeriod = *r.CPU.Period
+ }
+ if r.CPU.RealtimeRuntime != nil {
+ c.Resources.CpuRtRuntime = *r.CPU.RealtimeRuntime
+ }
+ if r.CPU.RealtimePeriod != nil {
+ c.Resources.CpuRtPeriod = *r.CPU.RealtimePeriod
+ }
+ if r.CPU.Cpus != "" {
+ c.Resources.CpusetCpus = r.CPU.Cpus
+ }
+ if r.CPU.Mems != "" {
+ c.Resources.CpusetMems = r.CPU.Mems
+ }
+ }
+ if r.Pids != nil {
+ c.Resources.PidsLimit = r.Pids.Limit
+ }
+ if r.BlockIO != nil {
+ if r.BlockIO.Weight != nil {
+ c.Resources.BlkioWeight = *r.BlockIO.Weight
+ }
+ if r.BlockIO.LeafWeight != nil {
+ c.Resources.BlkioLeafWeight = *r.BlockIO.LeafWeight
+ }
+ if r.BlockIO.WeightDevice != nil {
+ for _, wd := range r.BlockIO.WeightDevice {
+ var weight, leafWeight uint16
+ if wd.Weight != nil {
+ weight = *wd.Weight
+ }
+ if wd.LeafWeight != nil {
+ leafWeight = *wd.LeafWeight
+ }
+ weightDevice := configs.NewWeightDevice(wd.Major, wd.Minor, weight, leafWeight)
+ c.Resources.BlkioWeightDevice = append(c.Resources.BlkioWeightDevice, weightDevice)
+ }
+ }
+ if r.BlockIO.ThrottleReadBpsDevice != nil {
+ for _, td := range r.BlockIO.ThrottleReadBpsDevice {
+ rate := td.Rate
+ throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, rate)
+ c.Resources.BlkioThrottleReadBpsDevice = append(c.Resources.BlkioThrottleReadBpsDevice, throttleDevice)
+ }
+ }
+ if r.BlockIO.ThrottleWriteBpsDevice != nil {
+ for _, td := range r.BlockIO.ThrottleWriteBpsDevice {
+ rate := td.Rate
+ throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, rate)
+ c.Resources.BlkioThrottleWriteBpsDevice = append(c.Resources.BlkioThrottleWriteBpsDevice, throttleDevice)
+ }
+ }
+ if r.BlockIO.ThrottleReadIOPSDevice != nil {
+ for _, td := range r.BlockIO.ThrottleReadIOPSDevice {
+ rate := td.Rate
+ throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, rate)
+ c.Resources.BlkioThrottleReadIOPSDevice = append(c.Resources.BlkioThrottleReadIOPSDevice, throttleDevice)
+ }
+ }
+ if r.BlockIO.ThrottleWriteIOPSDevice != nil {
+ for _, td := range r.BlockIO.ThrottleWriteIOPSDevice {
+ rate := td.Rate
+ throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, rate)
+ c.Resources.BlkioThrottleWriteIOPSDevice = append(c.Resources.BlkioThrottleWriteIOPSDevice, throttleDevice)
+ }
+ }
+ }
+ for _, l := range r.HugepageLimits {
+ c.Resources.HugetlbLimit = append(c.Resources.HugetlbLimit, &configs.HugepageLimit{
+ Pagesize: l.Pagesize,
+ Limit: l.Limit,
+ })
+ }
+ if r.Network != nil {
+ if r.Network.ClassID != nil {
+ c.Resources.NetClsClassid = *r.Network.ClassID
+ }
+ for _, m := range r.Network.Priorities {
+ c.Resources.NetPrioIfpriomap = append(c.Resources.NetPrioIfpriomap, &configs.IfPrioMap{
+ Interface: m.Name,
+ Priority: int64(m.Priority),
+ })
+ }
+ }
+ if len(r.Unified) > 0 {
+ // copy the map
+ c.Resources.Unified = make(map[string]string, len(r.Unified))
+ for k, v := range r.Unified {
+ c.Resources.Unified[k] = v
+ }
+ }
+ }
+ }
+
+ // Append the default allowed devices to the end of the list.
+ for _, device := range defaultDevs {
+ c.Resources.Devices = append(c.Resources.Devices, &device.DeviceRule)
+ }
+ return c, nil
+}
+
+func stringToCgroupDeviceRune(s string) (configs.DeviceType, error) {
+ switch s {
+ case "a":
+ return configs.WildcardDevice, nil
+ case "b":
+ return configs.BlockDevice, nil
+ case "c":
+ return configs.CharDevice, nil
+ default:
+ return 0, fmt.Errorf("invalid cgroup device type %q", s)
+ }
+}
+
+func stringToDeviceRune(s string) (configs.DeviceType, error) {
+ switch s {
+ case "p":
+ return configs.FifoDevice, nil
+ case "u", "c":
+ return configs.CharDevice, nil
+ case "b":
+ return configs.BlockDevice, nil
+ default:
+ return 0, fmt.Errorf("invalid device type %q", s)
+ }
+}
+
+func createDevices(spec *specs.Spec, config *configs.Config) ([]*configs.Device, error) {
+ // If a spec device is redundant with a default device, remove that default
+ // device (the spec one takes priority).
+ dedupedAllowDevs := []*configs.Device{}
+
+next:
+ for _, ad := range AllowedDevices {
+ if ad.Path != "" {
+ for _, sd := range spec.Linux.Devices {
+ if sd.Path == ad.Path {
+ continue next
+ }
+ }
+ }
+ dedupedAllowDevs = append(dedupedAllowDevs, ad)
+ if ad.Path != "" {
+ config.Devices = append(config.Devices, ad)
+ }
+ }
+
+ // Merge in additional devices from the spec.
+ if spec.Linux != nil {
+ for _, d := range spec.Linux.Devices {
+ var uid, gid uint32
+ var filemode os.FileMode = 0666
+
+ if d.UID != nil {
+ uid = *d.UID
+ }
+ if d.GID != nil {
+ gid = *d.GID
+ }
+ dt, err := stringToDeviceRune(d.Type)
+ if err != nil {
+ return nil, err
+ }
+ if d.FileMode != nil {
+ filemode = *d.FileMode
+ }
+ device := &configs.Device{
+ DeviceRule: configs.DeviceRule{
+ Type: dt,
+ Major: d.Major,
+ Minor: d.Minor,
+ },
+ Path: d.Path,
+ FileMode: filemode,
+ Uid: uid,
+ Gid: gid,
+ }
+ config.Devices = append(config.Devices, device)
+ }
+ }
+
+ return dedupedAllowDevs, nil
+}
+
+func setupUserNamespace(spec *specs.Spec, config *configs.Config) error {
+ create := func(m specs.LinuxIDMapping) configs.IDMap {
+ return configs.IDMap{
+ HostID: int(m.HostID),
+ ContainerID: int(m.ContainerID),
+ Size: int(m.Size),
+ }
+ }
+ if spec.Linux != nil {
+ for _, m := range spec.Linux.UIDMappings {
+ config.UidMappings = append(config.UidMappings, create(m))
+ }
+ for _, m := range spec.Linux.GIDMappings {
+ config.GidMappings = append(config.GidMappings, create(m))
+ }
+ }
+ rootUID, err := config.HostRootUID()
+ if err != nil {
+ return err
+ }
+ rootGID, err := config.HostRootGID()
+ if err != nil {
+ return err
+ }
+ for _, node := range config.Devices {
+ node.Uid = uint32(rootUID)
+ node.Gid = uint32(rootGID)
+ }
+ return nil
+}
+
+// parseMountOptions parses the string and returns the flags, propagation
+// flags and any mount data that it contains.
+func parseMountOptions(options []string) (int, []int, string, int) {
+ var (
+ flag int
+ pgflag []int
+ data []string
+ extFlags int
+ )
+ flags := map[string]struct {
+ clear bool
+ flag int
+ }{
+ "acl": {false, unix.MS_POSIXACL},
+ "async": {true, unix.MS_SYNCHRONOUS},
+ "atime": {true, unix.MS_NOATIME},
+ "bind": {false, unix.MS_BIND},
+ "defaults": {false, 0},
+ "dev": {true, unix.MS_NODEV},
+ "diratime": {true, unix.MS_NODIRATIME},
+ "dirsync": {false, unix.MS_DIRSYNC},
+ "exec": {true, unix.MS_NOEXEC},
+ "iversion": {false, unix.MS_I_VERSION},
+ "lazytime": {false, unix.MS_LAZYTIME},
+ "loud": {true, unix.MS_SILENT},
+ "mand": {false, unix.MS_MANDLOCK},
+ "noacl": {true, unix.MS_POSIXACL},
+ "noatime": {false, unix.MS_NOATIME},
+ "nodev": {false, unix.MS_NODEV},
+ "nodiratime": {false, unix.MS_NODIRATIME},
+ "noexec": {false, unix.MS_NOEXEC},
+ "noiversion": {true, unix.MS_I_VERSION},
+ "nolazytime": {true, unix.MS_LAZYTIME},
+ "nomand": {true, unix.MS_MANDLOCK},
+ "norelatime": {true, unix.MS_RELATIME},
+ "nostrictatime": {true, unix.MS_STRICTATIME},
+ "nosuid": {false, unix.MS_NOSUID},
+ "rbind": {false, unix.MS_BIND | unix.MS_REC},
+ "relatime": {false, unix.MS_RELATIME},
+ "remount": {false, unix.MS_REMOUNT},
+ "ro": {false, unix.MS_RDONLY},
+ "rw": {true, unix.MS_RDONLY},
+ "silent": {false, unix.MS_SILENT},
+ "strictatime": {false, unix.MS_STRICTATIME},
+ "suid": {true, unix.MS_NOSUID},
+ "sync": {false, unix.MS_SYNCHRONOUS},
+ }
+ propagationFlags := map[string]int{
+ "private": unix.MS_PRIVATE,
+ "shared": unix.MS_SHARED,
+ "slave": unix.MS_SLAVE,
+ "unbindable": unix.MS_UNBINDABLE,
+ "rprivate": unix.MS_PRIVATE | unix.MS_REC,
+ "rshared": unix.MS_SHARED | unix.MS_REC,
+ "rslave": unix.MS_SLAVE | unix.MS_REC,
+ "runbindable": unix.MS_UNBINDABLE | unix.MS_REC,
+ }
+ extensionFlags := map[string]struct {
+ clear bool
+ flag int
+ }{
+ "tmpcopyup": {false, configs.EXT_COPYUP},
+ }
+ for _, o := range options {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else if f, exists := propagationFlags[o]; exists && f != 0 {
+ pgflag = append(pgflag, f)
+ } else if f, exists := extensionFlags[o]; exists && f.flag != 0 {
+ if f.clear {
+ extFlags &= ^f.flag
+ } else {
+ extFlags |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, pgflag, strings.Join(data, ","), extFlags
+}
+
+func SetupSeccomp(config *specs.LinuxSeccomp) (*configs.Seccomp, error) {
+ if config == nil {
+ return nil, nil
+ }
+
+ // No default action specified, no syscalls listed, assume seccomp disabled
+ if config.DefaultAction == "" && len(config.Syscalls) == 0 {
+ return nil, nil
+ }
+
+ newConfig := new(configs.Seccomp)
+ newConfig.Syscalls = []*configs.Syscall{}
+
+ if len(config.Architectures) > 0 {
+ newConfig.Architectures = []string{}
+ for _, arch := range config.Architectures {
+ newArch, err := seccomp.ConvertStringToArch(string(arch))
+ if err != nil {
+ return nil, err
+ }
+ newConfig.Architectures = append(newConfig.Architectures, newArch)
+ }
+ }
+
+ // Convert default action from string representation
+ newDefaultAction, err := seccomp.ConvertStringToAction(string(config.DefaultAction))
+ if err != nil {
+ return nil, err
+ }
+ newConfig.DefaultAction = newDefaultAction
+
+ newConfig.ListenerPath = config.ListenerPath
+ newConfig.ListenerMetadata = config.ListenerMetadata
+
+ // Loop through all syscall blocks and convert them to libcontainer format
+ for _, call := range config.Syscalls {
+ newAction, err := seccomp.ConvertStringToAction(string(call.Action))
+ if err != nil {
+ return nil, err
+ }
+
+ for _, name := range call.Names {
+ newCall := configs.Syscall{
+ Name: name,
+ Action: newAction,
+ ErrnoRet: call.ErrnoRet,
+ Args: []*configs.Arg{},
+ }
+ // Loop through all the arguments of the syscall and convert them
+ for _, arg := range call.Args {
+ newOp, err := seccomp.ConvertStringToOperator(string(arg.Op))
+ if err != nil {
+ return nil, err
+ }
+
+ newArg := configs.Arg{
+ Index: arg.Index,
+ Value: arg.Value,
+ ValueTwo: arg.ValueTwo,
+ Op: newOp,
+ }
+
+ newCall.Args = append(newCall.Args, &newArg)
+ }
+ newConfig.Syscalls = append(newConfig.Syscalls, &newCall)
+ }
+ }
+
+ return newConfig, nil
+}
+
+func createHooks(rspec *specs.Spec, config *configs.Config) {
+ config.Hooks = configs.Hooks{}
+ if rspec.Hooks != nil {
+ for _, h := range rspec.Hooks.Prestart {
+ cmd := createCommandHook(h)
+ config.Hooks[configs.Prestart] = append(config.Hooks[configs.Prestart], configs.NewCommandHook(cmd))
+ }
+ for _, h := range rspec.Hooks.CreateRuntime {
+ cmd := createCommandHook(h)
+ config.Hooks[configs.CreateRuntime] = append(config.Hooks[configs.CreateRuntime], configs.NewCommandHook(cmd))
+ }
+ for _, h := range rspec.Hooks.CreateContainer {
+ cmd := createCommandHook(h)
+ config.Hooks[configs.CreateContainer] = append(config.Hooks[configs.CreateContainer], configs.NewCommandHook(cmd))
+ }
+ for _, h := range rspec.Hooks.StartContainer {
+ cmd := createCommandHook(h)
+ config.Hooks[configs.StartContainer] = append(config.Hooks[configs.StartContainer], configs.NewCommandHook(cmd))
+ }
+ for _, h := range rspec.Hooks.Poststart {
+ cmd := createCommandHook(h)
+ config.Hooks[configs.Poststart] = append(config.Hooks[configs.Poststart], configs.NewCommandHook(cmd))
+ }
+ for _, h := range rspec.Hooks.Poststop {
+ cmd := createCommandHook(h)
+ config.Hooks[configs.Poststop] = append(config.Hooks[configs.Poststop], configs.NewCommandHook(cmd))
+ }
+ }
+}
+
+func createCommandHook(h specs.Hook) configs.Command {
+ cmd := configs.Command{
+ Path: h.Path,
+ Args: h.Args,
+ Env: h.Env,
+ }
+ if h.Timeout != nil {
+ d := time.Duration(*h.Timeout) * time.Second
+ cmd.Timeout = &d
+ }
+ return cmd
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
new file mode 100644
index 00000000..49471960
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
@@ -0,0 +1,150 @@
+// +build linux
+
+package system
+
+import (
+ "os"
+ "os/exec"
+ "sync"
+ "unsafe"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+ "golang.org/x/sys/unix"
+)
+
+type ParentDeathSignal int
+
+func (p ParentDeathSignal) Restore() error {
+ if p == 0 {
+ return nil
+ }
+ current, err := GetParentDeathSignal()
+ if err != nil {
+ return err
+ }
+ if p == current {
+ return nil
+ }
+ return p.Set()
+}
+
+func (p ParentDeathSignal) Set() error {
+ return SetParentDeathSignal(uintptr(p))
+}
+
+func Execv(cmd string, args []string, env []string) error {
+ name, err := exec.LookPath(cmd)
+ if err != nil {
+ return err
+ }
+
+ return unix.Exec(name, args, env)
+}
+
+func Prlimit(pid, resource int, limit unix.Rlimit) error {
+ _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0)
+ if err != 0 {
+ return err
+ }
+ return nil
+}
+
+func SetParentDeathSignal(sig uintptr) error {
+ if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetParentDeathSignal() (ParentDeathSignal, error) {
+ var sig int
+ if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil {
+ return -1, err
+ }
+ return ParentDeathSignal(sig), nil
+}
+
+func SetKeepCaps() error {
+ if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func ClearKeepCaps() error {
+ if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func Setctty() error {
+ if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+var (
+ inUserNS bool
+ nsOnce sync.Once
+)
+
+// RunningInUserNS detects whether we are currently running in a user namespace.
+// Originally copied from github.com/lxc/lxd/shared/util.go
+func RunningInUserNS() bool {
+ nsOnce.Do(func() {
+ uidmap, err := user.CurrentProcessUIDMap()
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return
+ }
+ inUserNS = UIDMapInUserNS(uidmap)
+ })
+ return inUserNS
+}
+
+func UIDMapInUserNS(uidmap []user.IDMap) bool {
+ /*
+ * We assume we are in the initial user namespace if we have a full
+ * range - 4294967295 uids starting at uid 0.
+ */
+ if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 {
+ return false
+ }
+ return true
+}
+
+// GetParentNSeuid returns the euid within the parent user namespace
+func GetParentNSeuid() int64 {
+ euid := int64(os.Geteuid())
+ uidmap, err := user.CurrentProcessUIDMap()
+ if err != nil {
+ // This kernel-provided file only exists if user namespaces are supported
+ return euid
+ }
+ for _, um := range uidmap {
+ if um.ID <= euid && euid <= um.ID+um.Count-1 {
+ return um.ParentID + euid - um.ID
+ }
+ }
+ return euid
+}
+
+// SetSubreaper sets the value i as the subreaper setting for the calling process
+func SetSubreaper(i int) error {
+ return unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
+}
+
+// GetSubreaper returns the subreaper setting for the calling process
+func GetSubreaper() (int, error) {
+ var i uintptr
+
+ if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil {
+ return -1, err
+ }
+
+ return int(i), nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
new file mode 100644
index 00000000..b73cf70b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go
@@ -0,0 +1,103 @@
+package system
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// State is the status of a process.
+type State rune
+
+const ( // Only values for Linux 3.14 and later are listed here
+ Dead State = 'X'
+ DiskSleep State = 'D'
+ Running State = 'R'
+ Sleeping State = 'S'
+ Stopped State = 'T'
+ TracingStop State = 't'
+ Zombie State = 'Z'
+)
+
+// String forms of the state from proc(5)'s documentation for
+// /proc/[pid]/status' "State" field.
+func (s State) String() string {
+ switch s {
+ case Dead:
+ return "dead"
+ case DiskSleep:
+ return "disk sleep"
+ case Running:
+ return "running"
+ case Sleeping:
+ return "sleeping"
+ case Stopped:
+ return "stopped"
+ case TracingStop:
+ return "tracing stop"
+ case Zombie:
+ return "zombie"
+ default:
+ return fmt.Sprintf("unknown (%c)", s)
+ }
+}
+
+// Stat_t represents the information from /proc/[pid]/stat, as
+// described in proc(5) with names based on the /proc/[pid]/status
+// fields.
+type Stat_t struct {
+ // PID is the process ID.
+ PID uint
+
+ // Name is the command run by the process.
+ Name string
+
+ // State is the state of the process.
+ State State
+
+ // StartTime is the number of clock ticks after system boot (since
+ // Linux 2.6).
+ StartTime uint64
+}
+
+// Stat returns a Stat_t instance for the specified process.
+func Stat(pid int) (stat Stat_t, err error) {
+ bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return stat, err
+ }
+ return parseStat(string(bytes))
+}
+
+func parseStat(data string) (stat Stat_t, err error) {
+ // From proc(5), field 2 could contain space and is inside `(` and `)`.
+ // The following is an example:
+ // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+ i := strings.LastIndex(data, ")")
+ if i <= 2 || i >= len(data)-1 {
+ return stat, fmt.Errorf("invalid stat data: %q", data)
+ }
+
+ parts := strings.SplitN(data[:i], "(", 2)
+ if len(parts) != 2 {
+ return stat, fmt.Errorf("invalid stat data: %q", data)
+ }
+
+ stat.Name = parts[1]
+ _, err = fmt.Sscanf(parts[0], "%d", &stat.PID)
+ if err != nil {
+ return stat, err
+ }
+
+ // parts indexes should be offset by 3 from the field number given
+ // proc(5), because parts is zero-indexed and we've removed fields
+ // one (PID) and two (Name) in the paren-split.
+ parts = strings.Split(data[i+2:], " ")
+ var state int
+ fmt.Sscanf(parts[3-3], "%c", &state)
+ stat.State = State(state)
+ fmt.Sscanf(parts[22-3], "%d", &stat.StartTime)
+ return stat, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go
new file mode 100644
index 00000000..c5ca5d86
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go
@@ -0,0 +1,26 @@
+// +build linux
+// +build 386 arm
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
new file mode 100644
index 00000000..e05e30ad
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
@@ -0,0 +1,26 @@
+// +build linux
+// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv64 s390x
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Setuid sets the uid of the calling thread to the specified uid.
+func Setuid(uid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+// Setgid sets the gid of the calling thread to the specified gid.
+func Setgid(gid int) (err error) {
+ _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
new file mode 100644
index 00000000..b94be74a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
@@ -0,0 +1,27 @@
+// +build !linux
+
+package system
+
+import (
+ "os"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+// RunningInUserNS is a stub for non-Linux systems
+// Always returns false
+func RunningInUserNS() bool {
+ return false
+}
+
+// UIDMapInUserNS is a stub for non-Linux systems
+// Always returns false
+func UIDMapInUserNS(uidmap []user.IDMap) bool {
+ return false
+}
+
+// GetParentNSeuid returns the euid within the parent user namespace
+// Always returns os.Geteuid on non-linux
+func GetParentNSeuid() int {
+ return os.Geteuid()
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
new file mode 100644
index 00000000..a6823fc9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
@@ -0,0 +1,35 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Returns a []byte slice if the xattr is set and nil otherwise
+// Requires path and its attribute as arguments
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ var sz int
+ // Start with a 128 length byte array
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+
+ switch {
+ case errno == unix.ENODATA:
+ return nil, errno
+ case errno == unix.ENOTSUP:
+ return nil, errno
+ case errno == unix.ERANGE:
+ // 128 byte array might just not be good enough,
+ // A dummy buffer is used to get the real size
+ // of the xattrs on disk
+ sz, errno = unix.Lgetxattr(path, attr, []byte{})
+ if errno != nil {
+ return nil, errno
+ }
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ if errno != nil {
+ return nil, errno
+ }
+ case errno != nil:
+ return nil, errno
+ }
+ return dest[:sz], nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
new file mode 100644
index 00000000..edbe2006
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
@@ -0,0 +1,2 @@
+Tianon Gravi (@tianon)
+Aleksa Sarai (@cyphar)
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go
new file mode 100644
index 00000000..6fd8dd0d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go
@@ -0,0 +1,41 @@
+package user
+
+import (
+ "errors"
+)
+
+var (
+ // The current operating system does not provide the required data for user lookups.
+ ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
+ // No matching entries found in file.
+ ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
+ ErrNoGroupEntries = errors.New("no matching entries in group file")
+)
+
+// LookupUser looks up a user by their username in /etc/passwd. If the user
+// cannot be found (or there is no /etc/passwd file on the filesystem), then
+// LookupUser returns an error.
+func LookupUser(username string) (User, error) {
+ return lookupUser(username)
+}
+
+// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
+// be found (or there is no /etc/passwd file on the filesystem), then LookupId
+// returns an error.
+func LookupUid(uid int) (User, error) {
+ return lookupUid(uid)
+}
+
+// LookupGroup looks up a group by its name in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGroup
+// returns an error.
+func LookupGroup(groupname string) (Group, error) {
+ return lookupGroup(groupname)
+}
+
+// LookupGid looks up a group by its group id in /etc/group. If the group cannot
+// be found (or there is no /etc/group file on the filesystem), then LookupGid
+// returns an error.
+func LookupGid(gid int) (Group, error) {
+ return lookupGid(gid)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
new file mode 100644
index 00000000..92b5ae8d
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
@@ -0,0 +1,144 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package user
+
+import (
+ "io"
+ "os"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
+// Unix-specific path to the passwd and group formatted files.
+const (
+ unixPasswdPath = "/etc/passwd"
+ unixGroupPath = "/etc/group"
+)
+
+func lookupUser(username string) (User, error) {
+ return lookupUserFunc(func(u User) bool {
+ return u.Name == username
+ })
+}
+
+func lookupUid(uid int) (User, error) {
+ return lookupUserFunc(func(u User) bool {
+ return u.Uid == uid
+ })
+}
+
+func lookupUserFunc(filter func(u User) bool) (User, error) {
+ // Get operating system-specific passwd reader-closer.
+ passwd, err := GetPasswd()
+ if err != nil {
+ return User{}, err
+ }
+ defer passwd.Close()
+
+ // Get the users.
+ users, err := ParsePasswdFilter(passwd, filter)
+ if err != nil {
+ return User{}, err
+ }
+
+ // No user entries found.
+ if len(users) == 0 {
+ return User{}, ErrNoPasswdEntries
+ }
+
+ // Assume the first entry is the "correct" one.
+ return users[0], nil
+}
+
+func lookupGroup(groupname string) (Group, error) {
+ return lookupGroupFunc(func(g Group) bool {
+ return g.Name == groupname
+ })
+}
+
+func lookupGid(gid int) (Group, error) {
+ return lookupGroupFunc(func(g Group) bool {
+ return g.Gid == gid
+ })
+}
+
+func lookupGroupFunc(filter func(g Group) bool) (Group, error) {
+ // Get operating system-specific group reader-closer.
+ group, err := GetGroup()
+ if err != nil {
+ return Group{}, err
+ }
+ defer group.Close()
+
+ // Get the users.
+ groups, err := ParseGroupFilter(group, filter)
+ if err != nil {
+ return Group{}, err
+ }
+
+ // No user entries found.
+ if len(groups) == 0 {
+ return Group{}, ErrNoGroupEntries
+ }
+
+ // Assume the first entry is the "correct" one.
+ return groups[0], nil
+}
+
+func GetPasswdPath() (string, error) {
+ return unixPasswdPath, nil
+}
+
+func GetPasswd() (io.ReadCloser, error) {
+ return os.Open(unixPasswdPath)
+}
+
+func GetGroupPath() (string, error) {
+ return unixGroupPath, nil
+}
+
+func GetGroup() (io.ReadCloser, error) {
+ return os.Open(unixGroupPath)
+}
+
+// CurrentUser looks up the current user by their user id in /etc/passwd. If the
+// user cannot be found (or there is no /etc/passwd file on the filesystem),
+// then CurrentUser returns an error.
+func CurrentUser() (User, error) {
+ return LookupUid(unix.Getuid())
+}
+
+// CurrentGroup looks up the current user's group by their primary group id's
+// entry in /etc/passwd. If the group cannot be found (or there is no
+// /etc/group file on the filesystem), then CurrentGroup returns an error.
+func CurrentGroup() (Group, error) {
+ return LookupGid(unix.Getgid())
+}
+
+func currentUserSubIDs(fileName string) ([]SubID, error) {
+ u, err := CurrentUser()
+ if err != nil {
+ return nil, err
+ }
+ filter := func(entry SubID) bool {
+ return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
+ }
+ return ParseSubIDFileFilter(fileName, filter)
+}
+
+func CurrentUserSubUIDs() ([]SubID, error) {
+ return currentUserSubIDs("/etc/subuid")
+}
+
+func CurrentUserSubGIDs() ([]SubID, error) {
+ return currentUserSubIDs("/etc/subgid")
+}
+
+func CurrentProcessUIDMap() ([]IDMap, error) {
+ return ParseIDMapFile("/proc/self/uid_map")
+}
+
+func CurrentProcessGIDMap() ([]IDMap, error) {
+ return ParseIDMapFile("/proc/self/gid_map")
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go
new file mode 100644
index 00000000..f19333e6
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go
@@ -0,0 +1,40 @@
+// +build windows
+
+package user
+
+import (
+ "os/user"
+ "strconv"
+)
+
+func lookupUser(username string) (User, error) {
+ u, err := user.Lookup(username)
+ if err != nil {
+ return User{}, err
+ }
+ return userFromOS(u)
+}
+
+func lookupUid(uid int) (User, error) {
+ u, err := user.LookupId(strconv.Itoa(uid))
+ if err != nil {
+ return User{}, err
+ }
+ return userFromOS(u)
+}
+
+func lookupGroup(groupname string) (Group, error) {
+ g, err := user.LookupGroup(groupname)
+ if err != nil {
+ return Group{}, err
+ }
+ return groupFromOS(g)
+}
+
+func lookupGid(gid int) (Group, error) {
+ g, err := user.LookupGroupId(strconv.Itoa(gid))
+ if err != nil {
+ return Group{}, err
+ }
+ return groupFromOS(g)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
new file mode 100644
index 00000000..4b89dad7
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go
@@ -0,0 +1,604 @@
+package user
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "os/user"
+ "strconv"
+ "strings"
+)
+
+const (
+ minId = 0
+ maxId = 1<<31 - 1 //for 32-bit systems compatibility
+)
+
+var (
+ ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId)
+)
+
+type User struct {
+ Name string
+ Pass string
+ Uid int
+ Gid int
+ Gecos string
+ Home string
+ Shell string
+}
+
+// userFromOS converts an os/user.(*User) to local User
+//
+// (This does not include Pass, Shell or Gecos)
+func userFromOS(u *user.User) (User, error) {
+ newUser := User{
+ Name: u.Username,
+ Home: u.HomeDir,
+ }
+ id, err := strconv.Atoi(u.Uid)
+ if err != nil {
+ return newUser, err
+ }
+ newUser.Uid = id
+
+ id, err = strconv.Atoi(u.Gid)
+ if err != nil {
+ return newUser, err
+ }
+ newUser.Gid = id
+ return newUser, nil
+}
+
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ List []string
+}
+
+// groupFromOS converts an os/user.(*Group) to local Group
+//
+// (This does not include Pass or List)
+func groupFromOS(g *user.Group) (Group, error) {
+ newGroup := Group{
+ Name: g.Name,
+ }
+
+ id, err := strconv.Atoi(g.Gid)
+ if err != nil {
+ return newGroup, err
+ }
+ newGroup.Gid = id
+
+ return newGroup, nil
+}
+
+// SubID represents an entry in /etc/sub{u,g}id
+type SubID struct {
+ Name string
+ SubID int64
+ Count int64
+}
+
+// IDMap represents an entry in /proc/PID/{u,g}id_map
+type IDMap struct {
+ ID int64
+ ParentID int64
+ Count int64
+}
+
+func parseLine(line string, v ...interface{}) {
+ parseParts(strings.Split(line, ":"), v...)
+}
+
+func parseParts(parts []string, v ...interface{}) {
+ if len(parts) == 0 {
+ return
+ }
+
+ for i, p := range parts {
+ // Ignore cases where we don't have enough fields to populate the arguments.
+ // Some configuration files like to misbehave.
+ if len(v) <= i {
+ break
+ }
+
+ // Use the type of the argument to figure out how to parse it, scanf() style.
+ // This is legit.
+ switch e := v[i].(type) {
+ case *string:
+ *e = p
+ case *int:
+ // "numbers", with conversion errors ignored because of some misbehaving configuration files.
+ *e, _ = strconv.Atoi(p)
+ case *int64:
+ *e, _ = strconv.ParseInt(p, 10, 64)
+ case *[]string:
+ // Comma-separated lists.
+ if p != "" {
+ *e = strings.Split(p, ",")
+ } else {
+ *e = []string{}
+ }
+ default:
+ // Someone goof'd when writing code using this function. Scream so they can hear us.
+ panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
+ }
+ }
+}
+
+func ParsePasswdFile(path string) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswd(passwd)
+}
+
+func ParsePasswd(passwd io.Reader) ([]User, error) {
+ return ParsePasswdFilter(passwd, nil)
+}
+
+func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
+ passwd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer passwd.Close()
+ return ParsePasswdFilter(passwd, filter)
+}
+
+func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for passwd-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []User{}
+ )
+
+ for s.Scan() {
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ // see: man 5 passwd
+ // name:password:UID:GID:GECOS:directory:shell
+ // Name:Pass:Uid:Gid:Gecos:Home:Shell
+ // root:x:0:0:root:/root:/bin/bash
+ // adm:x:3:4:adm:/var/adm:/bin/false
+ p := User{}
+ parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+func ParseGroupFile(path string) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ defer group.Close()
+ return ParseGroup(group)
+}
+
+func ParseGroup(group io.Reader) ([]Group, error) {
+ return ParseGroupFilter(group, nil)
+}
+
+func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
+ group, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer group.Close()
+ return ParseGroupFilter(group, filter)
+}
+
+func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for group-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []Group{}
+ )
+
+ for s.Scan() {
+ text := s.Text()
+ if text == "" {
+ continue
+ }
+
+ // see: man 5 group
+ // group_name:password:GID:user_list
+ // Name:Pass:Gid:List
+ // root:x:0:root
+ // adm:x:4:root,adm,daemon
+ p := Group{}
+ parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+type ExecUser struct {
+ Uid int
+ Gid int
+ Sgids []int
+ Home string
+}
+
+// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
+// given file paths and uses that data as the arguments to GetExecUser. If the
+// files cannot be opened for any reason, the error is ignored and a nil
+// io.Reader is passed instead.
+func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
+ var passwd, group io.Reader
+
+ if passwdFile, err := os.Open(passwdPath); err == nil {
+ passwd = passwdFile
+ defer passwdFile.Close()
+ }
+
+ if groupFile, err := os.Open(groupPath); err == nil {
+ group = groupFile
+ defer groupFile.Close()
+ }
+
+ return GetExecUser(userSpec, defaults, passwd, group)
+}
+
+// GetExecUser parses a user specification string (using the passwd and group
+// readers as sources for /etc/passwd and /etc/group data, respectively). In
+// the case of blank fields or missing data from the sources, the values in
+// defaults is used.
+//
+// GetExecUser will return an error if a user or group literal could not be
+// found in any entry in passwd and group respectively.
+//
+// Examples of valid user specifications are:
+// * ""
+// * "user"
+// * "uid"
+// * "user:group"
+// * "uid:gid
+// * "user:gid"
+// * "uid:group"
+//
+// It should be noted that if you specify a numeric user or group id, they will
+// not be evaluated as usernames (only the metadata will be filled). So attempting
+// to parse a user with user.Name = "1337" will produce the user with a UID of
+// 1337.
+func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
+ if defaults == nil {
+ defaults = new(ExecUser)
+ }
+
+ // Copy over defaults.
+ user := &ExecUser{
+ Uid: defaults.Uid,
+ Gid: defaults.Gid,
+ Sgids: defaults.Sgids,
+ Home: defaults.Home,
+ }
+
+ // Sgids slice *cannot* be nil.
+ if user.Sgids == nil {
+ user.Sgids = []int{}
+ }
+
+ // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
+ var userArg, groupArg string
+ parseLine(userSpec, &userArg, &groupArg)
+
+ // Convert userArg and groupArg to be numeric, so we don't have to execute
+ // Atoi *twice* for each iteration over lines.
+ uidArg, uidErr := strconv.Atoi(userArg)
+ gidArg, gidErr := strconv.Atoi(groupArg)
+
+ // Find the matching user.
+ users, err := ParsePasswdFilter(passwd, func(u User) bool {
+ if userArg == "" {
+ // Default to current state of the user.
+ return u.Uid == user.Uid
+ }
+
+ if uidErr == nil {
+ // If the userArg is numeric, always treat it as a UID.
+ return uidArg == u.Uid
+ }
+
+ return u.Name == userArg
+ })
+
+ // If we can't find the user, we have to bail.
+ if err != nil && passwd != nil {
+ if userArg == "" {
+ userArg = strconv.Itoa(user.Uid)
+ }
+ return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
+ }
+
+ var matchedUserName string
+ if len(users) > 0 {
+ // First match wins, even if there's more than one matching entry.
+ matchedUserName = users[0].Name
+ user.Uid = users[0].Uid
+ user.Gid = users[0].Gid
+ user.Home = users[0].Home
+ } else if userArg != "" {
+ // If we can't find a user with the given username, the only other valid
+ // option is if it's a numeric username with no associated entry in passwd.
+
+ if uidErr != nil {
+ // Not numeric.
+ return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
+ }
+ user.Uid = uidArg
+
+ // Must be inside valid uid range.
+ if user.Uid < minId || user.Uid > maxId {
+ return nil, ErrRange
+ }
+
+ // Okay, so it's numeric. We can just roll with this.
+ }
+
+ // On to the groups. If we matched a username, we need to do this because of
+ // the supplementary group IDs.
+ if groupArg != "" || matchedUserName != "" {
+ groups, err := ParseGroupFilter(group, func(g Group) bool {
+ // If the group argument isn't explicit, we'll just search for it.
+ if groupArg == "" {
+ // Check if user is a member of this group.
+ for _, u := range g.List {
+ if u == matchedUserName {
+ return true
+ }
+ }
+ return false
+ }
+
+ if gidErr == nil {
+ // If the groupArg is numeric, always treat it as a GID.
+ return gidArg == g.Gid
+ }
+
+ return g.Name == groupArg
+ })
+ if err != nil && group != nil {
+ return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
+ }
+
+ // Only start modifying user.Gid if it is in explicit form.
+ if groupArg != "" {
+ if len(groups) > 0 {
+ // First match wins, even if there's more than one matching entry.
+ user.Gid = groups[0].Gid
+ } else {
+ // If we can't find a group with the given name, the only other valid
+ // option is if it's a numeric group name with no associated entry in group.
+
+ if gidErr != nil {
+ // Not numeric.
+ return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
+ }
+ user.Gid = gidArg
+
+ // Must be inside valid gid range.
+ if user.Gid < minId || user.Gid > maxId {
+ return nil, ErrRange
+ }
+
+ // Okay, so it's numeric. We can just roll with this.
+ }
+ } else if len(groups) > 0 {
+ // Supplementary group ids only make sense if in the implicit form.
+ user.Sgids = make([]int, len(groups))
+ for i, group := range groups {
+ user.Sgids[i] = group.Gid
+ }
+ }
+ }
+
+ return user, nil
+}
+
+// GetAdditionalGroups looks up a list of groups by name or group id
+// against the given /etc/group formatted data. If a group name cannot
+// be found, an error will be returned. If a group id cannot be found,
+// or the given group data is nil, the id will be returned as-is
+// provided it is in the legal range.
+func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
+ var groups = []Group{}
+ if group != nil {
+ var err error
+ groups, err = ParseGroupFilter(group, func(g Group) bool {
+ for _, ag := range additionalGroups {
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
+ }
+ }
+
+ gidMap := make(map[int]struct{})
+ for _, ag := range additionalGroups {
+ var found bool
+ for _, g := range groups {
+ // if we found a matched group either by name or gid, take the
+ // first matched as correct
+ if g.Name == ag || strconv.Itoa(g.Gid) == ag {
+ if _, ok := gidMap[g.Gid]; !ok {
+ gidMap[g.Gid] = struct{}{}
+ found = true
+ break
+ }
+ }
+ }
+ // we asked for a group but didn't find it. let's check to see
+ // if we wanted a numeric group
+ if !found {
+ gid, err := strconv.Atoi(ag)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to find group %s", ag)
+ }
+ // Ensure gid is inside gid range.
+ if gid < minId || gid > maxId {
+ return nil, ErrRange
+ }
+ gidMap[gid] = struct{}{}
+ }
+ }
+ gids := []int{}
+ for gid := range gidMap {
+ gids = append(gids, gid)
+ }
+ return gids, nil
+}
+
+// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
+// that opens the groupPath given and gives it as an argument to
+// GetAdditionalGroups.
+func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
+ var group io.Reader
+
+ if groupFile, err := os.Open(groupPath); err == nil {
+ group = groupFile
+ defer groupFile.Close()
+ }
+ return GetAdditionalGroups(additionalGroups, group)
+}
+
+func ParseSubIDFile(path string) ([]SubID, error) {
+ subid, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer subid.Close()
+ return ParseSubID(subid)
+}
+
+func ParseSubID(subid io.Reader) ([]SubID, error) {
+ return ParseSubIDFilter(subid, nil)
+}
+
+func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
+ subid, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer subid.Close()
+ return ParseSubIDFilter(subid, filter)
+}
+
+func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for subid-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []SubID{}
+ )
+
+ for s.Scan() {
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ // see: man 5 subuid
+ p := SubID{}
+ parseLine(line, &p.Name, &p.SubID, &p.Count)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+func ParseIDMapFile(path string) ([]IDMap, error) {
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ return ParseIDMap(r)
+}
+
+func ParseIDMap(r io.Reader) ([]IDMap, error) {
+ return ParseIDMapFilter(r, nil)
+}
+
+func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ return ParseIDMapFilter(r, filter)
+}
+
+func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
+ if r == nil {
+ return nil, fmt.Errorf("nil source for idmap-formatted data")
+ }
+
+ var (
+ s = bufio.NewScanner(r)
+ out = []IDMap{}
+ )
+
+ for s.Scan() {
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ // see: man 7 user_namespaces
+ p := IDMap{}
+ parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count)
+
+ if filter == nil || filter(p) {
+ out = append(out, p)
+ }
+ }
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
new file mode 100644
index 00000000..f7815641
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go
@@ -0,0 +1,98 @@
+// +build linux
+
+package utils
+
+/*
+ * Copyright 2016, 2017 SUSE LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// MaxSendfdLen is the maximum length of the name of a file descriptor being
+// sent using SendFd. The name of the file handle returned by RecvFd will never
+// be larger than this value.
+const MaxNameLen = 4096
+
+// oobSpace is the size of the oob slice required to store a single FD. Note
+// that unix.UnixRights appears to make the assumption that fd is always int32,
+// so sizeof(fd) = 4.
+var oobSpace = unix.CmsgSpace(4)
+
+// RecvFd waits for a file descriptor to be sent over the given AF_UNIX
+// socket. The file name of the remote file descriptor will be recreated
+// locally (it is sent as non-auxiliary data in the same payload).
+func RecvFd(socket *os.File) (*os.File, error) {
+ // For some reason, unix.Recvmsg uses the length rather than the capacity
+ // when passing the msg_controllen and other attributes to recvmsg. So we
+ // have to actually set the length.
+ name := make([]byte, MaxNameLen)
+ oob := make([]byte, oobSpace)
+
+ sockfd := socket.Fd()
+ n, oobn, _, _, err := unix.Recvmsg(int(sockfd), name, oob, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ if n >= MaxNameLen || oobn != oobSpace {
+ return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+ }
+
+ // Truncate.
+ name = name[:n]
+ oob = oob[:oobn]
+
+ scms, err := unix.ParseSocketControlMessage(oob)
+ if err != nil {
+ return nil, err
+ }
+ if len(scms) != 1 {
+ return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+ }
+ scm := scms[0]
+
+ fds, err := unix.ParseUnixRights(&scm)
+ if err != nil {
+ return nil, err
+ }
+ if len(fds) != 1 {
+ return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
+ }
+ fd := uintptr(fds[0])
+
+ return os.NewFile(fd, string(name)), nil
+}
+
+// SendFd sends a file descriptor over the given AF_UNIX socket. In
+// addition, the file.Name() of the given file will also be sent as
+// non-auxiliary data in the same payload (allowing to send contextual
+// information for a file descriptor).
+func SendFd(socket *os.File, name string, fd uintptr) error {
+ if len(name) >= MaxNameLen {
+ return fmt.Errorf("sendfd: filename too long: %s", name)
+ }
+ return SendFds(socket, []byte(name), int(fd))
+}
+
+// SendFds sends a list of files descriptor and msg over the given AF_UNIX socket.
+func SendFds(socket *os.File, msg []byte, fds ...int) error {
+ oob := unix.UnixRights(fds...)
+ return unix.Sendmsg(int(socket.Fd()), msg, oob, nil, 0)
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
new file mode 100644
index 00000000..40ccfaa1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go
@@ -0,0 +1,112 @@
+package utils
+
+import (
+ "encoding/json"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ exitSignalOffset = 128
+)
+
+// ResolveRootfs ensures that the current working directory is
+// not a symlink and returns the absolute path to the rootfs
+func ResolveRootfs(uncleanRootfs string) (string, error) {
+ rootfs, err := filepath.Abs(uncleanRootfs)
+ if err != nil {
+ return "", err
+ }
+ return filepath.EvalSymlinks(rootfs)
+}
+
+// ExitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func ExitStatus(status unix.WaitStatus) int {
+ if status.Signaled() {
+ return exitSignalOffset + int(status.Signal())
+ }
+ return status.ExitStatus()
+}
+
+// WriteJSON writes the provided struct v to w using standard json marshaling
+func WriteJSON(w io.Writer, v interface{}) error {
+ data, err := json.Marshal(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ return err
+}
+
+// CleanPath makes a path safe for use with filepath.Join. This is done by not
+// only cleaning the path, but also (if the path is relative) adding a leading
+// '/' and cleaning it (then removing the leading '/'). This ensures that a
+// path resulting from prepending another path will always resolve to lexically
+// be a subdirectory of the prefixed path. This is all done lexically, so paths
+// that include symlinks won't be safe as a result of using CleanPath.
+func CleanPath(path string) string {
+ // Deal with empty strings nicely.
+ if path == "" {
+ return ""
+ }
+
+ // Ensure that all paths are cleaned (especially problematic ones like
+ // "/../../../../../" which can cause lots of issues).
+ path = filepath.Clean(path)
+
+ // If the path isn't absolute, we need to do more processing to fix paths
+ // such as "../../../..//some/path". We also shouldn't convert absolute
+ // paths to relative ones.
+ if !filepath.IsAbs(path) {
+ path = filepath.Clean(string(os.PathSeparator) + path)
+ // This can't fail, as (by definition) all paths are relative to root.
+ path, _ = filepath.Rel(string(os.PathSeparator), path)
+ }
+
+ // Clean the path again for good measure.
+ return filepath.Clean(path)
+}
+
+// SearchLabels searches a list of key-value pairs for the provided key and
+// returns the corresponding value. The pairs must be separated with '='.
+func SearchLabels(labels []string, query string) string {
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == query {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+// Annotations returns the bundle path and user defined annotations from the
+// libcontainer state. We need to remove the bundle because that is a label
+// added by libcontainer.
+func Annotations(labels []string) (bundle string, userAnnotations map[string]string) {
+ userAnnotations = make(map[string]string)
+ for _, l := range labels {
+ parts := strings.SplitN(l, "=", 2)
+ if len(parts) < 2 {
+ continue
+ }
+ if parts[0] == "bundle" {
+ bundle = parts[1]
+ } else {
+ userAnnotations[parts[0]] = parts[1]
+ }
+ }
+ return
+}
+
+func GetIntSize() int {
+ return int(unsafe.Sizeof(1))
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
new file mode 100644
index 00000000..1576f2d4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go
@@ -0,0 +1,68 @@
+// +build !windows
+
+package utils
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ "golang.org/x/sys/unix"
+)
+
+// EnsureProcHandle returns whether or not the given file handle is on procfs.
+func EnsureProcHandle(fh *os.File) error {
+ var buf unix.Statfs_t
+ if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil {
+ return fmt.Errorf("ensure %s is on procfs: %v", fh.Name(), err)
+ }
+ if buf.Type != unix.PROC_SUPER_MAGIC {
+ return fmt.Errorf("%s is not on procfs", fh.Name())
+ }
+ return nil
+}
+
+// CloseExecFrom applies O_CLOEXEC to all file descriptors currently open for
+// the process (except for those below the given fd value).
+func CloseExecFrom(minFd int) error {
+ fdDir, err := os.Open("/proc/self/fd")
+ if err != nil {
+ return err
+ }
+ defer fdDir.Close()
+
+ if err := EnsureProcHandle(fdDir); err != nil {
+ return err
+ }
+
+ fdList, err := fdDir.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+ for _, fdStr := range fdList {
+ fd, err := strconv.Atoi(fdStr)
+ // Ignore non-numeric file names.
+ if err != nil {
+ continue
+ }
+ // Ignore descriptors lower than our specified minimum.
+ if fd < minFd {
+ continue
+ }
+ // Intentionally ignore errors from unix.CloseOnExec -- the cases where
+ // this might fail are basically file descriptors that have already
+ // been closed (including and especially the one that was created when
+ // ioutil.ReadDir did the "opendir" syscall).
+ unix.CloseOnExec(fd)
+ }
+ return nil
+}
+
+// NewSockPair returns a new unix socket pair
+func NewSockPair(name string) (parent *os.File, child *os.File, err error) {
+ fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return os.NewFile(uintptr(fds[1]), name+"-p"), os.NewFile(uintptr(fds[0]), name+"-c"), nil
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
index 87709497..df1f4321 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -599,6 +599,7 @@ type VMImage struct {
// LinuxSeccomp represents syscall restrictions
type LinuxSeccomp struct {
DefaultAction LinuxSeccompAction `json:"defaultAction"`
+ DefaultErrnoRet *uint `json:"defaultErrnoRet,omitempty"`
Architectures []Arch `json:"architectures,omitempty"`
Flags []LinuxSeccompFlag `json:"flags,omitempty"`
ListenerPath string `json:"listenerPath,omitempty"`
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
index 88fee3fa..7c010d4f 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -34,22 +34,19 @@ type State struct {
Annotations map[string]string `json:"annotations,omitempty"`
}
-// FdIndexKey is the key used in the FdIndexes map of the ContainerProcessState struct.
-type FdIndexKey string
-
const (
- // SeccompFdIndexKey is the index of the seccomp notify file descriptor.
- SeccompFdIndexKey FdIndexKey = "seccompFd"
- // PidFdIndexKey is the index of the target process file descriptor.
- PidFdIndexKey FdIndexKey = "pidFd"
+ // SeccompFdName is the name of the seccomp notify file descriptor.
+ SeccompFdName string = "seccompFd"
)
// ContainerProcessState holds information about the state of a container process.
type ContainerProcessState struct {
// Version is the version of the specification that is supported.
Version string `json:"ociVersion"`
- // FdIndexes is a map containing the indexes of the file descriptors in the `SCM_RIGHTS` array.
- FdIndexes map[FdIndexKey]int `json:"fdIndexes"`
+ // Fds is a string array containing the names of the file descriptors passed.
+ // The index of the name in this array corresponds to index of the file
+ // descriptor in the `SCM_RIGHTS` array.
+ Fds []string `json:"fds"`
// Pid is the process ID as seen by the runtime.
Pid int `json:"pid"`
// Opaque metadata.
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 00000000..9159de03
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 00000000..835ba3e7
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 00000000..ce9d7cde
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 00000000..54dfdcb1
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors) [](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 00000000..a932eade
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 00000000..161aea25
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 00000000..be0d10d0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 00000000..779a8348
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (\n\t)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 00000000..83c8f823
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 00000000..409a5b63
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+
+go:
+ - "1.3"
+ - "1.4"
+ - "1.5"
+ - "1.6"
+ - "1.7"
+ - "1.8"
+ - "1.9"
+ - "1.10"
+ - "1.11"
+ - "1.12"
+ - "1.13"
+ - "1.14"
+
+script:
+ - ./validate.sh
+
+# this should give us faster builds according to
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 00000000..363fa9ee
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 00000000..27ddfee8
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,171 @@
+go-metrics
+==========
+
+
+
+Go port of Coda Hale's Metrics library: .
+
+Documentation: .
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := NewRegistry()
+g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not threadsafe. For threadsafe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will
+leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. see
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+ 10e9,
+ "127.0.0.1:8086",
+ "database-name",
+ "username",
+ "password"
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+ 10e9, // interval
+ "example@example.com", // account owner email address
+ "token", // Librato API token
+ "hostname", // source
+ []float64{0.95}, // percentiles to send
+ time.Millisecond, // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
+as well as all your go-metrics.
+
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* AppOptics - https://github.com/ysamlan/go-metrics-appoptics
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
+* Honeycomb - https://github.com/getspine/go-metrics-honeycomb
+* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront
+* Open-Falcon - https://github.com/g4zhuj/go-metrics-falcon
+* AWS CloudWatch - [https://github.com/savaki/cloudmetrics](https://github.com/savaki/cloudmetrics)
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 00000000..bb7b039c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+ Clear()
+ Count() int64
+ Dec(int64)
+ Inc(int64)
+ Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+ if UseNilMetrics {
+ return NilCounter{}
+ }
+ return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+ c := NewCounter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+ panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+ panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+ panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+ count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+ atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+ return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+ atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+ atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+ return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 00000000..179e5aae
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+ "runtime/debug"
+ "sync"
+ "time"
+)
+
+var (
+ debugMetrics struct {
+ GCStats struct {
+ LastGC Gauge
+ NumGC Gauge
+ Pause Histogram
+ //PauseQuantiles Histogram
+ PauseTotal Gauge
+ }
+ ReadGCStats Timer
+ }
+ gcStats debug.GCStats
+ registerDebugMetricsOnce = sync.Once{}
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called as a goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureDebugGCStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called in a background goroutine.
+// Giving a registry which has not been given to RegisterDebugGCStats will
+// panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+ lastGC := gcStats.LastGC
+ t := time.Now()
+ debug.ReadGCStats(&gcStats)
+ debugMetrics.ReadGCStats.UpdateSince(t)
+
+ debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+ debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+ if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+ debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+ }
+ //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+ debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
+// i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+ registerDebugMetricsOnce.Do(func() {
+ debugMetrics.GCStats.LastGC = NewGauge()
+ debugMetrics.GCStats.NumGC = NewGauge()
+ debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+ //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+ debugMetrics.GCStats.PauseTotal = NewGauge()
+ debugMetrics.ReadGCStats = NewTimer()
+
+ r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+ r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+ r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+ //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+ r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+ r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+ })
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+ gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 00000000..a8183dd7
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,138 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+ Rate() float64
+ Snapshot() EWMA
+ Tick()
+ Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+ if UseNilMetrics {
+ return NilEWMA{}
+ }
+ return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+ panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+ panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick. It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+ uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ alpha float64
+ rate uint64
+ init uint32
+ mutex sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+ currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+ return currentRate
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+ return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+ // Optimization to avoid mutex locking in the hot-path.
+ if atomic.LoadUint32(&a.init) == 1 {
+ a.updateRate(a.fetchInstantRate())
+ } else {
+ // Slow-path: this is only needed on the first Tick() and preserves transactional updating
+ // of init and rate in the else block. The first conditional is needed below because
+ // a different thread could have set a.init = 1 between the time of the first atomic load and when
+ // the lock was acquired.
+ a.mutex.Lock()
+ if atomic.LoadUint32(&a.init) == 1 {
+ // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
+ // but again, this section is only invoked on the first successful Tick() operation.
+ a.updateRate(a.fetchInstantRate())
+ } else {
+ atomic.StoreUint32(&a.init, 1)
+ atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+ }
+ a.mutex.Unlock()
+ }
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+ count := atomic.LoadInt64(&a.uncounted)
+ atomic.AddInt64(&a.uncounted, -count)
+ instantRate := float64(count) / float64(5e9)
+ return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+ currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+ currentRate += a.alpha * (instantRate - currentRate)
+ atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+ atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 00000000..cb57a938
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+ Snapshot() Gauge
+ Update(int64)
+ Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+ c := NewGauge()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+ c := NewFunctionalGauge(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+ panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+ value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+ return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+ atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+ return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+ value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+ panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 00000000..3962e6db
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+ "math"
+ "sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+ c := NewFunctionalGaugeFloat64(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// the sync/atomic package to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+ value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+ return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+ value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+ panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 00000000..abd0a7d2
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+ Percentiles []float64 // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+ })
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := graphite(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ case Gauge:
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 00000000..445131ca
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+ Check()
+ Error() error
+ Healthy()
+ Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+ if UseNilMetrics {
+ return NilHealthcheck{}
+ }
+ return &StandardHealthcheck{nil, f}
+}
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+ err error
+ f func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+ h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+ return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+ h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy. The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+ h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 00000000..dbc837fe
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Sample() Sample
+ Snapshot() Histogram
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+ if UseNilMetrics {
+ return NilHistogram{}
+ }
+ return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+ c := NewHistogram(s)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+ sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+ panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+ panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+ sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+ return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 00000000..174b9477
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+ return json.Marshal(r.GetAll())
+}
+
+// WriteJSON writes metrics from the given registry periodically to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteJSONOnce(r, w)
+ }
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+ json.NewEncoder(w).Encode(r)
+}
+
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.GetAll())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 00000000..2614a0a3
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+ "time"
+)
+
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
+
+// Log outputs each metric in the given registry periodically using the given logger.
+func Log(r Registry, freq time.Duration, l Logger) {
+ LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// LogOnCue outputs each metric in the given registry on demand through the channel
+// using the given logger
+func LogOnCue(r Registry, ch chan interface{}, l Logger) {
+ LogScaledOnCue(r, ch, time.Nanosecond, l)
+}
+
+// LogScaled outputs each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+ ch := make(chan interface{})
+ go func(channel chan interface{}) {
+ for _ = range time.Tick(freq) {
+ channel <- struct{}{}
+ }
+ }(ch)
+ LogScaledOnCue(r, ch, scale, l)
+}
+
+// LogScaledOnCue outputs each metric in the given registry on demand through the channel
+// using the given logger. Print timings in `scale` units (eg time.Millisecond) rather
+// than nanos.
+func LogScaledOnCue(r Registry, ch chan interface{}, scale time.Duration, l Logger) {
+ du := float64(scale)
+ duSuffix := scale.String()[1:]
+
+ for _ = range ch {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %9d\n", metric.Count())
+ case Gauge:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ l.Printf("healthcheck %s\n", name)
+ l.Printf(" error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("histogram %s\n", name)
+ l.Printf(" count: %9d\n", h.Count())
+ l.Printf(" min: %9d\n", h.Min())
+ l.Printf(" max: %9d\n", h.Max())
+ l.Printf(" mean: %12.2f\n", h.Mean())
+ l.Printf(" stddev: %12.2f\n", h.StdDev())
+ l.Printf(" median: %12.2f\n", ps[0])
+ l.Printf(" 75%%: %12.2f\n", ps[1])
+ l.Printf(" 95%%: %12.2f\n", ps[2])
+ l.Printf(" 99%%: %12.2f\n", ps[3])
+ l.Printf(" 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ l.Printf("meter %s\n", name)
+ l.Printf(" count: %9d\n", m.Count())
+ l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
+ l.Printf(" mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("timer %s\n", name)
+ l.Printf(" count: %9d\n", t.Count())
+ l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
+ l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
+ l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
+ l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
+ l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
+ l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
+ l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
+ l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
+ l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
+ l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
+ l.Printf(" mean rate: %12.2f\n", t.RateMean())
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 00000000..47454f54
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+ time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak: 42604 kB
+VmSize: 42604 kB
+VmLck: 0 kB
+VmHWM: 1120 kB
+VmRSS: 1120 kB
+VmData: 35460 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 36 kB
+VmSwap: 0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+ "fmt"
+ "metrics"
+ "time"
+)
+
+func main() {
+ fmt.Sprintf("foo")
+ metrics.NewRegistry()
+ time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak: 44016 kB
+VmSize: 44016 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak: 55024 kB
+VmSize: 55024 kB
+VmLck: 0 kB
+VmHWM: 12440 kB
+VmRSS: 12440 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak: 44012 kB
+VmSize: 44012 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak: 55020 kB
+VmSize: 55020 kB
+VmLck: 0 kB
+VmHWM: 12432 kB
+VmRSS: 12432 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 60 kB
+VmSwap: 0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak: 72272 kB
+VmSize: 72272 kB
+VmLck: 0 kB
+VmHWM: 16204 kB
+VmRSS: 16204 kB
+VmData: 65100 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 80 kB
+VmSwap: 0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 256912 kB
+VmSize: 256912 kB
+VmLck: 0 kB
+VmHWM: 146204 kB
+VmRSS: 146204 kB
+VmData: 249740 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 448 kB
+VmSwap: 0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 908112 kB
+VmSize: 908112 kB
+VmLck: 0 kB
+VmHWM: 645832 kB
+VmRSS: 645588 kB
+VmData: 900940 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1716 kB
+VmSwap: 1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak: 62480 kB
+VmSize: 62480 kB
+VmLck: 0 kB
+VmHWM: 11572 kB
+VmRSS: 11572 kB
+VmData: 55308 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 00000000..223669bc
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,251 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+	Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	// Register the meter with the global arbiter, which ticks all live
+	// meters from one goroutine; that goroutine is started lazily on the
+	// first meter ever created and runs for the life of the process.
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters[m] = struct{}{}
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+// The rates are stored as IEEE-754 bit patterns in uint64 fields (see
+// math.Float64frombits below) so the owning StandardMeter can read and
+// write them with sync/atomic operations.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter, returned by NewMeter when UseNilMetrics is set.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+	stopped     uint32 // accessed atomically; 1 once Stop has been called
+}
+
+// newStandardMeter builds a meter; the caller (NewMeter) is responsible for
+// registering it with the arbiter so that it gets ticked.
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Stop stops the meter, Mark() will be a no-op if you use it after being stopped.
+// The CompareAndSwap makes Stop idempotent: only the first call removes the
+// meter from the arbiter's set.
+func (m *StandardMeter) Stop() {
+	if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	if atomic.LoadUint32(&m.stopped) == 1 {
+		return
+	}
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
+}
+
+// updateSnapshot refreshes the atomically-readable rate fields from the
+// three EWMAs and from the mean rate since startTime, encoding each float64
+// as its bit pattern for atomic storage.
+func (m *StandardMeter) updateSnapshot() {
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+// tick advances the EWMAs; invoked every 5s by the arbiter goroutine.
+func (m *StandardMeter) tick() {
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can later be removed by Stop.
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  map[*StandardMeter]struct{}
+	ticker  *time.Ticker
+}
+
+// arbiter is the single, process-wide ticker shared by every StandardMeter;
+// 5e9 ns == 5s.
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+	for {
+		select {
+		case <-ma.ticker.C:
+			ma.tickMeters()
+		}
+	}
+}
+
+// tickMeters advances every registered meter while holding the read lock.
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 00000000..b97a49ed
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 00000000..266b6c93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "strings"
+ "time"
+)
+
+// shortHostName caches the host name (truncated at the first dot) computed
+// by getShortHostname; "" means it has not been computed yet.
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+	})
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes a OpenTSDBConfig instead.
+// NOTE(review): this loops forever and the ticker created by time.Tick is
+// never stopped; acceptable only because the loop never exits.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+	for _ = range time.Tick(c.FlushInterval) {
+		// One full export per tick; errors are logged and the next tick
+		// retries with a fresh connection.
+		if err := openTSDB(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+// getShortHostname returns the local host name truncated at the first dot,
+// caching the result in the package-level shortHostName.
+// NOTE(review): the cache is read and written without synchronization, so
+// concurrent first uses could race — confirm exporters run on a single
+// goroutine. The os.Hostname error is deliberately ignored ("" is used).
+func getShortHostname() string {
+	if shortHostName == "" {
+		host, _ := os.Hostname()
+		if index := strings.Index(host, "."); index > 0 {
+			shortHostName = host[:index]
+		} else {
+			shortHostName = host
+		}
+	}
+	return shortHostName
+}
+
+// openTSDB dials the configured TSDB address and writes one telnet-style
+// "put <metric> <timestamp> <value> host=<short hostname>" line per exported
+// value, flushing the buffered writer after each metric.
+func openTSDB(c *OpenTSDBConfig) error {
+	shortHostname := getShortHostname()
+	now := time.Now().Unix()
+	// du converts stored nanosecond durations into the configured unit.
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		// Counters and gauges are read live; histograms, meters and timers
+		// are snapshotted first so the percentiles/rates are consistent.
+		case Counter:
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+		case Gauge:
+			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+		}
+		// NOTE(review): Flush's error is discarded, so a broken connection
+		// is only noticed on the next dial.
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 00000000..a8e67228
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,373 @@
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists. If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+// Error implements the error interface; the string is the duplicated name.
+func (err DuplicateMetric) Error() string {
+	return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+	// Call the given function for each registered metric.
+	Each(func(string, interface{}))
+
+	// Get the metric by the given name or nil if none is registered.
+	Get(string) interface{}
+
+	// GetAll metrics in the Registry.
+	GetAll() map[string]map[string]interface{}
+
+	// Gets an existing metric or registers the given one.
+	// The interface can be the metric to register if not found in registry,
+	// or a function returning the metric for lazy instantiation.
+	GetOrRegister(string, interface{}) interface{}
+
+	// Register the given metric under the given name.
+	Register(string, interface{}) error
+
+	// Run all registered healthchecks.
+	RunHealthchecks()
+
+	// Unregister the metric with the given name.
+	Unregister(string)
+
+	// Unregister all metrics. (Mostly for testing.)
+	UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics. It is safe for concurrent use: every map access
+// below is guarded by mutex.
+type StandardRegistry struct {
+	metrics map[string]interface{}
+	mutex   sync.RWMutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+	return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+// The name/metric pairs are copied out under the read lock first (see
+// registered), so f runs without the registry's mutex held.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+	metrics := r.registered()
+	for i := range metrics {
+		kv := &metrics[i]
+		f(kv.name, kv.value)
+	}
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+	// access the read lock first which should be re-entrant
+	r.mutex.RLock()
+	metric, ok := r.metrics[name]
+	r.mutex.RUnlock()
+	if ok {
+		return metric
+	}
+
+	// only take the write lock if we'll be modifying the metrics map
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if metric, ok := r.metrics[name]; ok {
+		return metric
+	}
+	// A constructor function (e.g. NewMeter) is only invoked once we know
+	// the metric is missing, under the write lock.
+	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+		i = v.Call(nil)[0].Interface()
+	}
+	r.register(name, i)
+	return i
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+// Metrics that are not Healthchecks are skipped.
+func (r *StandardRegistry) RunHealthchecks() {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	for _, i := range r.metrics {
+		if h, ok := i.(Healthcheck); ok {
+			h.Check()
+		}
+	}
+}
+
+// GetAll metrics in the Registry
+// Each metric is reduced to a map of stat name to plain value, snapshotted
+// at the time of the call; unknown metric kinds yield an empty map.
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+	data := make(map[string]map[string]interface{})
+	r.Each(func(name string, i interface{}) {
+		values := make(map[string]interface{})
+		switch metric := i.(type) {
+		case Counter:
+			values["count"] = metric.Count()
+		case Gauge:
+			values["value"] = metric.Value()
+		case GaugeFloat64:
+			values["value"] = metric.Value()
+		case Healthcheck:
+			// Check is run here so the reported error is current.
+			values["error"] = nil
+			metric.Check()
+			if err := metric.Error(); nil != err {
+				values["error"] = metric.Error().Error()
+			}
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+		case Meter:
+			m := metric.Snapshot()
+			values["count"] = m.Count()
+			values["1m.rate"] = m.Rate1()
+			values["5m.rate"] = m.Rate5()
+			values["15m.rate"] = m.Rate15()
+			values["mean.rate"] = m.RateMean()
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
+		}
+		data[name] = values
+	})
+	return data
+}
+
+// Unregister the metric with the given name.
+// Stoppable metrics are stopped first so their goroutines can exit.
+func (r *StandardRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.stop(name)
+	delete(r.metrics, name)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name, _ := range r.metrics {
+		r.stop(name)
+		delete(r.metrics, name)
+	}
+}
+
+// register adds i under name; the caller must hold r.mutex.
+// NOTE(review): values that are not one of the known metric kinds fall
+// through the switch and are silently dropped, yet nil is still returned.
+func (r *StandardRegistry) register(name string, i interface{}) error {
+	if _, ok := r.metrics[name]; ok {
+		return DuplicateMetric(name)
+	}
+	switch i.(type) {
+	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+		r.metrics[name] = i
+	}
+	return nil
+}
+
+// metricKV is a name/metric pair copied out of the registry map so that
+// Each can iterate without holding the lock.
+type metricKV struct {
+	name  string
+	value interface{}
+}
+
+// registered returns a copy of the current name->metric pairs, taken under
+// the read lock.
+func (r *StandardRegistry) registered() []metricKV {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	metrics := make([]metricKV, 0, len(r.metrics))
+	for name, i := range r.metrics {
+		metrics = append(metrics, metricKV{
+			name:  name,
+			value: i,
+		})
+	}
+	return metrics
+}
+
+// stop halts the named metric if it is registered and Stoppable.
+// The caller must hold r.mutex.
+func (r *StandardRegistry) stop(name string) {
+	if i, ok := r.metrics[name]; ok {
+		if s, ok := i.(Stoppable); ok {
+			s.Stop()
+		}
+	}
+}
+
+// Stoppable is implemented by metrics that must be stopped on unregister.
+type Stoppable interface {
+	Stop()
+}
+
+// PrefixedRegistry wraps another Registry, prepending prefix to every metric
+// name passed through it.
+type PrefixedRegistry struct {
+	underlying Registry
+	prefix     string
+}
+
+// NewPrefixedRegistry creates a PrefixedRegistry over a fresh StandardRegistry.
+func NewPrefixedRegistry(prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: NewRegistry(),
+		prefix:     prefix,
+	}
+}
+
+// NewPrefixedChildRegistry creates a PrefixedRegistry sharing the parent's
+// storage; prefixes nest when the parent is itself prefixed.
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: parent,
+		prefix:     prefix,
+	}
+}
+
+// Call the given function for each registered metric.
+// Only metrics whose names carry this registry's (combined) prefix are
+// visited, and fn receives the full prefixed name — the prefix is not
+// stripped.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+	wrappedFn := func(prefix string) func(string, interface{}) {
+		return func(name string, iface interface{}) {
+			if strings.HasPrefix(name, prefix) {
+				fn(name, iface)
+			} else {
+				return
+			}
+		}
+	}
+
+	baseRegistry, prefix := findPrefix(r, "")
+	baseRegistry.Each(wrappedFn(prefix))
+}
+
+// findPrefix walks down a chain of PrefixedRegistry wrappers, accumulating
+// the combined prefix, and returns the base StandardRegistry.
+// NOTE(review): a custom Registry implementation at the bottom of the chain
+// yields (nil, ""), which would make Each panic — confirm only the built-in
+// registries are ever wrapped.
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+	switch r := registry.(type) {
+	case *PrefixedRegistry:
+		return findPrefix(r.underlying, r.prefix+prefix)
+	case *StandardRegistry:
+		return r, prefix
+	}
+	return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+	realName := r.prefix + name
+	return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	realName := r.prefix + name
+	return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+	realName := r.prefix + name
+	return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+// NOTE(review): delegates to the whole underlying registry, not just the
+// metrics under this prefix.
+func (r *PrefixedRegistry) RunHealthchecks() {
+	r.underlying.RunHealthchecks()
+}
+
+// GetAll metrics in the Registry
+// NOTE(review): also unfiltered — returns every metric in the underlying
+// registry, not only the prefixed ones.
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+	return r.underlying.GetAll()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+	realName := r.prefix + name
+	r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+	r.underlying.UnregisterAll()
+}
+
+// DefaultRegistry backs the package-level convenience functions below.
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+	DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+	return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+	return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+	return DefaultRegistry.Register(name, i)
+}
+
+// MustRegister registers the given metric under the given name. Panics if a
+// metric by the given name is already registered.
+func MustRegister(name string, i interface{}) {
+	if err := Register(name, i); err != nil {
+		panic(err)
+	}
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+	DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+	DefaultRegistry.Unregister(name)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 00000000..4047ab3d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,216 @@
+package metrics
+
+import (
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "time"
+)
+
+var (
+ memStats runtime.MemStats
+ runtimeMetrics struct {
+ MemStats struct {
+ Alloc Gauge
+ BuckHashSys Gauge
+ DebugGC Gauge
+ EnableGC Gauge
+ Frees Gauge
+ HeapAlloc Gauge
+ HeapIdle Gauge
+ HeapInuse Gauge
+ HeapObjects Gauge
+ HeapReleased Gauge
+ HeapSys Gauge
+ LastGC Gauge
+ Lookups Gauge
+ Mallocs Gauge
+ MCacheInuse Gauge
+ MCacheSys Gauge
+ MSpanInuse Gauge
+ MSpanSys Gauge
+ NextGC Gauge
+ NumGC Gauge
+ GCCPUFraction GaugeFloat64
+ PauseNs Histogram
+ PauseTotalNs Gauge
+ StackInuse Gauge
+ StackSys Gauge
+ Sys Gauge
+ TotalAlloc Gauge
+ }
+ NumCgoCall Gauge
+ NumGoroutine Gauge
+ NumThread Gauge
+ ReadMemStats Timer
+ }
+ frees uint64
+ lookups uint64
+ mallocs uint64
+ numGC uint32
+ numCgoCalls int64
+
+ threadCreateProfile = pprof.Lookup("threadcreate")
+ registerRuntimeMetricsOnce = sync.Once{}
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureRuntimeMemStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called in a background
+// goroutine. Giving a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+ t := time.Now()
+ runtime.ReadMemStats(&memStats) // This takes 50-200us.
+ runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+ runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+ runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+ if memStats.DebugGC {
+ runtimeMetrics.MemStats.DebugGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.DebugGC.Update(0)
+ }
+ if memStats.EnableGC {
+ runtimeMetrics.MemStats.EnableGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.EnableGC.Update(0)
+ }
+
+ runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+ runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+ runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+ runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+ runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+ runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+ runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+ runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+ runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+ runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+ runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+ runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+ runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+ runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+ runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+ runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+ runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+ //
+ i := numGC % uint32(len(memStats.PauseNs))
+ ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+ if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+ for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ } else {
+ if i > ii {
+ for ; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ i = 0
+ }
+ for ; i < ii; i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ }
+ frees = memStats.Frees
+ lookups = memStats.Lookups
+ mallocs = memStats.Mallocs
+ numGC = memStats.NumGC
+
+ runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+ runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+ runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+ runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+ runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+ currentNumCgoCalls := numCgoCall()
+ runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+ numCgoCalls = currentNumCgoCalls
+
+ runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+ runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats. The runtimeMetrics are named by their
+// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+ registerRuntimeMetricsOnce.Do(func() {
+ runtimeMetrics.MemStats.Alloc = NewGauge()
+ runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+ runtimeMetrics.MemStats.DebugGC = NewGauge()
+ runtimeMetrics.MemStats.EnableGC = NewGauge()
+ runtimeMetrics.MemStats.Frees = NewGauge()
+ runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+ runtimeMetrics.MemStats.HeapIdle = NewGauge()
+ runtimeMetrics.MemStats.HeapInuse = NewGauge()
+ runtimeMetrics.MemStats.HeapObjects = NewGauge()
+ runtimeMetrics.MemStats.HeapReleased = NewGauge()
+ runtimeMetrics.MemStats.HeapSys = NewGauge()
+ runtimeMetrics.MemStats.LastGC = NewGauge()
+ runtimeMetrics.MemStats.Lookups = NewGauge()
+ runtimeMetrics.MemStats.Mallocs = NewGauge()
+ runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+ runtimeMetrics.MemStats.MCacheSys = NewGauge()
+ runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+ runtimeMetrics.MemStats.MSpanSys = NewGauge()
+ runtimeMetrics.MemStats.NextGC = NewGauge()
+ runtimeMetrics.MemStats.NumGC = NewGauge()
+ runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+ runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+ runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+ runtimeMetrics.MemStats.StackInuse = NewGauge()
+ runtimeMetrics.MemStats.StackSys = NewGauge()
+ runtimeMetrics.MemStats.Sys = NewGauge()
+ runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+ runtimeMetrics.NumCgoCall = NewGauge()
+ runtimeMetrics.NumGoroutine = NewGauge()
+ runtimeMetrics.NumThread = NewGauge()
+ runtimeMetrics.ReadMemStats = NewTimer()
+
+ r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+ r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+ r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+ r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+ r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+ r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+ r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+ r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+ r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+ r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+ r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+ r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+ r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+ r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+ r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+ r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+ r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+ r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+ r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+ r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+ r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+ r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+ r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+ r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+ r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+ r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+ r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+ r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+ r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+ r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+ r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+ })
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 00000000..e3391f4e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+ return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 00000000..ca12c05b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 00000000..616a3b47
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 00000000..be96aa6f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 00000000..fecee5ef
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Size() int
+ Snapshot() Sample
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Values() []int64
+ Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+//
+type ExpDecaySample struct {
+ alpha float64
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ t0, t1 time.Time
+ values *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ s := &ExpDecaySample{
+ alpha: alpha,
+ reservoirSize: reservoirSize,
+ t0: time.Now(),
+ values: newExpDecaySampleHeap(reservoirSize),
+ }
+ s.t1 = s.t0.Add(rescaleThreshold)
+ return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.t0 = time.Now()
+ s.t1 = s.t0.Add(rescaleThreshold)
+ s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+ return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+ return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+ return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+ return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+ return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+ return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+ s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+ return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp. This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if s.values.Size() == s.reservoirSize {
+ s.values.Pop()
+ }
+ s.values.Push(expDecaySample{
+ k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+ v: v,
+ })
+ if t.After(s.t1) {
+ values := s.values.Values()
+ t0 := s.t0
+ s.values.Clear()
+ s.t0 = t
+ s.t1 = s.t0.Add(rescaleThreshold)
+ for _, v := range values {
+ v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+ s.values.Push(v)
+ }
+ }
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var max int64 = math.MinInt64
+ for _, v := range values {
+ if max < v {
+ max = v
+ }
+ }
+ return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var min int64 = math.MaxInt64
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ }
+ return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+ return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+ scores := make([]float64, len(ps))
+ size := len(values)
+ if size > 0 {
+ sort.Sort(values)
+ for i, p := range ps {
+ pos := p * float64(size+1)
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+ }
+ }
+ }
+ return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+ count int64
+ values []int64
+}
+
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+ return &SampleSnapshot{
+ count: count,
+ values: values,
+ }
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+ panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+ panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+ return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+ var sum int64
+ for _, v := range values {
+ sum += v
+ }
+ return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ m := SampleMean(values)
+ var sum float64
+ for _, v := range values {
+ d := float64(v) - m
+ sum += d * d
+ }
+ return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+//
+//
+type UniformSample struct {
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ values []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ return &UniformSample{
+ reservoirSize: reservoirSize,
+ values: make([]int64, 0, reservoirSize),
+ }
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if len(s.values) < s.reservoirSize {
+ s.values = append(s.values, v)
+ } else {
+ r := rand.Int63n(s.count)
+ if r < int64(len(s.values)) {
+ s.values[int(r)] = v
+ }
+ }
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+ k float64
+ v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+ return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap
+type expDecaySampleHeap struct {
+ s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+ h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+ n := len(h.s)
+ h.s = h.s[0 : n+1]
+ h.s[n] = s
+ h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+ n := len(h.s) - 1
+ h.s[0], h.s[n] = h.s[n], h.s[0]
+ h.down(0, n)
+
+ n = len(h.s)
+ s := h.s[n-1]
+ h.s = h.s[0 : n-1]
+ return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+ return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+ return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+ for {
+ i := (j - 1) / 2 // parent
+ if i == j || !(h.s[j].k < h.s[i].k) {
+ break
+ }
+ h.s[i], h.s[j] = h.s[j], h.s[i]
+ j = i
+ }
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+ for {
+ j1 := 2*i + 1
+ if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+ break
+ }
+ j := j1 // left child
+ if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+ j = j2 // = 2*i + 2 // right child
+ }
+ if !(h.s[j].k < h.s[i].k) {
+ break
+ }
+ h.s[i], h.s[j] = h.s[j], h.s[i]
+ i = j
+ }
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 00000000..693f1908
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+ "fmt"
+ "log/syslog"
+ "time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+ for _ = range time.Tick(d) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ case Gauge:
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ case GaugeFloat64:
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ case Healthcheck:
+ metric.Check()
+ w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+ name,
+ h.Count(),
+ h.Min(),
+ h.Max(),
+ h.Mean(),
+ h.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ ))
+ case Meter:
+ m := metric.Snapshot()
+ w.Info(fmt.Sprintf(
+ "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+ name,
+ m.Count(),
+ m.Rate1(),
+ m.Rate5(),
+ m.Rate15(),
+ m.RateMean(),
+ ))
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+ name,
+ t.Count(),
+ t.Min(),
+ t.Max(),
+ t.Mean(),
+ t.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ t.Rate1(),
+ t.Rate5(),
+ t.Rate15(),
+ t.RateMean(),
+ ))
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 00000000..d6ec4c62
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Timer
+ StdDev() float64
+ Stop()
+ Sum() int64
+ Time(func())
+ Update(time.Duration)
+ UpdateSince(time.Time)
+ Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: h,
+ meter: m,
+ }
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+ c := NewTimer()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewTimer() Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+ meter: NewMeter(),
+ }
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+ h Histogram
+ m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+ histogram Histogram
+ meter Meter
+ mutex sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+ return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+ return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+ return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+ return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+ return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+ return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+ return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+ return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return &TimerSnapshot{
+ histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+ meter: t.meter.Snapshot().(*MeterSnapshot),
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+ return t.histogram.StdDev()
+}
+
+// Stop stops the meter.
+func (t *StandardTimer) Stop() {
+ t.meter.Stop()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+ return t.histogram.Sum()
+}
+
+// Record the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+ ts := time.Now()
+ f()
+ t.Update(time.Since(ts))
+}
+
+// Record the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(d))
+ t.meter.Mark(1)
+}
+
+// Record the duration of an event that started at a time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(time.Since(ts)))
+ t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+ return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+ histogram *HistogramSnapshot
+ meter *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+ panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+ panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+ panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100644
index 00000000..c4ae91e6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 00000000..091e971d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteOnce(r, w)
+ }
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+ var namedMetrics namedMetricSlice
+ r.Each(func(name string, i interface{}) {
+ namedMetrics = append(namedMetrics, namedMetric{name, i})
+ })
+
+ sort.Sort(namedMetrics)
+ for _, namedMetric := range namedMetrics {
+ switch metric := namedMetric.m.(type) {
+ case Counter:
+ fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ case Gauge:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+ fmt.Fprintf(w, " error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", h.Count())
+ fmt.Fprintf(w, " min: %9d\n", h.Min())
+ fmt.Fprintf(w, " max: %9d\n", h.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", m.Count())
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", t.Count())
+ fmt.Fprintf(w, " min: %9d\n", t.Min())
+ fmt.Fprintf(w, " max: %9d\n", t.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
+ }
+ }
+}
+
+type namedMetric struct {
+ name string
+ m interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+ return nms[i].name < nms[j].name
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/LICENSE b/vendor/github.com/wasmerio/go-ext-wasm/LICENSE
new file mode 100644
index 00000000..62bb543e
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019-present Wasmer, Inc. and its affiliates.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/BUILD.bazel b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/BUILD.bazel
new file mode 100644
index 00000000..06e48d59
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/BUILD.bazel
@@ -0,0 +1,36 @@
+package(default_visibility = ["//visibility:public"])
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@rules_cc//cc:defs.bzl", "cc_library")
+
+cc_library(
+ name = "wasm",
+ srcs = glob(["*.h"]) + select({
+ "@io_bazel_rules_go//go/platform:darwin": ["libwasmer_runtime_c_api.dylib"],
+ "@io_bazel_rules_go//go/platform:linux_amd64": ["libwasmer_runtime_c_api.so"],
+ "@io_bazel_rules_go//go/platform:windows_amd64": ["libwasmer_runtime_c_api.dll"],
+ }),
+ hdrs = glob(["*.h"]),
+ visibility = ["//visibility:public"],
+ includes = ["."],
+)
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "bridge.go",
+ "error.go",
+ "import.go",
+ "instance.go",
+ "memory.go",
+ "module.go",
+ "value.go",
+ "wasi.go",
+ "wasmer.go",
+ "wasmer.h",
+ ],
+ cgo = True,
+ clinkopts = ["-Wl,-rpath,wasmer -Lwasmer/wasmer -lwasmer_runtime_c_api"],
+ importpath = "github.com/wasmerio/go-ext-wasm/wasmer",
+ visibility = ["//visibility:public"],
+ cdeps = [":wasm"],
+)
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/bridge.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/bridge.go
new file mode 100644
index 00000000..d761efff
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/bridge.go
@@ -0,0 +1,756 @@
+package wasmer
+
+// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} -lwasmer
+// #include "./wasmer.h"
+//
+import "C"
+import "unsafe"
+
+// This file is the foundation of this package.
+//
+// [Wasmer] is the WebAssembly runtime used by this package to run
+// WebAssembly modules. It is written in [Rust]. The runtime exposes a C
+// API through [the `wasmer-runtime-c-api` crate][wasmer-runtime-c-api],
+// which is also written in Rust but compiles to C compatible shared
+// libraries. C and C++ headers are also automatically built at
+// compile-time. [Wasmer releases][wasmer-releases] come with
+// pre-compiled `.dylib` and `.so` shared libraries. It is also possible
+// to re-compile specific shared libraries with this project. In this
+// package, they are located in the root as `libwasmer\..*`.
+//
+// Go provides [cgo] that enables the creation of Go packages that
+// call C code. This package uses cgo to communicate with Wasmer through
+// C.
+//
+// This `bridge.go` file contains a thin layer on top of the cgo
+// generated code to get a more user-friendly API. It is the only place
+// where data transit from Go to the Wasmer runtime. Any new features
+// provided by Wasmer that needs to be exposed in this package must be
+// “described” here.
+//
+// Schematically, the workflow looks like this:
+//
+// +------------------------------+
+// | |
+// | +------------------------+ |
+// | | | |
+// | | Go | |
+// | | | |
+// | +----+--------------+----+ |
+// | | ^ |
+// | v | |
+// | +----+--------------+----+ |
+// | | | |
+// | | bridge.go | | go-ext-wasm/wasmer
+// | | | |
+// | +----+--------------+----+ |
+// | | ^ |
+// | v | |
+// | +----+--------------+----+ |
+// | | | |
+// | | cgo | |
+// | | | |
+// | +----+--------------+----+ |
+// | | ^ |
+// +-------|--------------|-------+
+// | v | |
+// | +----+--------------+----+ |
+// | | | |
+// | | wasmer-runtime-c-api | |
+// | | | |
+// | +----+--------------+----+ |
+// | | ^ | Wasmer runtime
+// | v | | (shared library)
+// | +----+--------------+----+ |
+// | | | |
+// | | Wasmer runtime | |
+// | | | |
+// | +------------------------+ |
+// | |
+// +------------------------------+
+//
+// The cgo part is auto-generated by Go. It should be considered
+// as an opaque black-box.
+//
+// Thanks to `bridge.go`, the rest of this package can talk to the
+// Wasmer runtime as if it is almost regular Go code.
+//
+//
+// [Wasmer]: https://github.com/wasmerio/wasmer
+// [Rust]: https://www.rust-lang.org/
+// [wasmer-runtime-c-api]: https://github.com/wasmerio/wasmer/tree/master/lib/runtime-c-api
+// [wasmer-releases]: https://github.com/wasmerio/wasmer/releases
+// [cgo]: https://golang.org/cmd/cgo/
+
+type cBool C.bool
+type cChar C.char
+type cInt C.int
+type cUchar C.uchar
+type cUint C.uint
+type cUint32T C.uint32_t
+type cUint8T C.uint8_t
+type cWasmerByteArray C.wasmer_byte_array
+type cWasmerExportDescriptorT C.wasmer_export_descriptor_t
+type cWasmerExportDescriptorsT C.wasmer_export_descriptors_t
+type cWasmerExportFuncT C.wasmer_export_func_t
+type cWasmerExportT C.wasmer_export_t
+type cWasmerExportsT C.wasmer_exports_t
+type cWasmerImportDescriptorT C.wasmer_import_descriptor_t
+type cWasmerImportDescriptorsT C.wasmer_import_descriptors_t
+type cWasmerImportExportKind C.wasmer_import_export_kind
+type cWasmerImportExportValue C.wasmer_import_export_value
+type cWasmerImportFuncT C.wasmer_import_func_t
+type cWasmerImportObjectT C.wasmer_import_object_t
+type cWasmerImportT C.wasmer_import_t
+type cWasmerInstanceContextT C.wasmer_instance_context_t
+type cWasmerInstanceT C.wasmer_instance_t
+type cWasmerMemoryT C.wasmer_memory_t
+type cWasmerModuleT C.wasmer_module_t
+type cWasmerResultT C.wasmer_result_t
+type cWasmerSerializedModuleT C.wasmer_serialized_module_t
+type cWasmerValueT C.wasmer_value_t
+type cWasmerValueTag C.wasmer_value_tag
+type cWasmerWasiMapDirEntryT C.wasmer_wasi_map_dir_entry_t
+
+const cVersionLatest = C.Latest
+const cVersionSnapshot0 = C.Snapshot0
+const cVersionSnapshot1 = C.Snapshot1
+const cVersionUnknown = C.Unknown
+const cWasmF32 = C.WASM_F32
+const cWasmF64 = C.WASM_F64
+const cWasmFunction = C.WASM_FUNCTION
+const cWasmGlobal = C.WASM_GLOBAL
+const cWasmI32 = C.WASM_I32
+const cWasmI64 = C.WASM_I64
+const cWasmMemory = C.WASM_MEMORY
+const cWasmTable = C.WASM_TABLE
+const cWasmerOk = C.WASMER_OK
+
+func cGetParamsForImportFunc(function *cWasmerImportFuncT) []cWasmerValueTag {
+ var arity C.uint32_t
+ var result = C.wasmer_import_func_params_arity((*C.wasmer_import_func_t)(function), &arity)
+
+ if result != C.WASMER_OK {
+ return nil
+ }
+
+ var params = make([]cWasmerValueTag, (int)(arity))
+
+ if arity == 0 {
+ return params
+ }
+
+ result = C.wasmer_import_func_params(
+ (*C.wasmer_import_func_t)(function),
+ (*C.wasmer_value_tag)(unsafe.Pointer(¶ms[0])),
+ arity,
+ )
+
+ if result != C.WASMER_OK {
+ return nil
+ }
+
+ return params
+}
+
+func cGetReturnsForImportFunc(function *cWasmerImportFuncT) []cWasmerValueTag {
+ var arity C.uint32_t
+ var result = C.wasmer_import_func_returns_arity((*C.wasmer_import_func_t)(function), &arity)
+
+ if result != C.WASMER_OK {
+ return nil
+ }
+
+ var returns = make([]cWasmerValueTag, (int)(arity))
+
+ if arity == 0 {
+ return returns
+ }
+
+ result = C.wasmer_import_func_returns(
+ (*C.wasmer_import_func_t)(function),
+ (*C.wasmer_value_tag)(unsafe.Pointer(&returns[0])),
+ arity,
+ )
+
+ if result != C.WASMER_OK {
+ return nil
+ }
+
+ return returns
+}
+
+func cNewWasmerImportTFunction(
+ moduleName string,
+ importName string,
+ function *cWasmerImportFuncT,
+) cWasmerImportT {
+ var importedFunction C.wasmer_import_t
+ importedFunction.module_name = (C.wasmer_byte_array)(cGoStringToWasmerByteArray(moduleName))
+ importedFunction.import_name = (C.wasmer_byte_array)(cGoStringToWasmerByteArray(importName))
+ importedFunction.tag = cWasmFunction
+
+ var pointer = (**C.wasmer_import_func_t)(unsafe.Pointer(&importedFunction.value))
+ *pointer = (*C.wasmer_import_func_t)(function)
+
+ return (cWasmerImportT)(importedFunction)
+}
+
+func cNewWasmerImportTMemory(
+ moduleName string,
+ importName string,
+ memory *cWasmerMemoryT,
+) cWasmerImportT {
+ var importedMemory C.wasmer_import_t
+ importedMemory.module_name = (C.wasmer_byte_array)(cGoStringToWasmerByteArray(moduleName))
+ importedMemory.import_name = (C.wasmer_byte_array)(cGoStringToWasmerByteArray(importName))
+ importedMemory.tag = cWasmMemory
+
+ var pointer = (**C.wasmer_memory_t)(unsafe.Pointer(&importedMemory.value))
+ *pointer = (*C.wasmer_memory_t)(memory)
+
+ return (cWasmerImportT)(importedMemory)
+}
+
+func cNewWasmerWasiImportObject(
+ arguments *cWasmerByteArray,
+ argumentsLength uint,
+ environmentVariables *cWasmerByteArray,
+ environmentVariablesLength uint,
+ preopenedFiles *cWasmerByteArray,
+ preopenFilesLength uint,
+ mappedDirs *cWasmerWasiMapDirEntryT,
+ mappedDirsLength uint,
+) *cWasmerImportObjectT {
+ return (*cWasmerImportObjectT)(C.wasmer_wasi_generate_import_object(
+ (*C.wasmer_byte_array)(arguments),
+ (C.uint)(argumentsLength),
+ (*C.wasmer_byte_array)(environmentVariables),
+ (C.uint)(environmentVariablesLength),
+ (*C.wasmer_byte_array)(preopenedFiles),
+ (C.uint)(preopenFilesLength),
+ (*C.wasmer_wasi_map_dir_entry_t)(mappedDirs),
+ (C.uint)(mappedDirsLength),
+ ))
+}
+
+func cNewWasmerWasiImportObjectForVersion(
+ version uint,
+ arguments *cWasmerByteArray,
+ argumentsLength uint,
+ environmentVariables *cWasmerByteArray,
+ environmentVariablesLength uint,
+ preopenedFiles *cWasmerByteArray,
+ preopenFilesLength uint,
+ mappedDirs *cWasmerWasiMapDirEntryT,
+ mappedDirsLength uint,
+) *cWasmerImportObjectT {
+ return (*cWasmerImportObjectT)(C.wasmer_wasi_generate_import_object_for_version(
+ (C.uchar)(version),
+ (*C.wasmer_byte_array)(arguments),
+ (C.uint)(argumentsLength),
+ (*C.wasmer_byte_array)(environmentVariables),
+ (C.uint)(environmentVariablesLength),
+ (*C.wasmer_byte_array)(preopenedFiles),
+ (C.uint)(preopenFilesLength),
+ (*C.wasmer_wasi_map_dir_entry_t)(mappedDirs),
+ (C.uint)(mappedDirsLength),
+ ))
+}
+
+func cWasmerWasiGetVersion(module *cWasmerModuleT) uint {
+ return (uint)(C.wasmer_wasi_get_version(
+ (*C.wasmer_module_t)(module),
+ ))
+}
+
+func cWasmerImportObjectDestroy(importObject *cWasmerImportObjectT) {
+ C.wasmer_import_object_destroy((*C.wasmer_import_object_t)(importObject))
+}
+
+func cWasmerImportObjectExtend(
+ importObject *cWasmerImportObjectT,
+ imports *cWasmerImportT,
+ importLength cUint,
+) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_import_object_extend(
+ (*C.wasmer_import_object_t)(importObject),
+ (*C.wasmer_import_t)(imports),
+ (C.uint)(importLength),
+ ))
+}
+
+func cNewWasmerImportObject() *cWasmerImportObjectT {
+ return (*cWasmerImportObjectT)(C.wasmer_import_object_new())
+}
+
+func cWasmerImportObjectGetFunctions(importObject *cWasmerImportObjectT) []cWasmerImportT {
+ var iterator = C.wasmer_import_object_iterate_functions((*C.wasmer_import_object_t)(importObject))
+
+ if iterator == nil {
+ return nil
+ }
+
+ var imports []cWasmerImportT
+
+ for !C.wasmer_import_object_iter_at_end(iterator) {
+ var impoort cWasmerImportT
+
+ result := C.wasmer_import_object_iter_next(iterator, (*C.wasmer_import_t)(&impoort))
+
+ if result != C.WASMER_OK {
+ C.wasmer_import_object_imports_destroy(
+ (*C.wasmer_import_t)(&imports[0]),
+ (C.uint)(len(imports)),
+ )
+ C.wasmer_import_object_iter_destroy(iterator)
+
+ return nil
+ }
+
+ imports = append(imports, impoort)
+ }
+
+ return imports
+}
+
+func cWasmerCompile(module **cWasmerModuleT, wasmBytes *cUchar, wasmBytesLength cUint) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_compile(
+ (**C.wasmer_module_t)(unsafe.Pointer(module)),
+ (*C.uchar)(wasmBytes),
+ (C.uint)(wasmBytesLength),
+ ))
+}
+
+func cWasmerExportDescriptorKind(exportDescriptor *cWasmerExportDescriptorT) cWasmerImportExportKind {
+ return (cWasmerImportExportKind)(C.wasmer_export_descriptor_kind(
+ (*C.wasmer_export_descriptor_t)(exportDescriptor),
+ ))
+}
+
+func cWasmerExportDescriptorName(exportDescriptor *cWasmerExportDescriptorT) cWasmerByteArray {
+ return (cWasmerByteArray)(C.wasmer_export_descriptor_name(
+ (*C.wasmer_export_descriptor_t)(exportDescriptor),
+ ))
+}
+
+func cWasmerExportDescriptors(module *cWasmerModuleT, exportDescriptors **cWasmerExportDescriptorsT) {
+ C.wasmer_export_descriptors(
+ (*C.wasmer_module_t)(module),
+ (**C.wasmer_export_descriptors_t)(unsafe.Pointer(exportDescriptors)),
+ )
+}
+
+func cWasmerExportDescriptorsDestroy(exportDescriptors *cWasmerExportDescriptorsT) {
+ C.wasmer_export_descriptors_destroy(
+ (*C.wasmer_export_descriptors_t)(exportDescriptors),
+ )
+}
+
+func cWasmerExportDescriptorsGet(exportDescriptors *cWasmerExportDescriptorsT, index cInt) *cWasmerExportDescriptorT {
+ return (*cWasmerExportDescriptorT)(C.wasmer_export_descriptors_get(
+ (*C.wasmer_export_descriptors_t)(exportDescriptors),
+ (C.int)(index),
+ ))
+}
+
+func cWasmerExportDescriptorsLen(exportDescriptors *cWasmerExportDescriptorsT) cInt {
+ return (cInt)(C.wasmer_export_descriptors_len(
+ (*C.wasmer_export_descriptors_t)(exportDescriptors),
+ ))
+}
+
+func cWasmerExportFuncParams(function *cWasmerExportFuncT, parameters *cWasmerValueTag, parametersLength cUint32T) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_export_func_params(
+ (*C.wasmer_export_func_t)(function),
+ (*C.wasmer_value_tag)(parameters),
+ (C.uint32_t)(parametersLength),
+ ))
+}
+
+func cWasmerExportFuncParamsArity(function *cWasmerExportFuncT, result *cUint32T) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_export_func_params_arity(
+ (*C.wasmer_export_func_t)(function),
+ (*C.uint32_t)(result),
+ ))
+}
+
+func cWasmerExportFuncResultsArity(function *cWasmerExportFuncT, result *cUint32T) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_export_func_returns_arity(
+ (*C.wasmer_export_func_t)(function),
+ (*C.uint32_t)(result),
+ ))
+}
+
+func cWasmerExportKind(export *cWasmerExportT) cWasmerImportExportKind {
+ return (cWasmerImportExportKind)(C.wasmer_export_kind(
+ (*C.wasmer_export_t)(export),
+ ))
+}
+
+func cWasmerExportName(export *cWasmerExportT) cWasmerByteArray {
+ return (cWasmerByteArray)(C.wasmer_export_name(
+ (*C.wasmer_export_t)(export),
+ ))
+}
+
+func cWasmerExportToFunc(export *cWasmerExportT) *cWasmerExportFuncT {
+ return (*cWasmerExportFuncT)(C.wasmer_export_to_func(
+ (*C.wasmer_export_t)(export),
+ ))
+}
+
+func cWasmerExportToMemory(export *cWasmerExportT, memory **cWasmerMemoryT) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_export_to_memory(
+ (*C.wasmer_export_t)(export),
+ (**C.wasmer_memory_t)(unsafe.Pointer(memory)),
+ ))
+}
+
+func cWasmerExportsDestroy(exports *cWasmerExportsT) {
+ C.wasmer_exports_destroy(
+ (*C.wasmer_exports_t)(exports),
+ )
+}
+
+func cWasmerExportsGet(exports *cWasmerExportsT, index cInt) *cWasmerExportT {
+ return (*cWasmerExportT)(C.wasmer_exports_get(
+ (*C.wasmer_exports_t)(exports),
+ (C.int)(index),
+ ))
+}
+
+func cWasmerExportsLen(exports *cWasmerExportsT) cInt {
+ return (cInt)(C.wasmer_exports_len(
+ (*C.wasmer_exports_t)(exports),
+ ))
+}
+
+func cWasmerImportDescriptorKind(importDescriptor *cWasmerImportDescriptorT) cWasmerImportExportKind {
+ return (cWasmerImportExportKind)(C.wasmer_import_descriptor_kind(
+ (*C.wasmer_import_descriptor_t)(importDescriptor),
+ ))
+}
+
+func cWasmerImportDescriptorModuleName(importDescriptor *cWasmerImportDescriptorT) cWasmerByteArray {
+ return (cWasmerByteArray)(C.wasmer_import_descriptor_module_name(
+ (*C.wasmer_import_descriptor_t)(importDescriptor),
+ ))
+}
+
+func cWasmerImportDescriptorName(importDescriptor *cWasmerImportDescriptorT) cWasmerByteArray {
+ return (cWasmerByteArray)(C.wasmer_import_descriptor_name(
+ (*C.wasmer_import_descriptor_t)(importDescriptor),
+ ))
+}
+
+func cWasmerImportDescriptors(module *cWasmerModuleT, importDescriptors **cWasmerImportDescriptorsT) {
+ C.wasmer_import_descriptors(
+ (*C.wasmer_module_t)(module),
+ (**C.wasmer_import_descriptors_t)(unsafe.Pointer(importDescriptors)),
+ )
+}
+
+func cWasmerImportDescriptorsDestroy(importDescriptors *cWasmerImportDescriptorsT) {
+ C.wasmer_import_descriptors_destroy(
+ (*C.wasmer_import_descriptors_t)(importDescriptors),
+ )
+}
+
+func cWasmerImportDescriptorsGet(importDescriptors *cWasmerImportDescriptorsT, index cInt) *cWasmerImportDescriptorT {
+ return (*cWasmerImportDescriptorT)(C.wasmer_import_descriptors_get(
+ (*C.wasmer_import_descriptors_t)(importDescriptors),
+ (C.uint)(index),
+ ))
+}
+
+func cWasmerImportDescriptorsLen(importDescriptors *cWasmerImportDescriptorsT) cInt {
+ return (cInt)(C.wasmer_import_descriptors_len(
+ (*C.wasmer_import_descriptors_t)(importDescriptors),
+ ))
+}
+
+func cWasmerImportFuncDestroy(function *cWasmerImportFuncT) {
+ C.wasmer_import_func_destroy(
+ (*C.wasmer_import_func_t)(function),
+ )
+}
+
+func cWasmerImportFuncNew(
+ function unsafe.Pointer,
+ parametersSignature *cWasmerValueTag,
+ parametersLength cUint,
+ resultsSignature *cWasmerValueTag,
+ resultsLength cUint,
+) *cWasmerImportFuncT {
+ return (*cWasmerImportFuncT)(C.wasmer_import_func_new(
+ (*[0]byte)(function),
+ (*C.wasmer_value_tag)(parametersSignature),
+ (C.uint)(parametersLength),
+ (*C.wasmer_value_tag)(resultsSignature),
+ (C.uint)(resultsLength),
+ ))
+}
+
+func cWasmerInstanceCall(
+ instance *cWasmerInstanceT,
+ name *cChar,
+ parameters *cWasmerValueT,
+ parametersLength cUint32T,
+ results *cWasmerValueT,
+ resultsLength cUint32T,
+) cWasmerResultT {
+ return (cWasmerResultT)(C.wasmer_instance_call(
+ (*C.wasmer_instance_t)(instance),
+ (*C.char)(name),
+ (*C.wasmer_value_t)(parameters),
+ (C.uint32_t)(parametersLength),
+ (*C.wasmer_value_t)(results),
+ (C.uint32_t)(resultsLength),
+ ))
+}
+
+// cWasmerInstanceContextDataGet returns the opaque data pointer previously
+// stored on the instance via cWasmerInstanceContextDataSet.
+func cWasmerInstanceContextDataGet(instanceContext *cWasmerInstanceContextT) unsafe.Pointer {
+	return unsafe.Pointer(C.wasmer_instance_context_data_get(
+		(*C.wasmer_instance_context_t)(instanceContext),
+	))
+}
+
+// cWasmerInstanceContextDataSet attaches an opaque data pointer to the
+// instance; it can be read back from import functions via the context.
+func cWasmerInstanceContextDataSet(instance *cWasmerInstanceT, dataPointer unsafe.Pointer) {
+	C.wasmer_instance_context_data_set(
+		(*C.wasmer_instance_t)(instance),
+		dataPointer,
+	)
+}
+
+// cWasmerInstanceContextMemory returns the instance's memory; the hard-coded
+// 0 selects the first memory index.
+func cWasmerInstanceContextMemory(instanceContext *cWasmerInstanceContextT) *cWasmerMemoryT {
+	return (*cWasmerMemoryT)(C.wasmer_instance_context_memory(
+		(*C.wasmer_instance_context_t)(instanceContext),
+		0,
+	))
+}
+
+// cWasmerInstanceDestroy frees a WebAssembly instance.
+func cWasmerInstanceDestroy(instance *cWasmerInstanceT) {
+	C.wasmer_instance_destroy(
+		(*C.wasmer_instance_t)(instance),
+	)
+}
+
+// cWasmerInstanceExports writes the instance's export list into the out
+// parameter exports.
+func cWasmerInstanceExports(instance *cWasmerInstanceT, exports **cWasmerExportsT) {
+	C.wasmer_instance_exports(
+		(*C.wasmer_instance_t)(instance),
+		(**C.wasmer_exports_t)(unsafe.Pointer(exports)),
+	)
+}
+
+// cWasmerInstantiate compiles and instantiates raw Wasm bytes with the given
+// imports, writing the new instance into the out parameter and returning a
+// result code.
+func cWasmerInstantiate(
+	instance **cWasmerInstanceT,
+	wasmBytes *cUchar,
+	wasmBytesLength cUint,
+	imports *cWasmerImportT,
+	importsLength cInt,
+) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_instantiate(
+		(**C.wasmer_instance_t)(unsafe.Pointer(instance)),
+		(*C.uchar)(wasmBytes),
+		(C.uint)(wasmBytesLength),
+		(*C.wasmer_import_t)(imports),
+		(C.int)(importsLength),
+	))
+}
+
+// cWasmerLastErrorLength returns the byte length of the runtime's last error
+// message (0 when there is none).
+func cWasmerLastErrorLength() cInt {
+	return (cInt)(C.wasmer_last_error_length())
+}
+
+// cWasmerLastErrorMessage copies the last error message into buffer (of the
+// given length) and returns the number of bytes written, or -1 on failure.
+func cWasmerLastErrorMessage(buffer *cChar, length cInt) cInt {
+	return (cInt)(C.wasmer_last_error_message(
+		(*C.char)(buffer),
+		(C.int)(length),
+	))
+}
+
+// cWasmerMemoryData returns a pointer to the start of the linear memory.
+func cWasmerMemoryData(memory *cWasmerMemoryT) *cUint8T {
+	return (*cUint8T)(C.wasmer_memory_data(
+		(*C.wasmer_memory_t)(memory),
+	))
+}
+
+// cWasmerMemoryDataLength returns the current size of the linear memory in
+// bytes.
+func cWasmerMemoryDataLength(memory *cWasmerMemoryT) cUint32T {
+	return (cUint32T)(C.wasmer_memory_data_length(
+		(*C.wasmer_memory_t)(memory),
+	))
+}
+
+// cWasmerMemoryDestroy frees an owned Wasmer memory.
+func cWasmerMemoryDestroy(memory *cWasmerMemoryT) {
+	C.wasmer_memory_destroy(
+		(*C.wasmer_memory_t)(memory),
+	)
+}
+
+// cWasmerMemoryGrow grows the memory by numberOfPages Wasm pages, returning
+// a result code.
+func cWasmerMemoryGrow(memory *cWasmerMemoryT, numberOfPages cUint32T) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_memory_grow(
+		(*C.wasmer_memory_t)(memory),
+		(C.uint32_t)(numberOfPages),
+	))
+}
+
+// cWasmerMemoryNew allocates a new memory with the given page limits and
+// writes it into the out parameter. A max of 0 means "no maximum"
+// (has_some stays false); any positive max is passed as a hard upper bound.
+func cWasmerMemoryNew(memory **cWasmerMemoryT, min, max cUint32T) cWasmerResultT {
+	limits := C.wasmer_limits_t{
+		min: C.uint32_t(min),
+		max: C.wasmer_limit_option_t{
+			has_some: false,
+		},
+	}
+
+	if max > 0 {
+		limits.max = C.wasmer_limit_option_t{
+			has_some: true,
+			some: C.uint32_t(max),
+		}
+	}
+
+	return (cWasmerResultT)(C.wasmer_memory_new(
+		(**C.wasmer_memory_t)(unsafe.Pointer(memory)),
+		limits,
+	))
+}
+
+// cWasmerModuleDeserialize rebuilds a compiled module from its serialized
+// form, writing it into the out parameter.
+func cWasmerModuleDeserialize(module **cWasmerModuleT, serializedModule *cWasmerSerializedModuleT) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_module_deserialize(
+		(**C.wasmer_module_t)(unsafe.Pointer(module)),
+		(*C.wasmer_serialized_module_t)(serializedModule),
+	))
+}
+
+// cWasmerModuleDestroy frees a compiled module.
+func cWasmerModuleDestroy(module *cWasmerModuleT) {
+	C.wasmer_module_destroy((*C.wasmer_module_t)(module))
+}
+
+// cWasmerModuleImportInstantiate instantiates a compiled module against an
+// import object, writing the new instance into the out parameter.
+func cWasmerModuleImportInstantiate(
+	instance **cWasmerInstanceT,
+	module *cWasmerModuleT,
+	importObject *cWasmerImportObjectT,
+) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_module_import_instantiate(
+		(**C.wasmer_instance_t)(unsafe.Pointer(instance)),
+		(*C.wasmer_module_t)(module),
+		(*C.wasmer_import_object_t)(importObject),
+	))
+}
+
+// cWasmerModuleInstantiate instantiates a compiled module with a flat import
+// array, writing the new instance into the out parameter.
+func cWasmerModuleInstantiate(
+	module *cWasmerModuleT,
+	instance **cWasmerInstanceT,
+	imports *cWasmerImportT,
+	importsLength cInt,
+) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_module_instantiate(
+		(*C.wasmer_module_t)(module),
+		(**C.wasmer_instance_t)(unsafe.Pointer(instance)),
+		(*C.wasmer_import_t)(imports),
+		(C.int)(importsLength),
+	))
+}
+
+// cWasmerModuleSerialize serializes a compiled module, writing the serialized
+// handle into the out parameter.
+func cWasmerModuleSerialize(serializedModule **cWasmerSerializedModuleT, module *cWasmerModuleT) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_module_serialize(
+		(**C.wasmer_serialized_module_t)(unsafe.Pointer(serializedModule)),
+		(*C.wasmer_module_t)(module),
+	))
+}
+
+// cWasmerSerializedModuleBytes copies the serialized module's bytes into a
+// freshly allocated Go slice (C.GoBytes copies).
+func cWasmerSerializedModuleBytes(serializedModule *cWasmerSerializedModuleT) []byte {
+	var byteArray = C.wasmer_serialized_module_bytes(
+		(*C.wasmer_serialized_module_t)(serializedModule),
+	)
+
+	return C.GoBytes(unsafe.Pointer(byteArray.bytes), (C.int)(byteArray.bytes_len))
+}
+
+// cWasmerSerializedModuleDestroy frees a serialized module handle.
+func cWasmerSerializedModuleDestroy(serializedModule *cWasmerSerializedModuleT) {
+	C.wasmer_serialized_module_destroy(
+		(*C.wasmer_serialized_module_t)(serializedModule),
+	)
+}
+
+// cWasmerSerializedModuleFromBytes reconstructs a serialized-module handle
+// from raw bytes, writing it into the out parameter.
+func cWasmerSerializedModuleFromBytes(
+	serializedModule **cWasmerSerializedModuleT,
+	serializedModuleBytes *cUint8T,
+	serializedModuleBytesLength cInt,
+) cWasmerResultT {
+	return (cWasmerResultT)(C.wasmer_serialized_module_from_bytes(
+		(**C.wasmer_serialized_module_t)(unsafe.Pointer(serializedModule)),
+		(*C.uint8_t)(serializedModuleBytes),
+		(C.uint)(serializedModuleBytesLength),
+	))
+}
+
+// cWasmerValidate reports whether the given bytes are a valid Wasm module.
+func cWasmerValidate(wasmBytes *cUchar, wasmBytesLength cUint) cBool {
+	return (cBool)(C.wasmer_validate(
+		(*C.uchar)(wasmBytes),
+		(C.uint)(wasmBytesLength),
+	))
+}
+
+// cCString copies a Go string into a C-allocated NUL-terminated buffer; the
+// caller is responsible for releasing it with cFree.
+func cCString(string string) *cChar {
+	return (*cChar)(C.CString(string))
+}
+
+// cFree releases memory that was allocated on the C heap (e.g. by cCString).
+func cFree(pointer unsafe.Pointer) {
+	C.free(pointer)
+}
+
+// cGoString copies a NUL-terminated C string into a Go string.
+func cGoString(string *cChar) string {
+	return C.GoString((*C.char)(string))
+}
+
+// cGoStringN copies exactly length bytes of a C string into a Go string.
+func cGoStringN(string *cChar, length cInt) string {
+	return C.GoStringN((*C.char)(string), (C.int)(length))
+}
+
+// cGoStringToWasmerByteArray copies a Go string to the C heap and wraps the
+// pointer in a wasmer_byte_array. bytes_len is len(string), i.e. without the
+// trailing NUL that C.CString appends; ownership of the C buffer passes to
+// the caller.
+func cGoStringToWasmerByteArray(string string) cWasmerByteArray {
+	var cString = cCString(string)
+
+	var byteArray cWasmerByteArray
+	byteArray.bytes = (*C.uchar)(unsafe.Pointer(cString))
+	byteArray.bytes_len = (C.uint)(len(string))
+
+	return byteArray
+}
+
+// cWasmerByteArrayToGoString copies a wasmer_byte_array into a Go string.
+func cWasmerByteArrayToGoString(byteArray *cWasmerByteArray) string {
+	return string(C.GoBytes(
+		unsafe.Pointer(byteArray.bytes),
+		(C.int)(byteArray.bytes_len),
+	))
+}
+
+// cAliasAndHostPathToWasiDirEntry builds a WASI mapped-directory entry from a
+// guest-visible alias and the host path it maps to. Both strings are copied
+// to the C heap (see cGoStringToWasmerByteArray).
+func cAliasAndHostPathToWasiDirEntry(alias string, hostPath string) cWasmerWasiMapDirEntryT {
+	var wasiMappedDir cWasmerWasiMapDirEntryT
+	wasiMappedDir.alias = (C.wasmer_byte_array)(cGoStringToWasmerByteArray(alias))
+	wasiMappedDir.host_file_path = (C.wasmer_byte_array)(cGoStringToWasmerByteArray(hostPath))
+
+	return wasiMappedDir
+}
+
+// cGetInfoFromImport returns the (module name, import name) pair for a given
+// import, copying both out of the C byte arrays.
+func cGetInfoFromImport(inner *cWasmerImportT) (string, string) {
+	moduleName := cWasmerByteArrayToGoString((*cWasmerByteArray)(&inner.module_name))
+	importName := cWasmerByteArrayToGoString((*cWasmerByteArray)(&inner.import_name))
+
+	return moduleName, importName
+}
+
+// cGetFunctionFromImport returns the raw pointer to the inner function, or
+// nil when the import's tag says it is not a function.
+func cGetFunctionFromImport(inner *cWasmerImportT) *cWasmerImportFuncT {
+	if inner.tag != C.WASM_FUNCTION {
+		return nil
+	}
+
+	// inner.value is cgo's view of the C union as an 8-byte array;
+	// reinterpret those bytes as the function pointer they hold.
+	var functionPointerBytes [8]byte = inner.value
+	var functionPointerAddress = &functionPointerBytes[0]
+	var functionPointerPointer = (**cWasmerImportFuncT)(unsafe.Pointer(functionPointerAddress))
+
+	return *functionPointerPointer
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/error.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/error.go
new file mode 100644
index 00000000..b156ff19
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/error.go
@@ -0,0 +1,26 @@
+package wasmer
+
+import (
+ "errors"
+ "unsafe"
+)
+
+// GetLastError returns the last error message recorded by the Wasmer
+// runtime. When no error has been recorded it returns ("", nil); a non-nil
+// error is returned only when reading the message itself fails.
+func GetLastError() (string, error) {
+	var errorLength = cWasmerLastErrorLength()
+
+	if errorLength == 0 {
+		return "", nil
+	}
+
+	var errorMessage = make([]cChar, errorLength)
+	var errorMessagePointer = (*cChar)(unsafe.Pointer(&errorMessage[0]))
+
+	// wasmer_last_error_message returns -1 when it cannot copy the message.
+	var errorResult = cWasmerLastErrorMessage(errorMessagePointer, errorLength)
+
+	if -1 == errorResult {
+		return "", errors.New("Cannot read last error")
+	}
+
+	return cGoString(errorMessagePointer), nil
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/import.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/import.go
new file mode 100644
index 00000000..850fab61
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/import.go
@@ -0,0 +1,395 @@
+package wasmer
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// ImportedFunctionError represents any kind of errors related to a
+// WebAssembly imported function. It is returned by `Import` or `Imports`
+// functions only.
+type ImportedFunctionError struct {
+	// Name of the offending imported function.
+	functionName string
+	// Error message; may contain a `%s` placeholder for functionName.
+	message string
+}
+
+// ImportObjectError represents errors related to `ImportObject`s.
+type ImportObjectError struct {
+	// Plain error message (no formatting placeholders).
+	message string
+}
+
+// ImportObject owns a set of imports.
+// It can be combined with a `Module` to create an `Instance`.
+type ImportObject struct {
+	// Handle to the underlying Wasmer import object; freed by Close.
+	inner *cWasmerImportObjectT
+}
+
+// NewImportObject creates an empty `ImportObject`. The caller owns it and
+// must release it with Close.
+func NewImportObject() *ImportObject {
+	var inner = cNewWasmerImportObject()
+
+	return &ImportObject{inner}
+}
+
+// Imports returns `*Imports` for a given `ImportObject`
+func (importObject *ImportObject) Imports() (*Imports, error) {
+ imports := cWasmerImportObjectGetFunctions(importObject.inner)
+ output := NewImports()
+
+ for _, impoort := range imports {
+ rawFunction := cGetFunctionFromImport(&impoort)
+
+ if rawFunction == nil {
+ // This is expected to never happen
+ continue
+ }
+
+ namespaceName, importName := cGetInfoFromImport(&impoort)
+ nextOutput, err := output.appendRaw(namespaceName, importName, rawFunction)
+
+ if err != nil {
+ return nil, err
+ }
+
+ output = nextOutput
+ }
+
+ return output, nil
+}
+
+// Extend adds the given imports to this import object, converting each
+// Go-side import into its C representation first. It is a no-op for an
+// empty set and returns an `ImportObjectError` when the underlying call
+// fails.
+func (importObject *ImportObject) Extend(imports Imports) error {
+	var numberOfImports = len(imports.imports)
+
+	if numberOfImports == 0 {
+		return nil
+	}
+
+	var cImports = make([]cWasmerImportT, numberOfImports)
+	var importNth = 0
+
+	for importName, importImport := range imports.imports {
+		cImports[importNth] = *getCWasmerImport(importName, importImport)
+		importNth++
+	}
+
+	// importNth == numberOfImports here: the map was fully copied, so the
+	// former second zero-check was dead code and the length does not need
+	// to be recomputed.
+	var extendResult = cWasmerImportObjectExtend(
+		importObject.inner,
+		(*cWasmerImportT)(unsafe.Pointer(&cImports[0])),
+		(cUint)(numberOfImports),
+	)
+
+	if extendResult != cWasmerOk {
+		return NewImportObjectError("Could not extend import object with the given imports")
+	}
+
+	return nil
+}
+
+// Close frees the underlying Wasmer import object. The `ImportObject` must
+// not be used afterwards.
+func (importObject *ImportObject) Close() {
+	cWasmerImportObjectDestroy(importObject.inner)
+}
+
+// NewImportObjectError constructs a new `ImportObjectError` carrying the
+// given message verbatim.
+func NewImportObjectError(message string) *ImportObjectError {
+	return &ImportObjectError{message}
+}
+
+// Error implements the error interface and returns the error message.
+func (error *ImportObjectError) Error() string {
+	// Return the message directly: feeding it to fmt.Sprintf as the format
+	// string would misinterpret any `%` verbs it contains (go vet: printf,
+	// non-constant format string with no arguments).
+	return error.message
+}
+
+// NewImportedFunctionError constructs a new `ImportedFunctionError`,
+// where `functionName` is the name of the imported function, and
+// `message` is the error message. If the error message contains `%s`,
+// then this parameter will be replaced by `functionName`.
+func NewImportedFunctionError(functionName string, message string) *ImportedFunctionError {
+	return &ImportedFunctionError{functionName, message}
+}
+
+// Error implements the error interface; any `%s` in the stored message is
+// substituted with the function name.
+func (error *ImportedFunctionError) Error() string {
+	return fmt.Sprintf(error.message, error.functionName)
+}
+
+// Import represents a WebAssembly instance imported function or
+// memory. Imagine it is an union of `ImportFunction` and `ImportMemory`.
+type Import interface{}
+
+// ImportFunction represents a WebAssembly instance imported function.
+type ImportFunction struct {
+	// An implementation must be of type:
+	// `func(context unsafe.Pointer, arguments ...interface{}) interface{}`.
+	// It represents the real function implementation written in Go.
+	implementation interface{}
+
+	// The pointer to the cgo function implementation, something
+	// like `C.foo`.
+	cgoPointer unsafe.Pointer
+
+	// The pointer to the Wasmer imported function.
+	importedFunctionPointer *cWasmerImportFuncT
+
+	// The input part of the function signature, as WebAssembly value tags.
+	wasmInputs []cWasmerValueTag
+
+	// The output part of the function signature, as WebAssembly value tags.
+	wasmOutputs []cWasmerValueTag
+
+	// The namespace of the imported function.
+	namespace string
+}
+
+// ImportMemory represents a WebAssembly instance imported memory.
+type ImportMemory struct {
+	// Memory to import.
+	memory *Memory
+
+	// The namespace of the imported memory.
+	namespace string
+}
+
+// Imports represents a set of imported functions for a WebAssembly instance.
+type Imports struct {
+	// All imports, keyed by import name.
+	imports map[string]Import
+
+	// Current namespace where to register the import.
+	currentNamespace string
+}
+
+// NewImports constructs a new empty `Imports`.
+func NewImports() *Imports {
+ var imports = make(map[string]Import)
+ var currentNamespace = "env"
+
+ return &Imports{imports, currentNamespace}
+}
+
+// Namespace changes the current namespace of the next imported functions.
+// It returns the receiver so calls can be chained.
+func (imports *Imports) Namespace(namespace string) *Imports {
+	imports.currentNamespace = namespace
+
+	return imports
+}
+
+// Append adds a new imported function to the current set.
+//
+// Deprecated: use AppendFunction instead.
+func (imports *Imports) Append(importName string, implementation interface{}, cgoPointer unsafe.Pointer) (*Imports, error) {
+	return imports.AppendFunction(importName, implementation, cgoPointer)
+}
+
+// AppendFunction adds a new imported function to the current set, validating
+// its Go signature by reflection: the implementation must be a function
+// whose first parameter is an `unsafe.Pointer` (the instance context), whose
+// remaining parameters are int32/int64/float32/float64, and which returns at
+// most one value of those same types. The import is registered under the
+// current namespace and the given name.
+func (imports *Imports) AppendFunction(importName string, implementation interface{}, cgoPointer unsafe.Pointer) (*Imports, error) {
+	var importType = reflect.TypeOf(implementation)
+
+	if importType.Kind() != reflect.Func {
+		return nil, NewImportedFunctionError(importName, fmt.Sprintf("Imported function `%%s` must be a function; given `%s`.", importType.Kind()))
+	}
+
+	var importInputsArity = importType.NumIn()
+
+	if importInputsArity < 1 {
+		return nil, NewImportedFunctionError(importName, "Imported function `%s` must at least have one argument for the instance context.")
+	}
+
+	if importType.In(0).Kind() != reflect.UnsafePointer {
+		return nil, NewImportedFunctionError(importName, fmt.Sprintf("The instance context of the `%%s` imported function must be of kind `unsafe.Pointer`; given `%s`; is it missing?", importType.In(0).Kind()))
+	}
+
+	// The context argument is not part of the WebAssembly signature.
+	importInputsArity--
+	var importOutputsArity = importType.NumOut()
+	var wasmInputs = make([]cWasmerValueTag, importInputsArity)
+	var wasmOutputs = make([]cWasmerValueTag, importOutputsArity)
+
+	// Map each remaining Go parameter kind to its WebAssembly value tag.
+	// Note that only the exact kinds below are accepted (e.g. uint32 is not).
+	for nth := 0; nth < importInputsArity; nth++ {
+		var importInput = importType.In(nth + 1)
+
+		switch importInput.Kind() {
+		case reflect.Int32:
+			wasmInputs[nth] = cWasmI32
+		case reflect.Int64:
+			wasmInputs[nth] = cWasmI64
+		case reflect.Float32:
+			wasmInputs[nth] = cWasmF32
+		case reflect.Float64:
+			wasmInputs[nth] = cWasmF64
+		default:
+			return nil, NewImportedFunctionError(importName, fmt.Sprintf("Invalid input type for the `%%s` imported function; given `%s`; only accept `int32`, `int64`, `float32`, and `float64`.", importInput.Kind()))
+		}
+	}
+
+	if importOutputsArity > 1 {
+		return nil, NewImportedFunctionError(importName, "The `%s` imported function must have at most one output value.")
+	} else if importOutputsArity == 1 {
+		switch importType.Out(0).Kind() {
+		case reflect.Int32:
+			wasmOutputs[0] = cWasmI32
+		case reflect.Int64:
+			wasmOutputs[0] = cWasmI64
+		case reflect.Float32:
+			wasmOutputs[0] = cWasmF32
+		case reflect.Float64:
+			wasmOutputs[0] = cWasmF64
+		default:
+			return nil, NewImportedFunctionError(importName, fmt.Sprintf("Invalid output type for the `%%s` imported function; given `%s`; only accept `int32`, `int64`, `float32`, and `float64`.", importType.Out(0).Kind()))
+		}
+	}
+
+	// The Wasmer-side function pointer is created later (see
+	// getCWasmerImport); it stays nil until then.
+	var importedFunctionPointer *cWasmerImportFuncT
+	var namespace = imports.currentNamespace
+
+	imports.imports[importName] = ImportFunction{
+		implementation,
+		cgoPointer,
+		importedFunctionPointer,
+		wasmInputs,
+		wasmOutputs,
+		namespace,
+	}
+
+	return imports, nil
+}
+
+// AppendMemory adds a new imported memory to the current set, registered
+// under the current namespace and the given name.
+func (imports *Imports) AppendMemory(importName string, memory *Memory) (*Imports, error) {
+	var namespace = imports.currentNamespace
+
+	imports.imports[importName] = ImportMemory{
+		memory,
+		namespace,
+	}
+
+	return imports, nil
+}
+
+// appendRaw registers an import that is backed by an existing Wasmer
+// function pointer rather than a Go implementation (the `implementation`
+// field is left nil). Used when reading imports back out of an
+// `ImportObject`.
+func (imports *Imports) appendRaw(
+	namespace string,
+	importName string,
+	wasmerImportFunc *cWasmerImportFuncT,
+) (*Imports, error) {
+	wasmInputs := cGetParamsForImportFunc(wasmerImportFunc)
+
+	if wasmInputs == nil {
+		return imports, NewImportedFunctionError(importName, fmt.Sprintf("Could not get the inputs for `%%s` in namespace `%s`", namespace))
+	}
+
+	wasmOutputs := cGetReturnsForImportFunc(wasmerImportFunc)
+
+	if wasmOutputs == nil {
+		return imports, NewImportedFunctionError(importName, fmt.Sprintf("Could not get the outputs for `%%s` in namespace `%s`", namespace))
+	}
+
+	imports.imports[importName] = ImportFunction{
+		nil,
+		unsafe.Pointer(wasmerImportFunc),
+		wasmerImportFunc,
+		wasmInputs,
+		wasmOutputs,
+		namespace,
+	}
+
+	return imports, nil
+}
+
+// Close frees every imported function owned by this set. Imported memories
+// are not freed here; their owner must release them manually.
+func (imports *Imports) Close() {
+	for _, anImport := range imports.imports {
+		importFunction, isFunction := anImport.(ImportFunction)
+		if !isFunction || importFunction.importedFunctionPointer == nil {
+			continue
+		}
+
+		cWasmerImportFuncDestroy(importFunction.importedFunctionPointer)
+	}
+}
+
+// getCWasmerImport converts a Go-side Import (function or memory) into its C
+// `wasmer_import_t` representation, or returns nil for an unknown kind.
+func getCWasmerImport(importName string, importImport Import) *cWasmerImportT {
+	// Imported function.
+	if importFunction, ok := importImport.(ImportFunction); ok {
+		var wasmInputsArity = len(importFunction.wasmInputs)
+		var wasmOutputsArity = len(importFunction.wasmOutputs)
+
+		var importFunctionInputsCPointer *cWasmerValueTag
+		var importFunctionOutputsCPointer *cWasmerValueTag
+
+		if wasmInputsArity > 0 {
+			importFunctionInputsCPointer = (*cWasmerValueTag)(unsafe.Pointer(&importFunction.wasmInputs[0]))
+		}
+
+		if wasmOutputsArity > 0 {
+			importFunctionOutputsCPointer = (*cWasmerValueTag)(unsafe.Pointer(&importFunction.wasmOutputs[0]))
+		}
+
+		// NOTE(review): the type assertion above yields a copy, so this
+		// assignment does not update the ImportFunction stored in the
+		// Imports map — its importedFunctionPointer stays nil for imports
+		// registered via AppendFunction. Verify whether Imports.Close is
+		// expected to free these.
+		importFunction.importedFunctionPointer = cWasmerImportFuncNew(
+			importFunction.cgoPointer,
+			importFunctionInputsCPointer,
+			cUint(wasmInputsArity),
+			importFunctionOutputsCPointer,
+			cUint(wasmOutputsArity),
+		)
+		var newImport = cNewWasmerImportTFunction(
+			importFunction.namespace,
+			importName,
+			importFunction.importedFunctionPointer,
+		)
+
+		return &newImport
+	}
+
+	// Imported memory.
+	if importMemory, ok := importImport.(ImportMemory); ok {
+		var newImport = cNewWasmerImportTMemory(
+			importMemory.namespace,
+			importName,
+			importMemory.memory.memory,
+		)
+
+		return &newImport
+	}
+
+	return nil
+}
+
+// InstanceContext represents a way to access instance API from within
+// an imported context.
+type InstanceContext struct {
+	// The raw Wasmer instance context pointer.
+	context *cWasmerInstanceContextT
+	// Borrowed view of the instance's memory.
+	memory Memory
+}
+
+// IntoInstanceContext casts the first `context unsafe.Pointer`
+// argument of an imported function into an `InstanceContext`, borrowing
+// (not owning) the instance's memory.
+func IntoInstanceContext(instanceContext unsafe.Pointer) InstanceContext {
+	context := (*cWasmerInstanceContextT)(instanceContext)
+	memory := newBorrowedMemory(cWasmerInstanceContextMemory(context))
+
+	return InstanceContext{context, memory}
+}
+
+// Memory returns the current instance memory.
+func (instanceContext *InstanceContext) Memory() *Memory {
+	return &instanceContext.memory
+}
+
+// Data returns the instance context data as an `interface{}`. It's up to the
+// user to assert the proper type.
+func (instanceContext *InstanceContext) Data() interface{} {
+	// The C side stores only an index; the real value lives in the
+	// package-level instancesContextData map, guarded by its mutex.
+	contextDataIndex := *(*int)(cWasmerInstanceContextDataGet(instanceContext.context))
+
+	instancesContextDataMutex.RLock()
+	defer instancesContextDataMutex.RUnlock()
+
+	return instancesContextData[contextDataIndex]
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/instance.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/instance.go
new file mode 100644
index 00000000..280bdd34
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/instance.go
@@ -0,0 +1,476 @@
+package wasmer
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+// InstanceError represents any kind of errors related to a WebAssembly instance. It
+// is returned by `Instance` functions only.
+type InstanceError struct {
+	// Error message.
+	message string
+}
+
+// NewInstanceError constructs a new `InstanceError` carrying the given
+// message verbatim.
+func NewInstanceError(message string) *InstanceError {
+	return &InstanceError{message}
+}
+
+// Error implements the error interface and returns the error message.
+func (error *InstanceError) Error() string {
+	return error.message
+}
+
+// ExportedFunctionError represents any kind of errors related to a
+// WebAssembly exported function. It is returned by `Instance`
+// functions only.
+type ExportedFunctionError struct {
+	// Name of the offending exported function.
+	functionName string
+	// Error message; may contain a `%s` placeholder for functionName.
+	message string
+}
+
+// NewExportedFunctionError constructs a new `ExportedFunctionError`,
+// where `functionName` is the name of the exported function, and
+// `message` is the error message. If the error message contains `%s`,
+// then this parameter will be replaced by `functionName`.
+func NewExportedFunctionError(functionName string, message string) *ExportedFunctionError {
+	return &ExportedFunctionError{functionName, message}
+}
+
+// Error implements the error interface; any `%s` in the stored message is
+// substituted with the function name.
+func (error *ExportedFunctionError) Error() string {
+	return fmt.Sprintf(error.message, error.functionName)
+}
+
+// Instance represents a WebAssembly instance.
+type Instance struct {
+	// The underlying WebAssembly instance.
+	instance *cWasmerInstanceT
+
+	// The imported functions and memories. Use the
+	// `NewInstanceWithImports` constructor to set it.
+	imports *Imports
+
+	// All functions exported by the WebAssembly instance, indexed
+	// by their name as a string. An exported function is a
+	// regular variadic Go closure. Arguments are untyped. Since
+	// WebAssembly only supports: `i32`, `i64`, `f32` and `f64`
+	// types, the accepted Go types are: `int8`, `uint8`, `int16`,
+	// `uint16`, `int32`, `uint32`, `int64`, `int`, `uint`, `float32`
+	// and `float64`. In addition to those types, the `Value` type
+	// (from this project) is accepted. The conversion from a Go
+	// value to a WebAssembly value is done automatically except for
+	// the `Value` type (where type is coerced, that's the intent
+	// here). The WebAssembly type is automatically inferred. Note
+	// that the returned value is of kind `Value`, and not a
+	// standard Go type.
+	Exports map[string]func(...interface{}) (Value, error)
+
+	// The exported memory of a WebAssembly instance.
+	Memory *Memory
+
+	// Index into the package-level instancesContextData map where this
+	// instance's context data lives; presumably set by SetContextData
+	// (not visible in this chunk) — nil until then.
+	contextDataIndex *int
+}
+
+// NewInstance constructs a new `Instance` with no imports.
+func NewInstance(bytes []byte) (Instance, error) {
+	return NewInstanceWithImports(bytes, NewImports())
+}
+
+// NewInstanceWithImports constructs a new `Instance` with imports. It
+// returns an `InstanceError` when the bytes are empty or cannot be
+// instantiated as a Wasm module.
+func NewInstanceWithImports(bytes []byte, imports *Imports) (Instance, error) {
+	return newInstanceWithImports(
+		imports,
+		func(wasmImportsCPointer *cWasmerImportT, numberOfImports int) (*cWasmerInstanceT, error) {
+			// Guard against an empty module: `&bytes[0]` below panics on a
+			// zero-length slice, and an empty byte stream can never be a
+			// valid Wasm module anyway.
+			if len(bytes) == 0 {
+				return nil, NewInstanceError("Failed to instantiate the module: the module bytes are empty.")
+			}
+
+			var instance *cWasmerInstanceT
+
+			var compileResult = cWasmerInstantiate(
+				&instance,
+				(*cUchar)(unsafe.Pointer(&bytes[0])),
+				cUint(len(bytes)),
+				wasmImportsCPointer,
+				cInt(numberOfImports),
+			)
+
+			if compileResult != cWasmerOk {
+				var lastError, err = GetLastError()
+				var errorMessage = "Failed to instantiate the module:\n    %s"
+
+				if err != nil {
+					errorMessage = fmt.Sprintf(errorMessage, "(unknown details)")
+				} else {
+					errorMessage = fmt.Sprintf(errorMessage, lastError)
+				}
+
+				return nil, NewInstanceError(errorMessage)
+			}
+
+			return instance, nil
+		},
+	)
+}
+
+// newInstanceWithImports converts the Go-side imports into a flat C array,
+// delegates the actual instantiation to instanceBuilder, and then collects
+// the resulting instance's exports and memory.
+func newInstanceWithImports(
+	imports *Imports,
+	instanceBuilder func(*cWasmerImportT, int) (*cWasmerInstanceT, error),
+) (Instance, error) {
+	var numberOfImports = len(imports.imports)
+	var wasmImports = make([]cWasmerImportT, numberOfImports)
+	var importNth = 0
+
+	for importName, importImport := range imports.imports {
+		wasmImports[importNth] = *getCWasmerImport(importName, importImport)
+		importNth++
+	}
+
+	var wasmImportsCPointer *cWasmerImportT
+
+	if numberOfImports > 0 {
+		wasmImportsCPointer = (*cWasmerImportT)(unsafe.Pointer(&wasmImports[0]))
+	}
+
+	instance, err := instanceBuilder(wasmImportsCPointer, numberOfImports)
+
+	// Zero-valued Instance returned on every error path.
+	var emptyInstance = Instance{instance: nil, imports: nil, Exports: nil, Memory: nil}
+
+	if err != nil {
+		return emptyInstance, err
+	}
+
+	exports, memoryPointer, err := getExportsFromInstance(instance)
+
+	if err != nil {
+		return emptyInstance, err
+	}
+
+	return Instance{instance: instance, imports: imports, Exports: exports, Memory: memoryPointer}, nil
+}
+
+// getExportsFromInstance walks the instance's export list and returns, for
+// every exported function, a Go closure that marshals Go arguments into
+// WebAssembly values, calls the function, and converts the single optional
+// result back into a `Value`. It also returns a pointer to the exported
+// memory, if the instance has one, or an error.
+func getExportsFromInstance(
+	instance *cWasmerInstanceT,
+) (
+	map[string]func(...interface{}) (Value, error),
+	*Memory,
+	error,
+) {
+	var exports = make(map[string]func(...interface{}) (Value, error))
+	var wasmExports *cWasmerExportsT
+	var memoryPointer *Memory
+	cWasmerInstanceExports(instance, &wasmExports)
+	defer cWasmerExportsDestroy(wasmExports)
+
+	var numberOfExports = int(cWasmerExportsLen(wasmExports))
+
+	for nth := 0; nth < numberOfExports; nth++ {
+		var wasmExport = cWasmerExportsGet(wasmExports, cInt(nth))
+		var wasmExportKind = cWasmerExportKind(wasmExport)
+
+		switch wasmExportKind {
+		case cWasmMemory:
+			var wasmMemory *cWasmerMemoryT
+
+			if cWasmerExportToMemory(wasmExport, &wasmMemory) != cWasmerOk {
+				return nil, nil, NewInstanceError("Failed to extract the exported memory.")
+			}
+
+			var memory = newBorrowedMemory(wasmMemory)
+			memoryPointer = &memory
+
+		case cWasmFunction:
+			// Read the function's name and input/output arities so the
+			// closure below can validate calls up front.
+			var wasmExportName = cWasmerExportName(wasmExport)
+			var exportedFunctionName = cGoStringN((*cChar)(unsafe.Pointer(wasmExportName.bytes)), (cInt)(wasmExportName.bytes_len))
+			var wasmFunction = cWasmerExportToFunc(wasmExport)
+			var wasmFunctionInputsArity cUint32T
+
+			if cWasmerExportFuncParamsArity(wasmFunction, &wasmFunctionInputsArity) != cWasmerOk {
+				return nil, nil, NewExportedFunctionError(exportedFunctionName, "Failed to read the input arity of the `%s` exported function.")
+			}
+
+			var wasmFunctionInputSignatures = make([]cWasmerValueTag, int(wasmFunctionInputsArity))
+
+			if wasmFunctionInputsArity > 0 {
+				var wasmFunctionInputSignaturesCPointer = (*cWasmerValueTag)(unsafe.Pointer(&wasmFunctionInputSignatures[0]))
+
+				if cWasmerExportFuncParams(wasmFunction, wasmFunctionInputSignaturesCPointer, wasmFunctionInputsArity) != cWasmerOk {
+					return nil, nil, NewExportedFunctionError(exportedFunctionName, "Failed to read the signature of the `%s` exported function.")
+				}
+			}
+
+			var wasmFunctionOutputsArity cUint32T
+
+			if cWasmerExportFuncResultsArity(wasmFunction, &wasmFunctionOutputsArity) != cWasmerOk {
+				return nil, nil, NewExportedFunctionError(exportedFunctionName, "Failed to read the output arity of the `%s` exported function.")
+			}
+
+			var numberOfExpectedArguments = int(wasmFunctionInputsArity)
+
+			// Scratch arrays reused by every call to this export's closure.
+			var wasmInputs = make([]cWasmerValueT, wasmFunctionInputsArity)
+			var wasmOutputs = make([]cWasmerValueT, wasmFunctionOutputsArity)
+
+			// The C name string must outlive the closure; the finalizer on
+			// this holder frees it once the closure becomes unreachable.
+			type wasmFunctionNameHolder struct {
+				CPointer *cChar
+			}
+
+			wasmFunctionName := &wasmFunctionNameHolder{
+				CPointer: cCString(exportedFunctionName),
+			}
+
+			runtime.SetFinalizer(wasmFunctionName, func(h *wasmFunctionNameHolder) {
+				cFree(unsafe.Pointer(h.CPointer))
+			})
+
+			exports[exportedFunctionName] = func(arguments ...interface{}) (Value, error) {
+				var numberOfGivenArguments = len(arguments)
+				var diff = numberOfExpectedArguments - numberOfGivenArguments
+
+				if diff > 0 {
+					return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Missing %d argument(s) when calling the `%%s` exported function; Expect %d argument(s), given %d.", diff, numberOfExpectedArguments, numberOfGivenArguments))
+				} else if diff < 0 {
+					return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Given %d extra argument(s) when calling the `%%s` exported function; Expect %d argument(s), given %d.", -diff, numberOfExpectedArguments, numberOfGivenArguments))
+				}
+
+				// Convert every Go argument to the Wasm type the signature
+				// demands, widening smaller integer kinds as needed.
+				for nth, value := range arguments {
+					var wasmInputType = wasmFunctionInputSignatures[nth]
+
+					switch wasmInputType {
+					case cWasmI32:
+						wasmInputs[nth].tag = cWasmI32
+						var pointer = (*int32)(unsafe.Pointer(&wasmInputs[nth].value))
+
+						switch value.(type) {
+						case int8:
+							*pointer = int32(value.(int8))
+						case uint8:
+							*pointer = int32(value.(uint8))
+						case int16:
+							*pointer = int32(value.(int16))
+						case uint16:
+							*pointer = int32(value.(uint16))
+						case int32:
+							*pointer = int32(value.(int32))
+						case int:
+							*pointer = int32(value.(int))
+						case uint:
+							*pointer = int32(value.(uint))
+						case Value:
+							var value = value.(Value)
+
+							if value.GetType() != TypeI32 {
+								return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `i32`, cannot cast given value to this type.", nth+1))
+							}
+
+							*pointer = value.ToI32()
+						default:
+							return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `i32`, cannot cast given value to this type.", nth+1))
+						}
+					case cWasmI64:
+						wasmInputs[nth].tag = cWasmI64
+						var pointer = (*int64)(unsafe.Pointer(&wasmInputs[nth].value))
+
+						switch value.(type) {
+						case int8:
+							*pointer = int64(value.(int8))
+						case uint8:
+							*pointer = int64(value.(uint8))
+						case int16:
+							*pointer = int64(value.(int16))
+						case uint16:
+							*pointer = int64(value.(uint16))
+						case int32:
+							*pointer = int64(value.(int32))
+						case uint32:
+							*pointer = int64(value.(uint32))
+						case int64:
+							*pointer = int64(value.(int64))
+						case int:
+							*pointer = int64(value.(int))
+						case uint:
+							*pointer = int64(value.(uint))
+						case Value:
+							var value = value.(Value)
+
+							if value.GetType() != TypeI64 {
+								return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `i64`, cannot cast given value to this type.", nth+1))
+							}
+
+							*pointer = value.ToI64()
+						default:
+							return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `i64`, cannot cast given value to this type.", nth+1))
+						}
+					case cWasmF32:
+						wasmInputs[nth].tag = cWasmF32
+						var pointer = (*float32)(unsafe.Pointer(&wasmInputs[nth].value))
+
+						switch value.(type) {
+						case float32:
+							*pointer = value.(float32)
+						case Value:
+							var value = value.(Value)
+
+							if value.GetType() != TypeF32 {
+								return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `f32`, cannot cast given value to this type.", nth+1))
+							}
+
+							*pointer = value.ToF32()
+						default:
+							return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `f32`, cannot cast given value to this type.", nth+1))
+						}
+					case cWasmF64:
+						wasmInputs[nth].tag = cWasmF64
+						var pointer = (*float64)(unsafe.Pointer(&wasmInputs[nth].value))
+
+						switch value.(type) {
+						case float32:
+							*pointer = float64(value.(float32))
+						case float64:
+							*pointer = value.(float64)
+						case Value:
+							var value = value.(Value)
+
+							if value.GetType() != TypeF64 {
+								return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `f64`, cannot cast given value to this type.", nth+1))
+							}
+
+							*pointer = value.ToF64()
+						default:
+							return I32(0), NewExportedFunctionError(exportedFunctionName, fmt.Sprintf("Argument #%d of the `%%s` exported function must be of type `f64`, cannot cast given value to this type.", nth+1))
+						}
+					default:
+						panic(fmt.Sprintf("Invalid arguments type when calling the `%s` exported function.", exportedFunctionName))
+					}
+				}
+
+				var wasmInputsCPointer *cWasmerValueT
+
+				if wasmFunctionInputsArity > 0 {
+					wasmInputsCPointer = (*cWasmerValueT)(unsafe.Pointer(&wasmInputs[0]))
+				} else {
+					wasmInputsCPointer = (*cWasmerValueT)(unsafe.Pointer(&wasmInputs))
+				}
+
+				var wasmOutputsCPointer *cWasmerValueT
+
+				if wasmFunctionOutputsArity > 0 {
+					wasmOutputsCPointer = (*cWasmerValueT)(unsafe.Pointer(&wasmOutputs[0]))
+				} else {
+					wasmOutputsCPointer = (*cWasmerValueT)(unsafe.Pointer(&wasmOutputs))
+				}
+
+				var callResult = cWasmerInstanceCall(
+					instance,
+					wasmFunctionName.CPointer,
+					wasmInputsCPointer,
+					wasmFunctionInputsArity,
+					wasmOutputsCPointer,
+					wasmFunctionOutputsArity,
+				)
+
+				if callResult != cWasmerOk {
+					return I32(0), NewExportedFunctionError(exportedFunctionName, "Failed to call the `%s` exported function.")
+				}
+
+				// Convert the single result (if any) back into a Value.
+				if wasmFunctionOutputsArity > 0 {
+					var result = wasmOutputs[0]
+
+					switch result.tag {
+					case cWasmI32:
+						pointer := (*int32)(unsafe.Pointer(&result.value))
+
+						return I32(*pointer), nil
+					case cWasmI64:
+						pointer := (*int64)(unsafe.Pointer(&result.value))
+
+						return I64(*pointer), nil
+					case cWasmF32:
+						pointer := (*float32)(unsafe.Pointer(&result.value))
+
+						return F32(*pointer), nil
+					case cWasmF64:
+						pointer := (*float64)(unsafe.Pointer(&result.value))
+
+						return F64(*pointer), nil
+					default:
+						panic("unreachable")
+					}
+				} else {
+					return void(), nil
+				}
+			}
+		}
+	}
+	return exports, memoryPointer, nil
+}
+
+// HasMemory checks whether the instance has at least one exported memory.
+func (instance *Instance) HasMemory() bool {
+ return nil != instance.Memory
+}
+
+var (
+
+ // In order to avoid passing illegal Go pointers across the
+ // CGo FFI, store Instance Context Data in instanceContextData
+ // and simply pass the index through the FFI instead.
+ //
+ // See `Instance.SetContextData` and `InstanceContext.Data`.
+ instancesContextData = make(map[int]interface{})
+ nextContextDataIndex int
+ instancesContextDataMutex sync.RWMutex
+)
+
+// SetContextData assigns a data that can be used by all imported functions.
+// Each imported function receives as its first argument an instance context
+// (see `InstanceContext`). An instance context can hold any kind of data,
+// including data that contain Go references such as slices, maps, or structs
+// with reference types or pointers. It is important to understand that data is
+// global to the instance, and thus is shared by all imported functions.
+func (instance *Instance) SetContextData(data interface{}) {
+ instancesContextDataMutex.Lock()
+
+ if instance.contextDataIndex == nil {
+ instance.contextDataIndex = new(int)
+ *instance.contextDataIndex = nextContextDataIndex
+ nextContextDataIndex++
+
+ // When instance is garbage-collected, clean up its
+ // `instanceContextData`. Set the finalizer on the
+ // unexported `instance.contextDataIndex`, instead of
+ // directly on the instance, to allow users of this
+ // package to set their own finalizer on the `Instance`
+ // for other reasons.
+ runtime.SetFinalizer(instance.contextDataIndex, func(index *int) {
+ // Launch a goroutine to avoid blocking other
+ // finalizers while waiting for the mutex lock.
+ go func() {
+ instancesContextDataMutex.Lock()
+ delete(instancesContextData, *index)
+ instancesContextDataMutex.Unlock()
+ }()
+ })
+ }
+
+ instancesContextData[*instance.contextDataIndex] = data
+ instancesContextDataMutex.Unlock()
+
+ cWasmerInstanceContextDataSet(
+ instance.instance,
+ unsafe.Pointer(instance.contextDataIndex),
+ )
+}
+
+// Close closes/frees an `Instance`.
+func (instance *Instance) Close() {
+ if instance.imports != nil {
+ instance.imports.Close()
+ }
+
+ if instance.instance != nil {
+ cWasmerInstanceDestroy(instance.instance)
+ }
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/libwasmer.dylib b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/libwasmer.dylib
new file mode 100644
index 00000000..b7a0e496
Binary files /dev/null and b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/libwasmer.dylib differ
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/libwasmer.so b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/libwasmer.so
new file mode 100644
index 00000000..6990f9ac
Binary files /dev/null and b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/libwasmer.so differ
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/memory.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/memory.go
new file mode 100644
index 00000000..ed4598d2
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/memory.go
@@ -0,0 +1,129 @@
+package wasmer
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// MemoryError represents any kind of errors related to a WebAssembly memory. It
+// is returned by `Memory` functions only.
+type MemoryError struct {
+ // Error message.
+ message string
+}
+
+// NewMemoryError constructs a new `MemoryError`.
+func NewMemoryError(message string) *MemoryError {
+ return &MemoryError{message}
+}
+
+// `MemoryError` is an actual error. The `Error` function returns
+// the error message.
+func (error *MemoryError) Error() string {
+ return error.message
+}
+
+// Memory represents a WebAssembly memory. To read and write data,
+// please see the `Data` function. The memory can be owned or
+// borrowed. It is only possible to create an owned memory from the
+// user-land.
+type Memory struct {
+ memory *cWasmerMemoryT
+
+ // If set to true, the memory can be freed.
+ owned bool
+}
+
+// NewMemory instantiates a new owned WebAssembly memory, bound for
+// imported memory.
+func NewMemory(min, max uint32) (*Memory, error) {
+ var memory Memory
+
+ memory.owned = true
+ newResult := cWasmerMemoryNew(&memory.memory, cUint32T(min), cUint32T(max))
+
+ if newResult != cWasmerOk {
+ var lastError, err = GetLastError()
+ var errorMessage = "Failed to allocate the memory:\n %s"
+
+ if err != nil {
+ errorMessage = fmt.Sprintf(errorMessage, "(unknown details)")
+ } else {
+ errorMessage = fmt.Sprintf(errorMessage, lastError)
+ }
+
+ return nil, NewMemoryError(errorMessage)
+ }
+
+ return &memory, nil
+}
+
+// Creates a new WebAssembly borrowed memory.
+func newBorrowedMemory(memory *cWasmerMemoryT) Memory {
+ return Memory{memory, false}
+}
+
+// IsOwned checks whether the memory is owned, or borrowed.
+func (memory *Memory) IsOwned() bool {
+ return memory.owned
+}
+
+// Length calculates the memory length (in bytes).
+func (memory *Memory) Length() uint32 {
+ if nil == memory.memory {
+ return 0
+ }
+
+ return uint32(cWasmerMemoryDataLength(memory.memory))
+}
+
+// Data returns a slice of bytes over the WebAssembly memory.
+func (memory *Memory) Data() []byte {
+ if nil == memory.memory {
+ return make([]byte, 0)
+ }
+
+ var length = memory.Length()
+ var data = (*uint8)(cWasmerMemoryData(memory.memory))
+
+ var header reflect.SliceHeader
+ header = *(*reflect.SliceHeader)(unsafe.Pointer(&header))
+
+ header.Data = uintptr(unsafe.Pointer(data))
+ header.Len = int(length)
+ header.Cap = int(length)
+
+ return *(*[]byte)(unsafe.Pointer(&header))
+}
+
+// Grow the memory by a number of pages (65kb each).
+func (memory *Memory) Grow(numberOfPages uint32) error {
+ if nil == memory.memory {
+ return nil
+ }
+
+ var growResult = cWasmerMemoryGrow(memory.memory, cUint32T(numberOfPages))
+
+ if growResult != cWasmerOk {
+ var lastError, err = GetLastError()
+ var errorMessage = "Failed to grow the memory:\n %s"
+
+ if err != nil {
+ errorMessage = fmt.Sprintf(errorMessage, "(unknown details)")
+ } else {
+ errorMessage = fmt.Sprintf(errorMessage, lastError)
+ }
+
+ return NewMemoryError(errorMessage)
+ }
+
+ return nil
+}
+
+// Close closes/frees memory allocated at the NewMemory at time.
+func (memory *Memory) Close() {
+ if memory.IsOwned() {
+ cWasmerMemoryDestroy(memory.memory)
+ }
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/module.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/module.go
new file mode 100644
index 00000000..15533f4f
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/module.go
@@ -0,0 +1,285 @@
+package wasmer
+
+import (
+ "fmt"
+ "io/ioutil"
+ "unsafe"
+)
+
+// ReadBytes reads a `.wasm` file and returns its content as an array of bytes.
+func ReadBytes(filename string) ([]byte, error) {
+ return ioutil.ReadFile(filename)
+}
+
+// Validate validates a sequence of bytes that is supposed to represent a valid
+// WebAssembly module.
+func Validate(bytes []byte) bool {
+ return true == cWasmerValidate((*cUchar)(unsafe.Pointer(&bytes[0])), cUint(len(bytes)))
+}
+
+// ModuleError represents any kind of errors related to a WebAssembly
+// module.
+type ModuleError struct {
+ // Error message.
+ message string
+}
+
+// NewModuleError constructs a new `ModuleError`.
+func NewModuleError(message string) *ModuleError {
+ return &ModuleError{message}
+}
+
+// `ModuleError` is an actual error. The `Error` function returns the
+// error message.
+func (error *ModuleError) Error() string {
+ return error.message
+}
+
+// ExportDescriptor represents an export descriptor of a WebAssembly
+// module. It is different of an export of a WebAssembly instance. An
+// export descriptor only has a name and a kind/type.
+type ExportDescriptor struct {
+ // The export name.
+ Name string
+
+ // The export kind/type.
+ Kind ImportExportKind
+}
+
+// ImportExportKind represents an import/export descriptor kind/type.
+type ImportExportKind int
+
+const (
+ // ImportExportKindFunction represents an import/export descriptor of kind function.
+ ImportExportKindFunction = ImportExportKind(cWasmFunction)
+
+ // ImportExportKindGlobal represents an import/export descriptor of kind global.
+ ImportExportKindGlobal = ImportExportKind(cWasmGlobal)
+
+ // ImportExportKindMemory represents an import/export descriptor of kind memory.
+ ImportExportKindMemory = ImportExportKind(cWasmMemory)
+
+ // ImportExportKindTable represents an import/export descriptor of kind table.
+ ImportExportKindTable = ImportExportKind(cWasmTable)
+)
+
+// ImportDescriptor represents an import descriptor of a WebAssembly
+// module. It is different of an import of a WebAssembly instance. An
+// import descriptor only has a name, a namespace, and a kind/type.
+type ImportDescriptor struct {
+ // The import name.
+ Name string
+
+ // The import namespace.
+ Namespace string
+
+ // The import kind/type.
+ Kind ImportExportKind
+}
+
+// Module represents a WebAssembly module.
+type Module struct {
+ module *cWasmerModuleT
+ Exports []ExportDescriptor
+ Imports []ImportDescriptor
+}
+
+// Compile compiles a WebAssembly module from bytes.
+func Compile(bytes []byte) (Module, error) {
+ var module *cWasmerModuleT
+
+ var compileResult = cWasmerCompile(
+ &module,
+ (*cUchar)(unsafe.Pointer(&bytes[0])),
+ cUint(len(bytes)),
+ )
+
+ var emptyModule = Module{module: nil}
+
+ if compileResult != cWasmerOk {
+ return emptyModule, NewModuleError("Failed to compile the module.")
+ }
+
+ var exports = moduleExports(module)
+ var imports = moduleImports(module)
+
+ return Module{module, exports, imports}, nil
+}
+
+func moduleExports(module *cWasmerModuleT) []ExportDescriptor {
+ var exportDescriptors *cWasmerExportDescriptorsT
+ cWasmerExportDescriptors(module, &exportDescriptors)
+ defer cWasmerExportDescriptorsDestroy(exportDescriptors)
+
+ var numberOfExportDescriptors = int(cWasmerExportDescriptorsLen(exportDescriptors))
+ var exports = make([]ExportDescriptor, numberOfExportDescriptors)
+
+ for nth := 0; nth < numberOfExportDescriptors; nth++ {
+ var exportDescriptor = cWasmerExportDescriptorsGet(exportDescriptors, cInt(nth))
+ var exportKind = cWasmerExportDescriptorKind(exportDescriptor)
+ var wasmExportName = cWasmerExportDescriptorName(exportDescriptor)
+ var exportName = cGoStringN((*cChar)(unsafe.Pointer(wasmExportName.bytes)), (cInt)(wasmExportName.bytes_len))
+
+ exports[nth] = ExportDescriptor{
+ Name: exportName,
+ Kind: ImportExportKind(exportKind),
+ }
+ }
+
+ return exports
+}
+
+func moduleImports(module *cWasmerModuleT) []ImportDescriptor {
+ var importDescriptors *cWasmerImportDescriptorsT
+ cWasmerImportDescriptors(module, &importDescriptors)
+ defer cWasmerImportDescriptorsDestroy(importDescriptors)
+
+ var numberOfImportDescriptors = int(cWasmerImportDescriptorsLen(importDescriptors))
+ var imports = make([]ImportDescriptor, numberOfImportDescriptors)
+
+ for nth := 0; nth < numberOfImportDescriptors; nth++ {
+ var importDescriptor = cWasmerImportDescriptorsGet(importDescriptors, cInt(nth))
+ var importKind = cWasmerImportDescriptorKind(importDescriptor)
+ var wasmImportName = cWasmerImportDescriptorName(importDescriptor)
+ var importName = cGoStringN((*cChar)(unsafe.Pointer(wasmImportName.bytes)), (cInt)(wasmImportName.bytes_len))
+ var wasmImportNamespace = cWasmerImportDescriptorModuleName(importDescriptor)
+ var importNamespace = cGoStringN((*cChar)(unsafe.Pointer(wasmImportNamespace.bytes)), (cInt)(wasmImportNamespace.bytes_len))
+
+ imports[nth] = ImportDescriptor{
+ Name: importName,
+ Namespace: importNamespace,
+ Kind: ImportExportKind(importKind),
+ }
+ }
+
+ return imports
+}
+
+// Instantiate creates a new instance of the WebAssembly module.
+func (module *Module) Instantiate() (Instance, error) {
+ return module.InstantiateWithImports(NewImports())
+}
+
+// InstantiateWithImports creates a new instance with imports of the WebAssembly module.
+func (module *Module) InstantiateWithImports(imports *Imports) (Instance, error) {
+ return newInstanceWithImports(
+ imports,
+ func(wasmImportsCPointer *cWasmerImportT, numberOfImports int) (*cWasmerInstanceT, error) {
+ var instance *cWasmerInstanceT
+
+ var instantiateResult = cWasmerModuleInstantiate(
+ module.module,
+ &instance,
+ wasmImportsCPointer,
+ cInt(numberOfImports),
+ )
+
+ if instantiateResult != cWasmerOk {
+ var lastError, err = GetLastError()
+ var errorMessage = "Failed to instantiate the module:\n %s"
+
+ if err != nil {
+ errorMessage = fmt.Sprintf(errorMessage, "(unknown details)")
+ } else {
+ errorMessage = fmt.Sprintf(errorMessage, lastError)
+ }
+
+ return nil, NewModuleError(errorMessage)
+ }
+
+ return instance, nil
+ },
+ )
+}
+
+// InstantiateWithImportObject creates a new instance of a WebAssembly module with an
+// `ImportObject`
+func (module *Module) InstantiateWithImportObject(importObject *ImportObject) (Instance, error) {
+ var instance *cWasmerInstanceT
+ var emptyInstance = Instance{instance: nil, imports: nil, Exports: nil, Memory: nil}
+
+ var instantiateResult = cWasmerModuleImportInstantiate(&instance, module.module, importObject.inner)
+
+ if instantiateResult != cWasmerOk {
+ var lastError, err = GetLastError()
+ var errorMessage = "Failed to instantiate the module:\n %s"
+
+ if err != nil {
+ errorMessage = fmt.Sprintf(errorMessage, "(unknown details)")
+ } else {
+ errorMessage = fmt.Sprintf(errorMessage, lastError)
+ }
+
+ return emptyInstance, NewModuleError(errorMessage)
+ }
+
+ exports, memoryPointer, err := getExportsFromInstance(instance)
+
+ if err != nil {
+ return emptyInstance, err
+ }
+
+ imports, err := importObject.Imports()
+
+ if err != nil {
+ return emptyInstance, NewModuleError(fmt.Sprintf("Could not get imports from ImportObject: %s", err))
+ }
+
+ return Instance{instance: instance, imports: imports, Exports: exports, Memory: memoryPointer}, nil
+}
+
+// Serialize serializes the current module into a sequence of
+// bytes. Those bytes can be deserialized into a module with
+// `DeserializeModule`.
+func (module *Module) Serialize() ([]byte, error) {
+ var serializedModule *cWasmerSerializedModuleT
+ var serializeResult = cWasmerModuleSerialize(&serializedModule, module.module)
+ defer cWasmerSerializedModuleDestroy(serializedModule)
+
+ if serializeResult != cWasmerOk {
+ return nil, NewModuleError("Failed to serialize the module.")
+ }
+
+ return cWasmerSerializedModuleBytes(serializedModule), nil
+}
+
+// DeserializeModule deserializes a sequence of bytes into a
+// module. Ideally, those bytes must come from `Module.Serialize`.
+func DeserializeModule(serializedModuleBytes []byte) (Module, error) {
+ var emptyModule = Module{module: nil}
+
+ if len(serializedModuleBytes) < 1 {
+ return emptyModule, NewModuleError("Serialized module bytes are empty.")
+ }
+
+ var serializedModule *cWasmerSerializedModuleT
+ var deserializeBytesResult = cWasmerSerializedModuleFromBytes(
+ &serializedModule,
+ (*cUint8T)(unsafe.Pointer(&serializedModuleBytes[0])),
+ cInt(len(serializedModuleBytes)),
+ )
+ defer cWasmerSerializedModuleDestroy(serializedModule)
+
+ if deserializeBytesResult != cWasmerOk {
+ return emptyModule, NewModuleError("Failed to reconstitute the serialized module from the given bytes.")
+ }
+
+ var module *cWasmerModuleT
+ var deserializeResult = cWasmerModuleDeserialize(&module, serializedModule)
+
+ if deserializeResult != cWasmerOk {
+ return emptyModule, NewModuleError("Failed to deserialize the module.")
+ }
+
+ var exports = moduleExports(module)
+ var imports = moduleImports(module)
+
+ return Module{module, exports, imports}, nil
+}
+
+// Close closes/frees a `Module`.
+func (module *Module) Close() {
+ if module.module != nil {
+ cWasmerModuleDestroy(module.module)
+ }
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/value.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/value.go
new file mode 100644
index 00000000..635fdec8
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/value.go
@@ -0,0 +1,131 @@
+package wasmer
+
+import (
+ "fmt"
+ "math"
+)
+
+// ValueType represents the `Value` type.
+type ValueType int
+
+const (
+ // TypeI32 represents the WebAssembly `i32` type.
+ TypeI32 ValueType = iota
+
+ // TypeI64 represents the WebAssembly `i64` type.
+ TypeI64
+
+ // TypeF32 represents the WebAssembly `f32` type.
+ TypeF32
+
+ // TypeF64 represents the WebAssembly `f64` type.
+ TypeF64
+
+ // TypeVoid represents nothing.
+ // WebAssembly doesn't have “void” type, but it is introduced
+ // here to represent the returned value of a WebAssembly exported
+ // function that returns nothing.
+ TypeVoid
+)
+
+// Value represents a WebAssembly value of a particular type.
+type Value struct {
+ // The WebAssembly value (as bits).
+ value uint64
+
+ // The WebAssembly value type.
+ ty ValueType
+}
+
+// I32 constructs a WebAssembly value of type `i32`.
+func I32(value int32) Value {
+ return Value{
+ value: uint64(value),
+ ty: TypeI32,
+ }
+}
+
+// I64 constructs a WebAssembly value of type `i64`.
+func I64(value int64) Value {
+ return Value{
+ value: uint64(value),
+ ty: TypeI64,
+ }
+}
+
+// F32 constructs a WebAssembly value of type `f32`.
+func F32(value float32) Value {
+ return Value{
+ value: uint64(math.Float32bits(value)),
+ ty: TypeF32,
+ }
+}
+
+// F64 constructs a WebAssembly value of type `f64`.
+func F64(value float64) Value {
+ return Value{
+ value: math.Float64bits(value),
+ ty: TypeF64,
+ }
+}
+
+// void constructs an empty WebAssembly value.
+func void() Value {
+ return Value{
+ value: 0,
+ ty: TypeVoid,
+ }
+}
+
+// GetType gets the type of the WebAssembly value.
+func (value Value) GetType() ValueType {
+ return value.ty
+}
+
+// ToI32 reads the WebAssembly value bits as an `int32`. The WebAssembly
+// value type is ignored.
+func (value Value) ToI32() int32 {
+ return int32(value.value)
+}
+
+// ToI64 reads the WebAssembly value bits as an `int64`. The WebAssembly
+// value type is ignored.
+func (value Value) ToI64() int64 {
+ return int64(value.value)
+}
+
+// ToF32 reads the WebAssembly value bits as a `float32`. The WebAssembly
+// value type is ignored.
+func (value Value) ToF32() float32 {
+ return math.Float32frombits(uint32(value.value))
+}
+
+// ToF64 reads the WebAssembly value bits as a `float64`. The WebAssembly
+// value type is ignored.
+func (value Value) ToF64() float64 {
+ return math.Float64frombits(value.value)
+}
+
+// ToVoid reads the WebAssembly value bits as a `nil`. The WebAssembly
+// value type is ignored.
+func (value Value) ToVoid() interface{} {
+ return nil
+}
+
+// String formats the WebAssembly value as a Go string.
+func (value Value) String() string {
+ switch value.ty {
+ case TypeI32:
+ return fmt.Sprintf("%d", value.ToI32())
+ case TypeI64:
+ return fmt.Sprintf("%d", value.ToI64())
+ case TypeF32:
+ return fmt.Sprintf("%f", value.ToF32())
+ case TypeF64:
+ return fmt.Sprintf("%f", value.ToF64())
+ case TypeVoid:
+ return "void"
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasi.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasi.go
new file mode 100644
index 00000000..58a0282a
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasi.go
@@ -0,0 +1,121 @@
+package wasmer
+
+import (
+ "unsafe"
+)
+
+// WasiVersion represents the WASI version.
+type WasiVersion uint
+
+const (
+ // Unknown represents an unknown WASI version.
+ Unknown = WasiVersion(cVersionUnknown)
+
+ // Latest represents the latest WASI version.
+ Latest = WasiVersion(cVersionSnapshot0)
+
+ // Snapshot0 represents the `wasi_unstable` WASI version.
+ Snapshot0 = WasiVersion(cVersionSnapshot0)
+
+ // Snapshot1 represents the `wasi_snapshot1_preview` WASI version.
+ Snapshot1 = WasiVersion(cVersionSnapshot1)
+)
+
+// MapDirEntry is an entry that can be passed to `NewWasiImportObject`.
+// Preopens a file for the WASI module but renames it to the given name
+type MapDirEntry struct {
+ alias string
+ hostPath string
+}
+
+// NewDefaultWasiImportObject constructs a new `ImportObject`
+// with WASI host imports.
+//
+// To specify WASI program arguments, environment variables,
+// preopened directories, and more, see `NewWasiImportObject`
+func NewDefaultWasiImportObject() *ImportObject {
+ return NewDefaultWasiImportObjectForVersion(Latest)
+}
+
+// NewDefaultWasiImportObjectForVersion is similar to
+// `NewDefaultWasiImportObject` but it specifies the WASI version.
+func NewDefaultWasiImportObjectForVersion(version WasiVersion) *ImportObject {
+ var inner = cNewWasmerWasiImportObjectForVersion((uint)(version), nil, 0, nil, 0, nil, 0, nil, 0)
+
+ return &ImportObject{inner}
+}
+
+// NewWasiImportObject creates an `ImportObject` with the default WASI imports.
+// Specify arguments (the first is the program name),
+// environment variables ("envvar=value"), preopened directories
+// (host file paths), and mapped directories (host file paths with an
+// alias, see `MapDirEntry`)
+func NewWasiImportObject(
+ arguments []string,
+ environmentVariables []string,
+ preopenedDirs []string,
+ mappedDirs []MapDirEntry,
+) *ImportObject {
+ return NewWasiImportObjectForVersion(
+ Latest,
+ arguments,
+ environmentVariables,
+ preopenedDirs,
+ mappedDirs,
+ )
+}
+
+// NewWasiImportObjectForVersion is similar to `NewWasiImportObject`
+// but it specifies the WASI version.
+func NewWasiImportObjectForVersion(
+ version WasiVersion,
+ arguments []string,
+ environmentVariables []string,
+ preopenedDirs []string,
+ mappedDirs []MapDirEntry,
+) *ImportObject {
+ var argumentsBytes = []cWasmerByteArray{}
+
+ for _, argument := range arguments {
+ argumentsBytes = append(argumentsBytes, cGoStringToWasmerByteArray(argument))
+ }
+
+ var environmentVariablesBytes = []cWasmerByteArray{}
+
+ for _, env := range environmentVariables {
+ environmentVariablesBytes = append(environmentVariablesBytes, cGoStringToWasmerByteArray(env))
+ }
+
+ var preopenedDirsBytes = []cWasmerByteArray{}
+
+ for _, preopenedDir := range preopenedDirs {
+ preopenedDirsBytes = append(preopenedDirsBytes, cGoStringToWasmerByteArray(preopenedDir))
+ }
+ var mappedDirsBytes = []cWasmerWasiMapDirEntryT{}
+
+ for _, mappedDir := range mappedDirs {
+ var wasiMappedDir = cAliasAndHostPathToWasiDirEntry(mappedDir.alias, mappedDir.hostPath)
+ mappedDirsBytes = append(mappedDirsBytes, wasiMappedDir)
+ }
+
+ var inner = cNewWasmerWasiImportObject(
+ (*cWasmerByteArray)(unsafe.Pointer(&argumentsBytes)),
+ (uint)(len(argumentsBytes)),
+ (*cWasmerByteArray)(unsafe.Pointer(&environmentVariablesBytes)),
+ (uint)(len(environmentVariablesBytes)),
+ (*cWasmerByteArray)(unsafe.Pointer(&preopenedDirsBytes)),
+ (uint)(len(preopenedDirsBytes)),
+ (*cWasmerWasiMapDirEntryT)(unsafe.Pointer(&mappedDirsBytes)),
+ (uint)(len(mappedDirsBytes)),
+ )
+
+ return &ImportObject{inner}
+}
+
+// WasiGetVersion returns the WASI version of a module if any, other
+// `Unknown` is returned.
+func WasiGetVersion(module Module) WasiVersion {
+ return (WasiVersion)(cWasmerWasiGetVersion(
+ module.module,
+ ))
+}
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.dll b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.dll
new file mode 100644
index 00000000..cbf828ef
Binary files /dev/null and b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.dll differ
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.go b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.go
new file mode 100644
index 00000000..5c97d807
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.go
@@ -0,0 +1,2 @@
+// Package wasmer is a Go library to run WebAssembly binaries.
+package wasmer
diff --git a/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.h b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.h
new file mode 100644
index 00000000..94b2fbb3
--- /dev/null
+++ b/vendor/github.com/wasmerio/go-ext-wasm/wasmer/wasmer.h
@@ -0,0 +1,1534 @@
+
+#if !defined(WASMER_H_MACROS)
+
+#define WASMER_H_MACROS
+
+// Define the `ARCH_X86_X64` constant.
+#if defined(MSVC) && defined(_M_AMD64)
+# define ARCH_X86_64
+#elif (defined(GCC) || defined(__GNUC__) || defined(__clang__)) && defined(__x86_64__)
+# define ARCH_X86_64
+#endif
+
+// Compatibility with non-Clang compilers.
+#if !defined(__has_attribute)
+# define __has_attribute(x) 0
+#endif
+
+// Compatibility with non-Clang compilers.
+#if !defined(__has_declspec_attribute)
+# define __has_declspec_attribute(x) 0
+#endif
+
+// Define the `DEPRECATED` macro.
+#if defined(GCC) || defined(__GNUC__) || __has_attribute(deprecated)
+# define DEPRECATED(message) __attribute__((deprecated(message)))
+#elif defined(MSVC) || __has_declspec_attribute(deprecated)
+# define DEPRECATED(message) __declspec(deprecated(message))
+#endif
+
+#define WASMER_WASI_ENABLED
+#endif // WASMER_H_MACROS
+
+
+#ifndef WASMER_H
+#define WASMER_H
+
+#include
+#include
+#include
+#include
+
+#if defined(WASMER_WASI_ENABLED)
+enum Version {
+ /**
+ * Version cannot be detected or is unknown.
+ */
+ Unknown = 0,
+ /**
+ * Latest version. See `wasmer_wasi::WasiVersion::Latest` to
+ * learn more.
+ */
+ Latest = 1,
+ /**
+ * `wasi_unstable`.
+ */
+ Snapshot0 = 2,
+ /**
+ * `wasi_snapshot_preview1`.
+ */
+ Snapshot1 = 3,
+};
+typedef uint8_t Version;
+#endif
+
+/**
+ * List of export/import kinds.
+ */
+enum wasmer_import_export_kind {
+ /**
+ * The export/import is a function.
+ */
+ WASM_FUNCTION = 0,
+ /**
+ * The export/import is a global.
+ */
+ WASM_GLOBAL = 1,
+ /**
+ * The export/import is a memory.
+ */
+ WASM_MEMORY = 2,
+ /**
+ * The export/import is a table.
+ */
+ WASM_TABLE = 3,
+};
+typedef uint32_t wasmer_import_export_kind;
+
+/**
+ * The `wasmer_result_t` enum is a type that represents either a
+ * success, or a failure.
+ */
+typedef enum {
+ /**
+ * Represents a success.
+ */
+ WASMER_OK = 1,
+ /**
+ * Represents a failure.
+ */
+ WASMER_ERROR = 2,
+} wasmer_result_t;
+
+/**
+ * Represents all possibles WebAssembly value types.
+ *
+ * See `wasmer_value_t` to get a complete example.
+ */
+enum wasmer_value_tag {
+ /**
+ * Represents the `i32` WebAssembly type.
+ */
+ WASM_I32,
+ /**
+ * Represents the `i64` WebAssembly type.
+ */
+ WASM_I64,
+ /**
+ * Represents the `f32` WebAssembly type.
+ */
+ WASM_F32,
+ /**
+ * Represents the `f64` WebAssembly type.
+ */
+ WASM_F64,
+};
+typedef uint32_t wasmer_value_tag;
+
+typedef struct {
+
+} wasmer_module_t;
+
+/**
+ * Opaque pointer to a `wasmer_runtime::Instance` value in Rust.
+ *
+ * A `wasmer_runtime::Instance` represents a WebAssembly instance. It
+ * is generally generated by the `wasmer_instantiate()` function, or by
+ * the `wasmer_module_instantiate()` function for the most common paths.
+ */
+typedef struct {
+
+} wasmer_instance_t;
+
+typedef struct {
+ const uint8_t *bytes;
+ uint32_t bytes_len;
+} wasmer_byte_array;
+
+#if defined(WASMER_EMSCRIPTEN_ENABLED)
+/**
+ * Type used to construct an import_object_t with Emscripten imports.
+ */
+typedef struct {
+
+} wasmer_emscripten_globals_t;
+#endif
+
+typedef struct {
+
+} wasmer_import_object_t;
+
+/**
+ * Opaque pointer to `NamedExportDescriptor`.
+ */
+typedef struct {
+
+} wasmer_export_descriptor_t;
+
+/**
+ * Opaque pointer to `NamedExportDescriptors`.
+ */
+typedef struct {
+
+} wasmer_export_descriptors_t;
+
+/**
+ * Opaque pointer to `wasmer_export_t`.
+ */
+typedef struct {
+
+} wasmer_export_func_t;
+
+/**
+ * Represents a WebAssembly value.
+ *
+ * This is a [Rust union][rust-union], which is equivalent to the C
+ * union. See `wasmer_value_t` to get a complete example.
+ *
+ * [rust-union]: https://doc.rust-lang.org/reference/items/unions.html
+ */
+typedef union {
+ int32_t I32;
+ int64_t I64;
+ float F32;
+ double F64;
+} wasmer_value;
+
+/**
+ * Represents a WebAssembly type and value pair,
+ * i.e. `wasmer_value_tag` and `wasmer_value`. Since the latter is an
+ * union, it's the safe way to read or write a WebAssembly value in
+ * C.
+ *
+ * Example:
+ *
+ * ```c
+ * // Create a WebAssembly value.
+ * wasmer_value_t wasm_value = {
+ * .tag = WASM_I32,
+ * .value.I32 = 42,
+ * };
+ *
+ * // Read a WebAssembly value.
+ * if (wasm_value.tag == WASM_I32) {
+ * int32_t x = wasm_value.value.I32;
+ * // …
+ * }
+ * ```
+ */
+typedef struct {
+ /**
+ * The value type.
+ */
+ wasmer_value_tag tag;
+ /**
+ * The value.
+ */
+ wasmer_value value;
+} wasmer_value_t;
+
+/**
+ * Opaque pointer to `NamedExport`.
+ */
+typedef struct {
+
+} wasmer_export_t;
+
+/**
+ * Opaque pointer to a `wasmer_runtime::Memory` value in Rust.
+ *
+ * A `wasmer_runtime::Memory` represents a WebAssembly memory. It is
+ * possible to create one with `wasmer_memory_new()` and pass it as
+ * imports of an instance, or to read it from exports of an instance
+ * with `wasmer_export_to_memory()`.
+ */
+typedef struct {
+
+} wasmer_memory_t;
+
+/**
+ * Opaque pointer to the opaque structure `crate::NamedExports`,
+ * which is a wrapper around a vector of the opaque structure
+ * `crate::NamedExport`.
+ *
+ * Check the `wasmer_instance_exports()` function to learn more.
+ */
+typedef struct {
+
+} wasmer_exports_t;
+
+typedef struct {
+
+} wasmer_global_t;
+
+typedef struct {
+ bool mutable_;
+ wasmer_value_tag kind;
+} wasmer_global_descriptor_t;
+
+typedef struct {
+
+} wasmer_import_descriptor_t;
+
+typedef struct {
+
+} wasmer_import_descriptors_t;
+
+typedef struct {
+
+} wasmer_import_func_t;
+
+typedef struct {
+
+} wasmer_table_t;
+
+/**
+ * Union of import/export value.
+ */
+typedef union {
+ const wasmer_import_func_t *func;
+ const wasmer_table_t *table;
+ const wasmer_memory_t *memory;
+ const wasmer_global_t *global;
+} wasmer_import_export_value;
+
+typedef struct {
+ wasmer_byte_array module_name;
+ wasmer_byte_array import_name;
+ wasmer_import_export_kind tag;
+ wasmer_import_export_value value;
+} wasmer_import_t;
+
+typedef struct {
+
+} wasmer_import_object_iter_t;
+
+/**
+ * Opaque pointer to a `wasmer_runtime::Ctx` value in Rust.
+ *
+ * An instance context is passed to any host function (aka imported
+ * function) as the first argument. It is necessary to read the
+ * instance data or the memory, respectively with the
+ * `wasmer_instance_context_data_get()` function, and the
+ * `wasmer_instance_context_memory()` function.
+ *
+ * It is also possible to get the instance context outside a host
+ * function by using the `wasmer_instance_context_get()`
+ * function. See also `wasmer_instance_context_data_set()` to set the
+ * instance context data.
+ *
+ * Example:
+ *
+ * ```c
+ * // A host function that prints data from the WebAssembly memory to
+ * // the standard output.
+ * void print(wasmer_instance_context_t *context, int32_t pointer, int32_t length) {
+ *     // Use `wasmer_instance_context_memory()` to get back the first instance memory.
+ * const wasmer_memory_t *memory = wasmer_instance_context_memory(context, 0);
+ *
+ * // Continue…
+ * }
+ * ```
+ */
+typedef struct {
+
+} wasmer_instance_context_t;
+
+/**
+ * The `wasmer_limit_option_t` struct represents an optional limit
+ * for `wasmer_limits_t`.
+ */
+typedef struct {
+ /**
+ * Whether the limit is set.
+ */
+ bool has_some;
+ /**
+ * The limit value.
+ */
+ uint32_t some;
+} wasmer_limit_option_t;
+
+/**
+ * The `wasmer_limits_t` struct is a type that describes memory
+ * options. See the `wasmer_memory_t` struct or the
+ * `wasmer_memory_new()` function to get more information.
+ */
+typedef struct {
+ /**
+ * The minimum number of allowed pages.
+ */
+ uint32_t min;
+ /**
+ * The maximum number of allowed pages.
+ */
+ wasmer_limit_option_t max;
+} wasmer_limits_t;
+
+typedef struct {
+
+} wasmer_serialized_module_t;
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+typedef struct {
+
+} wasmer_trampoline_buffer_builder_t;
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+typedef struct {
+
+} wasmer_trampoline_callable_t;
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+typedef struct {
+
+} wasmer_trampoline_buffer_t;
+#endif
+
+#if defined(WASMER_WASI_ENABLED)
+/**
+ * Opens a directory that's visible to the WASI module as `alias` but
+ * is backed by the host file at `host_file_path`
+ */
+typedef struct {
+ /**
+ * What the WASI module will see in its virtual root
+ */
+ wasmer_byte_array alias;
+ /**
+ * The backing file that the WASI module will interact with via the alias
+ */
+ wasmer_byte_array host_file_path;
+} wasmer_wasi_map_dir_entry_t;
+#endif
+
+/**
+ * Creates a new Module from the given wasm bytes.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_compile(wasmer_module_t **module,
+ uint8_t *wasm_bytes,
+ uint32_t wasm_bytes_len);
+
+#if defined(WASMER_EMSCRIPTEN_ENABLED)
+/**
+ * Convenience function for setting up arguments and calling the Emscripten
+ * main function.
+ *
+ * WARNING:
+ *
+ * Do not call this function on untrusted code when operating without
+ * additional sandboxing in place.
+ * Emscripten has access to many host system calls and therefore may do very
+ * bad things.
+ */
+wasmer_result_t wasmer_emscripten_call_main(wasmer_instance_t *instance,
+ const wasmer_byte_array *args,
+ unsigned int args_len);
+#endif
+
+#if defined(WASMER_EMSCRIPTEN_ENABLED)
+/**
+ * Destroy `wasmer_emscripten_globals_t` created by
+ * `wasmer_emscripten_get_globals`.
+ */
+void wasmer_emscripten_destroy_globals(wasmer_emscripten_globals_t *globals);
+#endif
+
+#if defined(WASMER_EMSCRIPTEN_ENABLED)
+/**
+ * Create a `wasmer_import_object_t` with Emscripten imports, use
+ * `wasmer_emscripten_get_globals` to get a
+ * `wasmer_emscripten_globals_t` from a `wasmer_module_t`.
+ *
+ * WARNING:
+ *
+ * This `import_object_t` contains thin-wrappers around host system calls.
+ * Do not use this to execute untrusted code without additional sandboxing.
+ */
+wasmer_import_object_t *wasmer_emscripten_generate_import_object(wasmer_emscripten_globals_t *globals);
+#endif
+
+#if defined(WASMER_EMSCRIPTEN_ENABLED)
+/**
+ * Create a `wasmer_emscripten_globals_t` from a Wasm module.
+ */
+wasmer_emscripten_globals_t *wasmer_emscripten_get_globals(const wasmer_module_t *module);
+#endif
+
+#if defined(WASMER_EMSCRIPTEN_ENABLED)
+/**
+ * Execute global constructors (required if the module is compiled from C++)
+ * and sets up the internal environment.
+ *
+ * This function sets the data pointer in the same way that
+ * [`wasmer_instance_context_data_set`] does.
+ */
+wasmer_result_t wasmer_emscripten_set_up(wasmer_instance_t *instance,
+ wasmer_emscripten_globals_t *globals);
+#endif
+
+/**
+ * Gets export descriptor kind
+ */
+wasmer_import_export_kind wasmer_export_descriptor_kind(wasmer_export_descriptor_t *export_);
+
+/**
+ * Gets name for the export descriptor
+ */
+wasmer_byte_array wasmer_export_descriptor_name(wasmer_export_descriptor_t *export_descriptor);
+
+/**
+ * Gets export descriptors for the given module
+ *
+ * The caller owns the object and should call `wasmer_export_descriptors_destroy` to free it.
+ */
+void wasmer_export_descriptors(const wasmer_module_t *module,
+ wasmer_export_descriptors_t **export_descriptors);
+
+/**
+ * Frees the memory for the given export descriptors
+ */
+void wasmer_export_descriptors_destroy(wasmer_export_descriptors_t *export_descriptors);
+
+/**
+ * Gets export descriptor by index
+ */
+wasmer_export_descriptor_t *wasmer_export_descriptors_get(wasmer_export_descriptors_t *export_descriptors,
+ int idx);
+
+/**
+ * Gets the length of the export descriptors
+ */
+int wasmer_export_descriptors_len(wasmer_export_descriptors_t *exports);
+
+/**
+ * Calls a `func` with the provided parameters.
+ * Results are set using the provided `results` pointer.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_export_func_call(const wasmer_export_func_t *func,
+ const wasmer_value_t *params,
+ unsigned int params_len,
+ wasmer_value_t *results,
+ unsigned int results_len);
+
+/**
+ * Sets the params buffer to the parameter types of the given wasmer_export_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_export_func_params(const wasmer_export_func_t *func,
+ wasmer_value_tag *params,
+ uint32_t params_len);
+
+/**
+ * Sets the result parameter to the arity of the params of the wasmer_export_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_export_func_params_arity(const wasmer_export_func_t *func, uint32_t *result);
+
+/**
+ * Sets the returns buffer to the parameter types of the given wasmer_export_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_export_func_returns(const wasmer_export_func_t *func,
+ wasmer_value_tag *returns,
+ uint32_t returns_len);
+
+/**
+ * Sets the result parameter to the arity of the returns of the wasmer_export_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_export_func_returns_arity(const wasmer_export_func_t *func,
+ uint32_t *result);
+
+/**
+ * Gets wasmer_export kind
+ */
+wasmer_import_export_kind wasmer_export_kind(wasmer_export_t *export_);
+
+/**
+ * Gets name from wasmer_export
+ */
+wasmer_byte_array wasmer_export_name(wasmer_export_t *export_);
+
+/**
+ * Gets export func from export
+ */
+const wasmer_export_func_t *wasmer_export_to_func(const wasmer_export_t *export_);
+
+/**
+ * Gets a memory pointer from an export pointer.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_export_to_memory(const wasmer_export_t *export_, wasmer_memory_t **memory);
+
+/**
+ * Frees the memory for the given exports.
+ *
+ * Check the `wasmer_instance_exports()` function to get a complete
+ * example.
+ *
+ * If `exports` is a null pointer, this function does nothing.
+ *
+ * Example:
+ *
+ * ```c
+ * // Get some exports.
+ * wasmer_exports_t *exports = NULL;
+ * wasmer_instance_exports(instance, &exports);
+ *
+ * // Destroy the exports.
+ * wasmer_exports_destroy(exports);
+ * ```
+ */
+void wasmer_exports_destroy(wasmer_exports_t *exports);
+
+/**
+ * Gets wasmer_export by index
+ */
+wasmer_export_t *wasmer_exports_get(wasmer_exports_t *exports, int idx);
+
+/**
+ * Gets the length of the exports
+ */
+int wasmer_exports_len(wasmer_exports_t *exports);
+
+/**
+ * Frees memory for the given Global
+ */
+void wasmer_global_destroy(wasmer_global_t *global);
+
+/**
+ * Gets the value stored by the given Global
+ */
+wasmer_value_t wasmer_global_get(wasmer_global_t *global);
+
+/**
+ * Returns a descriptor (type, mutability) of the given Global
+ */
+wasmer_global_descriptor_t wasmer_global_get_descriptor(wasmer_global_t *global);
+
+/**
+ * Creates a new Global and returns a pointer to it.
+ * The caller owns the object and should call `wasmer_global_destroy` to free it.
+ */
+wasmer_global_t *wasmer_global_new(wasmer_value_t value, bool mutable_);
+
+/**
+ * Sets the value stored by the given Global
+ */
+void wasmer_global_set(wasmer_global_t *global, wasmer_value_t value);
+
+/**
+ * Gets export descriptor kind
+ */
+wasmer_import_export_kind wasmer_import_descriptor_kind(wasmer_import_descriptor_t *export_);
+
+/**
+ * Gets module name for the import descriptor
+ */
+wasmer_byte_array wasmer_import_descriptor_module_name(wasmer_import_descriptor_t *import_descriptor);
+
+/**
+ * Gets name for the import descriptor
+ */
+wasmer_byte_array wasmer_import_descriptor_name(wasmer_import_descriptor_t *import_descriptor);
+
+/**
+ * Gets import descriptors for the given module
+ *
+ * The caller owns the object and should call `wasmer_import_descriptors_destroy` to free it.
+ */
+void wasmer_import_descriptors(const wasmer_module_t *module,
+ wasmer_import_descriptors_t **import_descriptors);
+
+/**
+ * Frees the memory for the given import descriptors
+ */
+void wasmer_import_descriptors_destroy(wasmer_import_descriptors_t *import_descriptors);
+
+/**
+ * Gets import descriptor by index
+ */
+wasmer_import_descriptor_t *wasmer_import_descriptors_get(wasmer_import_descriptors_t *import_descriptors,
+ unsigned int idx);
+
+/**
+ * Gets the length of the import descriptors
+ */
+unsigned int wasmer_import_descriptors_len(wasmer_import_descriptors_t *exports);
+
+/**
+ * Frees memory for the given Func
+ */
+void wasmer_import_func_destroy(wasmer_import_func_t *func);
+
+/**
+ * Creates new host function, aka imported function. `func` is a
+ * function pointer, where the first argument is the famous `vm::Ctx`
+ * (in Rust), or `wasmer_instance_context_t` (in C). All arguments
+ * must be typed with compatible WebAssembly native types:
+ *
+ * | WebAssembly type | C/C++ type |
+ * | ---------------- | ---------- |
+ * | `i32` | `int32_t` |
+ * | `i64` | `int64_t` |
+ * | `f32` | `float` |
+ * | `f64` | `double` |
+ *
+ * The function pointer must have a lifetime greater than the
+ * WebAssembly instance lifetime.
+ *
+ * The caller owns the object and should call
+ * `wasmer_import_func_destroy` to free it.
+ */
+wasmer_import_func_t *wasmer_import_func_new(void (*func)(void *data),
+ const wasmer_value_tag *params,
+ unsigned int params_len,
+ const wasmer_value_tag *returns,
+ unsigned int returns_len);
+
+/**
+ * Sets the params buffer to the parameter types of the given wasmer_import_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_import_func_params(const wasmer_import_func_t *func,
+ wasmer_value_tag *params,
+ unsigned int params_len);
+
+/**
+ * Sets the result parameter to the arity of the params of the wasmer_import_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_import_func_params_arity(const wasmer_import_func_t *func, uint32_t *result);
+
+/**
+ * Sets the returns buffer to the parameter types of the given wasmer_import_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_import_func_returns(const wasmer_import_func_t *func,
+ wasmer_value_tag *returns,
+ unsigned int returns_len);
+
+/**
+ * Sets the result parameter to the arity of the returns of the wasmer_import_func_t
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_import_func_returns_arity(const wasmer_import_func_t *func,
+ uint32_t *result);
+
+/**
+ * Frees memory of the given ImportObject
+ */
+void wasmer_import_object_destroy(wasmer_import_object_t *import_object);
+
+/**
+ * Extends an existing import object with new imports
+ */
+wasmer_result_t wasmer_import_object_extend(wasmer_import_object_t *import_object,
+ const wasmer_import_t *imports,
+ unsigned int imports_len);
+
+/**
+ * Gets an entry from an ImportObject at the name and namespace.
+ * Stores `name`, `namespace`, and `import_export_value` in `import`.
+ * Thus these must remain valid for the lifetime of `import`.
+ *
+ * The caller owns all data involved.
+ * `import_export_value` will be written to based on `tag`.
+ */
+wasmer_result_t wasmer_import_object_get_import(const wasmer_import_object_t *import_object,
+ wasmer_byte_array namespace_,
+ wasmer_byte_array name,
+ wasmer_import_t *import,
+ wasmer_import_export_value *import_export_value,
+ uint32_t tag);
+
+/**
+ * Frees the memory allocated in `wasmer_import_object_iter_next`
+ *
+ * This function does not free the memory in `wasmer_import_object_t`;
+ * it only frees memory allocated while querying a `wasmer_import_object_t`.
+ */
+void wasmer_import_object_imports_destroy(wasmer_import_t *imports, uint32_t imports_len);
+
+/**
+ * Returns true if further calls to `wasmer_import_object_iter_next` will
+ * not return any new data
+ */
+bool wasmer_import_object_iter_at_end(wasmer_import_object_iter_t *import_object_iter);
+
+/**
+ * Frees the memory allocated by `wasmer_import_object_iterate_functions`
+ */
+void wasmer_import_object_iter_destroy(wasmer_import_object_iter_t *import_object_iter);
+
+/**
+ * Writes the next value to `import`. `WASMER_ERROR` is returned if there
+ * was an error or there's nothing left to return.
+ *
+ * To free the memory allocated here, pass the import to `wasmer_import_object_imports_destroy`.
+ * To check if the iterator is done, use `wasmer_import_object_iter_at_end`.
+ */
+wasmer_result_t wasmer_import_object_iter_next(wasmer_import_object_iter_t *import_object_iter,
+ wasmer_import_t *import);
+
+/**
+ * Create an iterator over the functions in the import object.
+ * Get the next import with `wasmer_import_object_iter_next`
+ * Free the iterator with `wasmer_import_object_iter_destroy`
+ */
+wasmer_import_object_iter_t *wasmer_import_object_iterate_functions(const wasmer_import_object_t *import_object);
+
+/**
+ * Creates a new empty import object.
+ * See also `wasmer_import_object_extend`
+ */
+wasmer_import_object_t *wasmer_import_object_new(void);
+
+/**
+ * Calls an exported function of a WebAssembly instance by `name`
+ * with the provided parameters. The exported function results are
+ * stored on the provided `results` pointer.
+ *
+ * This function returns `wasmer_result_t::WASMER_OK` upon success,
+ * `wasmer_result_t::WASMER_ERROR` otherwise. You can use
+ * `wasmer_last_error_message()` to get the generated error message.
+ *
+ * Potential errors are the following:
+ *
+ * * `instance` is a null pointer,
+ * * `name` is a null pointer,
+ * * `params` is a null pointer.
+ *
+ * Example of calling an exported function that needs two parameters, and returns one value:
+ *
+ * ```c
+ * // First argument.
+ * wasmer_value_t argument_one = {
+ * .tag = WASM_I32,
+ * .value.I32 = 3,
+ * };
+ *
+ * // Second argument.
+ * wasmer_value_t argument_two = {
+ * .tag = WASM_I32,
+ * .value.I32 = 4,
+ * };
+ *
+ * // First result.
+ * wasmer_value_t result_one;
+ *
+ * // All arguments and results.
+ * wasmer_value_t arguments[] = {argument_one, argument_two};
+ * wasmer_value_t results[] = {result_one};
+ *
+ * wasmer_result_t call_result = wasmer_instance_call(
+ * instance, // instance pointer
+ * "sum", // the exported function name
+ * arguments, // the arguments
+ * 2, // the number of arguments
+ * results, // the results
+ * 1 // the number of results
+ * );
+ *
+ * if (call_result == WASMER_OK) {
+ * printf("Result is: %d\n", results[0].value.I32);
+ * }
+ * ```
+ */
+wasmer_result_t wasmer_instance_call(wasmer_instance_t *instance,
+ const char *name,
+ const wasmer_value_t *params,
+ uint32_t params_len,
+ wasmer_value_t *results,
+ uint32_t results_len);
+
+/**
+ * Gets the data that can be held by an instance.
+ *
+ * This function is complementary of
+ * `wasmer_instance_context_data_set()`. Please read its
+ * documentation. You can also read the documentation of
+ * `wasmer_instance_context_t` to get other examples.
+ *
+ * This function returns `NULL` if `ctx` is a null pointer.
+ */
+void *wasmer_instance_context_data_get(const wasmer_instance_context_t *ctx);
+
+/**
+ * Sets the data that can be held by an instance context.
+ *
+ * An instance context (represented by the opaque
+ * `wasmer_instance_context_t` structure) can hold user-defined
+ * data. This function sets the data. This function is complementary
+ * of `wasmer_instance_context_data_get()`.
+ *
+ * This function does nothing if `instance` is a null pointer.
+ *
+ * Example:
+ *
+ * ```c
+ * // Define your own data.
+ * typedef struct {
+ * // …
+ * } my_data;
+ *
+ * // Allocate them and set them on the given instance.
+ * my_data *data = malloc(sizeof(my_data));
+ * data->… = …;
+ * wasmer_instance_context_data_set(instance, (void*) my_data);
+ *
+ * // You can read your data.
+ * {
+ * my_data *data = (my_data*) wasmer_instance_context_data_get(wasmer_instance_context_get(instance));
+ * // …
+ * }
+ * ```
+ */
+void wasmer_instance_context_data_set(wasmer_instance_t *instance,
+ void *data_ptr);
+
+/**
+ * Returns the instance context. Learn more by looking at the
+ * `wasmer_instance_context_t` struct.
+ *
+ * This function returns `null` if `instance` is a null pointer.
+ *
+ * Example:
+ *
+ * ```c
+ * const wasmer_instance_context_t *context = wasmer_instance_context_get(instance);
+ * my_data *data = (my_data *) wasmer_instance_context_data_get(context);
+ * // Do something with `my_data`.
+ * ```
+ *
+ * It is often useful with `wasmer_instance_context_data_set()`.
+ */
+const wasmer_instance_context_t *wasmer_instance_context_get(wasmer_instance_t *instance);
+
+/**
+ * Gets the `memory_idx`th memory of the instance.
+ *
+ * Note that the index is always `0` until multiple memories are supported.
+ *
+ * This function is mostly used inside host functions (aka imported
+ * functions) to read the instance memory.
+ *
+ * Example of a _host function_ that reads and prints a string based on a pointer and a length:
+ *
+ * ```c
+ * void print_string(const wasmer_instance_context_t *context, int32_t pointer, int32_t length) {
+ * // Get the 0th memory.
+ * const wasmer_memory_t *memory = wasmer_instance_context_memory(context, 0);
+ *
+ * // Get the memory data as a pointer.
+ * uint8_t *memory_bytes = wasmer_memory_data(memory);
+ *
+ * // Print what we assumed to be a string!
+ * printf("%.*s", length, memory_bytes + pointer);
+ * }
+ * ```
+ */
+const wasmer_memory_t *wasmer_instance_context_memory(const wasmer_instance_context_t *ctx,
+ uint32_t _memory_idx);
+
+/**
+ * Frees memory for the given `wasmer_instance_t`.
+ *
+ * Check the `wasmer_instantiate()` function to get a complete
+ * example.
+ *
+ * If `instance` is a null pointer, this function does nothing.
+ *
+ * Example:
+ *
+ * ```c
+ * // Get an instance.
+ * wasmer_instance_t *instance = NULL;
+ * wasmer_instantiate(&instance, bytes, bytes_length, imports, 0);
+ *
+ * // Destroy the instance.
+ * wasmer_instance_destroy(instance);
+ * ```
+ */
+void wasmer_instance_destroy(wasmer_instance_t *instance);
+
+/**
+ * Gets all the exports of the given WebAssembly instance.
+ *
+ * This function stores a Rust vector of exports into `exports` as an
+ * opaque pointer of kind `wasmer_exports_t`.
+ *
+ * As is, you can do nothing with `exports` except using the
+ * companion functions, like `wasmer_exports_len()`,
+ * `wasmer_exports_get()` or `wasmer_export_kind()`. See the example below.
+ *
+ * **Warning**: The caller owns the object and should call
+ * `wasmer_exports_destroy()` to free it.
+ *
+ * Example:
+ *
+ * ```c
+ * // Get the exports.
+ * wasmer_exports_t *exports = NULL;
+ * wasmer_instance_exports(instance, &exports);
+ *
+ * // Get the number of exports.
+ * int exports_length = wasmer_exports_len(exports);
+ * printf("Number of exports: %d\n", exports_length);
+ *
+ * // Read the first export.
+ * wasmer_export_t *export = wasmer_exports_get(exports, 0);
+ *
+ * // Get the kind of the export.
+ * wasmer_import_export_kind export_kind = wasmer_export_kind(export);
+ *
+ * // Assert it is a function (why not).
+ * assert(export_kind == WASM_FUNCTION);
+ *
+ * // Read the export name.
+ * wasmer_byte_array name_bytes = wasmer_export_name(export);
+ *
+ * assert(name_bytes.bytes_len == sizeof("sum") - 1);
+ * assert(memcmp(name_bytes.bytes, "sum", sizeof("sum") - 1) == 0);
+ *
+ * // Destroy the exports.
+ * wasmer_exports_destroy(exports);
+ * ```
+ */
+void wasmer_instance_exports(wasmer_instance_t *instance, wasmer_exports_t **exports);
+
+/**
+ * Creates a new WebAssembly instance from the given bytes and imports.
+ *
+ * The result is stored in the first argument `instance` if
+ * successful, i.e. when the function returns
+ * `wasmer_result_t::WASMER_OK`. Otherwise
+ * `wasmer_result_t::WASMER_ERROR` is returned, and
+ * `wasmer_last_error_length()` with `wasmer_last_error_message()` must
+ * be used to read the error message.
+ *
+ * The caller is responsible to free the instance with
+ * `wasmer_instance_destroy()`.
+ *
+ * Example:
+ *
+ * ```c
+ * // 1. Read a WebAssembly module from a file.
+ * FILE *file = fopen("sum.wasm", "r");
+ * fseek(file, 0, SEEK_END);
+ * long bytes_length = ftell(file);
+ * uint8_t *bytes = malloc(bytes_length);
+ * fseek(file, 0, SEEK_SET);
+ * fread(bytes, 1, bytes_length, file);
+ * fclose(file);
+ *
+ * // 2. Declare the imports (here, none).
+ * wasmer_import_t imports[] = {};
+ *
+ * // 3. Instantiate the WebAssembly module.
+ * wasmer_instance_t *instance = NULL;
+ * wasmer_result_t result = wasmer_instantiate(&instance, bytes, bytes_length, imports, 0);
+ *
+ * // 4. Check for errors.
+ * if (result != WASMER_OK) {
+ * int error_length = wasmer_last_error_length();
+ * char *error = malloc(error_length);
+ * wasmer_last_error_message(error, error_length);
+ * // Do something with `error`…
+ * }
+ *
+ * // 5. Free the memory!
+ * wasmer_instance_destroy(instance);
+ * ```
+ */
+wasmer_result_t wasmer_instantiate(wasmer_instance_t **instance,
+ uint8_t *wasm_bytes,
+ uint32_t wasm_bytes_len,
+ wasmer_import_t *imports,
+ int imports_len);
+
+/**
+ * Gets the length in bytes of the last error if any.
+ *
+ * This can be used to dynamically allocate a buffer with the correct number of
+ * bytes needed to store a message.
+ *
+ * See `wasmer_last_error_message()` to get a full example.
+ */
+int wasmer_last_error_length(void);
+
+/**
+ * Gets the last error message if any into the provided buffer
+ * `buffer` up to the given `length`.
+ *
+ * The `length` parameter must be large enough to store the last
+ * error message. Ideally, the value should come from
+ * `wasmer_last_error_length()`.
+ *
+ * The function returns the length of the string in bytes, `-1` if an
+ * error occurs. Potential errors are:
+ *
+ * * The buffer is a null pointer,
+ * * The buffer is too small to hold the error message.
+ *
+ * Note: The error message always has a trailing null character.
+ *
+ * Example:
+ *
+ * ```c
+ * int error_length = wasmer_last_error_length();
+ *
+ * if (error_length > 0) {
+ * char *error_message = malloc(error_length);
+ * wasmer_last_error_message(error_message, error_length);
+ * printf("Error message: `%s`\n", error_message);
+ * } else {
+ * printf("No error message\n");
+ * }
+ * ```
+ */
+int wasmer_last_error_message(char *buffer, int length);
+
+/**
+ * Gets a pointer to the beginning of the contiguous memory data
+ * bytes.
+ *
+ * The function returns `NULL` if `memory` is a null pointer.
+ *
+ * Note that when the memory grows, it can be reallocated, and thus
+ * the returned pointer can be invalidated.
+ *
+ * Example:
+ *
+ * ```c
+ * uint8_t *memory_data = wasmer_memory_data(memory);
+ * char *str = (char*) malloc(sizeof(char) * 7);
+ *
+ * for (uint32_t nth = 0; nth < 7; ++nth) {
+ * str[nth] = (char) memory_data[nth];
+ * }
+ * ```
+ */
+uint8_t *wasmer_memory_data(const wasmer_memory_t *memory);
+
+/**
+ * Gets the size in bytes of the memory data.
+ *
+ * This function returns 0 if `memory` is a null pointer.
+ *
+ * Example:
+ *
+ * ```c
+ * uint32_t memory_data_length = wasmer_memory_data_length(memory);
+ * ```
+ */
+uint32_t wasmer_memory_data_length(wasmer_memory_t *memory);
+
+/**
+ * Frees memory for the given `wasmer_memory_t`.
+ *
+ * Check the `wasmer_memory_new()` function to get a complete
+ * example.
+ *
+ * If `memory` is a null pointer, this function does nothing.
+ *
+ * Example:
+ *
+ * ```c
+ * // Get a memory.
+ * wasmer_memory_t *memory = NULL;
+ * wasmer_result_t result = wasmer_memory_new(&memory, memory_descriptor);
+ *
+ * // Destroy the memory.
+ * wasmer_memory_destroy(memory);
+ * ```
+ */
+void wasmer_memory_destroy(wasmer_memory_t *memory);
+
+/**
+ * Grows a memory by the given number of pages (of 64 KiB each).
+ *
+ * The functions return `wasmer_result_t::WASMER_OK` upon success,
+ * `wasmer_result_t::WASMER_ERROR` otherwise. Use
+ * `wasmer_last_error_length()` with `wasmer_last_error_message()` to
+ * read the error message.
+ *
+ * Example:
+ *
+ * ```c
+ * wasmer_result_t result = wasmer_memory_grow(memory, 10);
+ *
+ * if (result != WASMER_OK) {
+ * // …
+ * }
+ * ```
+ */
+wasmer_result_t wasmer_memory_grow(wasmer_memory_t *memory, uint32_t delta);
+
+/**
+ * Reads the current length (in pages) of the given memory.
+ *
+ * The function returns zero if `memory` is a null pointer.
+ *
+ * Example:
+ *
+ * ```c
+ * uint32_t memory_length = wasmer_memory_length(memory);
+ *
+ * printf("Memory pages length: %d\n", memory_length);
+ * ```
+ */
+uint32_t wasmer_memory_length(const wasmer_memory_t *memory);
+
+/**
+ * Creates a new empty WebAssembly memory for the given descriptor.
+ *
+ * The result is stored in the first argument `memory` if successful,
+ * i.e. when the function returns
+ * `wasmer_result_t::WASMER_OK`. Otherwise,
+ * `wasmer_result_t::WASMER_ERROR` is returned, and
+ * `wasmer_last_error_length()` with `wasmer_last_error_message()`
+ * must be used to read the error message.
+ *
+ * The caller owns the memory and is responsible to free it with
+ * `wasmer_memory_destroy()`.
+ *
+ * Example:
+ *
+ * ```c
+ * // 1. The memory object.
+ * wasmer_memory_t *memory = NULL;
+ *
+ * // 2. The memory descriptor.
+ * wasmer_limits_t memory_descriptor = {
+ * .min = 10,
+ * .max = {
+ * .has_some = true,
+ * .some = 15,
+ * },
+ * };
+ *
+ * // 3. Initialize the memory.
+ * wasmer_result_t result = wasmer_memory_new(&memory, memory_descriptor);
+ *
+ * if (result != WASMER_OK) {
+ * int error_length = wasmer_last_error_length();
+ * char *error = malloc(error_length);
+ * wasmer_last_error_message(error, error_length);
+ * // Do something with `error`…
+ * }
+ *
+ * // 4. Free the memory!
+ * wasmer_memory_destroy(memory);
+ * ```
+ */
+wasmer_result_t wasmer_memory_new(wasmer_memory_t **memory, wasmer_limits_t limits);
+
+/**
+ * Deserialize the given serialized module.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_module_deserialize(wasmer_module_t **module,
+ const wasmer_serialized_module_t *serialized_module);
+
+/**
+ * Frees memory for the given Module
+ */
+void wasmer_module_destroy(wasmer_module_t *module);
+
+/**
+ * Given:
+ * * A prepared `wasmer` import-object
+ * * A compiled wasmer module
+ *
+ * Instantiates a wasmer instance
+ */
+wasmer_result_t wasmer_module_import_instantiate(wasmer_instance_t **instance,
+ const wasmer_module_t *module,
+ const wasmer_import_object_t *import_object);
+
+/**
+ * Creates a new Instance from the given module and imports.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_module_instantiate(const wasmer_module_t *module,
+ wasmer_instance_t **instance,
+ wasmer_import_t *imports,
+ int imports_len);
+
+/**
+ * Serialize the given Module.
+ *
+ * The caller owns the object and should call `wasmer_serialized_module_destroy` to free it.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_module_serialize(wasmer_serialized_module_t **serialized_module,
+ const wasmer_module_t *module);
+
+/**
+ * Get bytes of the serialized module.
+ */
+wasmer_byte_array wasmer_serialized_module_bytes(const wasmer_serialized_module_t *serialized_module);
+
+/**
+ * Frees memory for the given serialized Module.
+ */
+void wasmer_serialized_module_destroy(wasmer_serialized_module_t *serialized_module);
+
+/**
+ * Transform a sequence of bytes into a serialized module.
+ *
+ * The caller owns the object and should call `wasmer_serialized_module_destroy` to free it.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_serialized_module_from_bytes(wasmer_serialized_module_t **serialized_module,
+ const uint8_t *serialized_module_bytes,
+ uint32_t serialized_module_bytes_length);
+
+/**
+ * Frees memory for the given Table
+ */
+void wasmer_table_destroy(wasmer_table_t *table);
+
+/**
+ * Grows a Table by the given number of elements.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_table_grow(wasmer_table_t *table, uint32_t delta);
+
+/**
+ * Returns the current length of the given Table
+ */
+uint32_t wasmer_table_length(wasmer_table_t *table);
+
+/**
+ * Creates a new Table for the given descriptor and initializes the given
+ * pointer to pointer to a pointer to the new Table.
+ *
+ * The caller owns the object and should call `wasmer_table_destroy` to free it.
+ *
+ * Returns `wasmer_result_t::WASMER_OK` upon success.
+ *
+ * Returns `wasmer_result_t::WASMER_ERROR` upon failure. Use `wasmer_last_error_length`
+ * and `wasmer_last_error_message` to get an error message.
+ */
+wasmer_result_t wasmer_table_new(wasmer_table_t **table, wasmer_limits_t limits);
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Adds a callinfo trampoline to the builder.
+ */
+uintptr_t wasmer_trampoline_buffer_builder_add_callinfo_trampoline(wasmer_trampoline_buffer_builder_t *builder,
+ const wasmer_trampoline_callable_t *func,
+ const void *ctx,
+ uint32_t num_params);
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Adds a context trampoline to the builder.
+ */
+uintptr_t wasmer_trampoline_buffer_builder_add_context_trampoline(wasmer_trampoline_buffer_builder_t *builder,
+ const wasmer_trampoline_callable_t *func,
+ const void *ctx);
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Finalizes the trampoline builder into an executable buffer.
+ */
+wasmer_trampoline_buffer_t *wasmer_trampoline_buffer_builder_build(wasmer_trampoline_buffer_builder_t *builder);
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Creates a new trampoline builder.
+ */
+wasmer_trampoline_buffer_builder_t *wasmer_trampoline_buffer_builder_new(void);
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Destroys the trampoline buffer if not null.
+ */
+void wasmer_trampoline_buffer_destroy(wasmer_trampoline_buffer_t *buffer);
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Returns the callable pointer for the trampoline with index `idx`.
+ */
+const wasmer_trampoline_callable_t *wasmer_trampoline_buffer_get_trampoline(const wasmer_trampoline_buffer_t *buffer,
+ uintptr_t idx);
+#endif
+
+#if (!defined(_WIN32) && defined(ARCH_X86_64))
+/**
+ * Returns the context added by `add_context_trampoline`, from within the callee function.
+ */
+void *wasmer_trampoline_get_context(void);
+#endif
+
+/**
+ * Stop the execution of a host function, aka imported function. The
+ * function must be used _only_ inside a host function.
+ *
+ * The pointer to `wasmer_instance_context_t` is received by the host
+ * function as its first argument. Just passing it to `ctx` is fine.
+ *
+ * The error message must have a greater lifetime than the host
+ * function itself since the error is read outside the host function
+ * with `wasmer_last_error_message`.
+ *
+ * This function returns `wasmer_result_t::WASMER_ERROR` if `ctx` or
+ * `error_message` are null.
+ *
+ * This function never returns otherwise.
+ */
+wasmer_result_t wasmer_trap(const wasmer_instance_context_t *ctx, const char *error_message);
+
+/**
+ * Validates a sequence of bytes hoping it represents a valid WebAssembly module.
+ *
+ * The function returns true if the bytes are valid, false otherwise.
+ *
+ * Example:
+ *
+ * ```c
+ * bool result = wasmer_validate(bytes, bytes_length);
+ *
+ * if (false == result) {
+ * // Do something…
+ * }
+ * ```
+ */
+bool wasmer_validate(const uint8_t *wasm_bytes, uint32_t wasm_bytes_len);
+
+#if defined(WASMER_WASI_ENABLED)
+/**
+ * Convenience function that creates a WASI import object with no arguments,
+ * environment variables, preopened files, or mapped directories.
+ *
+ * This function is the same as calling [`wasmer_wasi_generate_import_object`] with all
+ * empty values.
+ */
+wasmer_import_object_t *wasmer_wasi_generate_default_import_object(void);
+#endif
+
+#if defined(WASMER_WASI_ENABLED)
+/**
+ * Creates a WASI import object.
+ *
+ * This function treats null pointers as empty collections.
+ * For example, passing null for a string in `args`, will lead to a zero
+ * length argument in that position.
+ */
+wasmer_import_object_t *wasmer_wasi_generate_import_object(const wasmer_byte_array *args,
+ unsigned int args_len,
+ const wasmer_byte_array *envs,
+ unsigned int envs_len,
+ const wasmer_byte_array *preopened_files,
+ unsigned int preopened_files_len,
+ const wasmer_wasi_map_dir_entry_t *mapped_dirs,
+ unsigned int mapped_dirs_len);
+#endif
+
+#if defined(WASMER_WASI_ENABLED)
+/**
+ * Creates a WASI import object for a specific version.
+ *
+ * This function is similar to `wasmer_wasi_generate_import_object`
+ * except that the first argument describes the WASI version.
+ *
+ * The version is expected to be of kind `Version`.
+ */
+wasmer_import_object_t *wasmer_wasi_generate_import_object_for_version(unsigned char version,
+ const wasmer_byte_array *args,
+ unsigned int args_len,
+ const wasmer_byte_array *envs,
+ unsigned int envs_len,
+ const wasmer_byte_array *preopened_files,
+ unsigned int preopened_files_len,
+ const wasmer_wasi_map_dir_entry_t *mapped_dirs,
+ unsigned int mapped_dirs_len);
+#endif
+
+#if defined(WASMER_WASI_ENABLED)
+/**
+ * Find the version of WASI used by the module.
+ *
+ * In case of error, the returned version is `Version::Unknown`.
+ */
+Version wasmer_wasi_get_version(const wasmer_module_t *module);
+#endif
+
+#endif /* WASMER_H */
diff --git a/vendor/github.com/yashtewari/glob-intersection/LICENSE b/vendor/github.com/yashtewari/glob-intersection/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/yashtewari/glob-intersection/README.md b/vendor/github.com/yashtewari/glob-intersection/README.md
new file mode 100644
index 00000000..618a8506
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/README.md
@@ -0,0 +1,26 @@
+# glob-intersection
+Go package to check if the set of non-empty strings matched by the intersection of two regexp-style globs is non-empty.
+
+### Examples
+- `gintersect.NonEmpty("a.a.", ".b.b")` is `true` because both globs match the string `abab`.
+- `gintersect.NonEmpty("[a-z]+", "[0-9]*")` is `false` because there are no non-empty strings that both globs match.
+
+### Limitations
+
+- It is assumed that all input is rooted at the beginning and the end, i.e., starts and ends with the regexp symbols `^` and `$` respectively. This is done because any non-rooted expressions will always match a non-empty set of non-empty strings.
+- The only special symbols are:
+ - `.` for any character.
+ - `+` for 1 or more of the preceding expression.
+ - `*` for 0 or more of the preceding expression.
+ - `[` and `]` to define regexp-style character classes.
+ - `-` to specify Unicode ranges inside character class definitions.
+ - `\` escapes any special symbol, including itself.
+
+### Complexity
+
+Complexity is exponential in the number of flags (`+` or `*`) present in the glob with the smaller flag count.
+Benchmarks (see [`non_empty_bench_test.go`](/non_empty_bench_test.go)) reveal that inputs where one of the globs has <= 10 flags, and both globs have 100s of characters, will run in less than a nanosecond. This should be ok for most use cases.
+
+### Acknowledgements
+
+[This StackOverflow discussion](https://stackoverflow.com/questions/18695727/algorithm-to-find-out-whether-the-matches-for-two-glob-patterns-or-regular-expr) for fleshing out the logic.
diff --git a/vendor/github.com/yashtewari/glob-intersection/glob.go b/vendor/github.com/yashtewari/glob-intersection/glob.go
new file mode 100644
index 00000000..54d4729b
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/glob.go
@@ -0,0 +1,182 @@
+// Package gintersect provides methods to check whether the intersection of several globs matches a non-empty set of strings.
+package gintersect
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Glob represents a glob.
+type Glob []Token
+
+// NewGlob constructs a Glob from the given string by tokenizing and then simplifying it, or reports errors if any.
+func NewGlob(input string) (Glob, error) {
+ tokens, err := Tokenize([]rune(input))
+ if err != nil {
+ return nil, err
+ }
+
+ tokens = Simplify(tokens)
+
+ return Glob(tokens), nil
+}
+
+// TokenType is the type of a Token.
+type TokenType uint
+
+const (
+ TTCharacter TokenType = iota
+ TTDot
+ TTSet
+)
+
+// Flag applies to a token.
+type Flag uint
+
+func (f Flag) String() (s string) {
+ for r, flag := range flagRunes {
+ if f == flag {
+ s = string(r)
+ break
+ }
+ }
+ return
+}
+
+const (
+ FlagNone = iota
+ FlagPlus
+ FlagStar
+)
+
+// Token is the element that makes up a Glob.
+type Token interface {
+ Type() TokenType
+ Flag() Flag
+ SetFlag(Flag)
+ // Equal describes whether the given Token is exactly equal to this one, barring differences in flags.
+ Equal(Token) bool
+ String() string
+}
+
+// token is the base for all structs implementing Token.
+type token struct {
+ ttype TokenType
+ flag Flag
+}
+
+func (t token) Type() TokenType {
+ return t.ttype
+}
+
+func (t token) Flag() Flag {
+ return t.flag
+}
+
+func (t *token) SetFlag(f Flag) {
+ t.flag = f
+}
+
+// character is a specific rune. It implements Token.
+type character struct {
+ token
+ r rune
+}
+
+func NewCharacter(r rune) Token {
+ return &character{
+ token: token{ttype: TTCharacter},
+ r: r,
+ }
+}
+
+func (c character) Equal(other Token) bool {
+ if c.Type() != other.Type() {
+ return false
+ }
+
+ o := other.(*character)
+ return c.Rune() == o.Rune()
+}
+
+func (c character) String() string {
+ return fmt.Sprintf("{character: %s flag: %s}", string(c.Rune()), c.Flag().String())
+}
+
+func (c character) Rune() rune {
+ return c.r
+}
+
+// dot is any character. It implements Token.
+type dot struct {
+ token
+}
+
+func NewDot() Token {
+ return &dot{
+ token: token{ttype: TTDot},
+ }
+}
+
+func (d dot) Equal(other Token) bool {
+ if d.Type() != other.Type() {
+ return false
+ }
+
+ return true
+}
+
+func (d dot) String() string {
+ return fmt.Sprintf("{dot flag: %s}", d.Flag().String())
+}
+
+// set is a set of characters (similar to regexp character class).
+// It implements Token.
+type set struct {
+ token
+ runes map[rune]bool
+}
+
+func NewSet(runes []rune) Token {
+ m := map[rune]bool{}
+ for _, r := range runes {
+ m[r] = true
+ }
+ return &set{
+ token: token{ttype: TTSet},
+ runes: m,
+ }
+}
+
+func (s set) Equal(other Token) bool {
+ if s.Type() != other.Type() {
+ return false
+ }
+
+ o := other.(*set)
+ r1, r2 := s.Runes(), o.Runes()
+
+ if len(r1) != len(r2) {
+ return false
+ }
+
+ for k, _ := range r1 {
+ if _, ok := r2[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (s set) String() string {
+ rs := make([]string, 0, 30)
+ for r, _ := range s.Runes() {
+ rs = append(rs, string(r))
+ }
+ return fmt.Sprintf("{set: %s flag: %s}", strings.Join(rs, ""), s.Flag().String())
+}
+
+func (s set) Runes() map[rune]bool {
+ return s.runes
+}
diff --git a/vendor/github.com/yashtewari/glob-intersection/match.go b/vendor/github.com/yashtewari/glob-intersection/match.go
new file mode 100644
index 00000000..45a988a8
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/match.go
@@ -0,0 +1,91 @@
+package gintersect
+
+import (
+ "github.com/pkg/errors"
+)
+
+var (
+ errBadImplementation = errors.New("this logical path is invalid")
+)
+
+// Match implements single-Token matching, ignoring flags.
+// Example: [a-d] and [b-e] match, while [a-z] and [0-9] do not.
+func Match(t1 Token, t2 Token) bool {
+ var temp Token
+ if t1.Type() > t2.Type() {
+ temp = t1
+ t1 = t2
+ t2 = temp
+ }
+
+ switch t1.Type() {
+ case TTCharacter:
+ ch := t1.(*character)
+
+ switch t2.Type() {
+ case TTCharacter:
+ return matchCharacters(ch, t2.(*character))
+ case TTDot:
+ return matchCharacterDot(ch, t2.(*dot))
+ case TTSet:
+ return matchCharacterSet(ch, t2.(*set))
+ default:
+ panic(errBadImplementation)
+ }
+
+ case TTDot:
+ d := t1.(*dot)
+
+ switch t2.Type() {
+ case TTDot:
+ return matchDots(d, t2.(*dot))
+ case TTSet:
+ return matchDotSet(d, t2.(*set))
+ default:
+ panic(errBadImplementation)
+ }
+
+ case TTSet:
+ switch t2.Type() {
+ case TTSet:
+ return matchSets(t1.(*set), t2.(*set))
+ default:
+ panic(errBadImplementation)
+ }
+
+ default:
+ panic(errBadImplementation)
+
+ }
+}
+
+func matchCharacters(a *character, b *character) bool {
+ return a.Rune() == b.Rune()
+}
+
+func matchCharacterDot(a *character, b *dot) bool {
+ return true
+}
+
+func matchCharacterSet(a *character, b *set) bool {
+ _, ok := b.Runes()[a.Rune()]
+ return ok
+}
+
+func matchDots(a *dot, b *dot) bool {
+ return true
+}
+
+func matchDotSet(a *dot, b *set) bool {
+ return true
+}
+
+func matchSets(a *set, b *set) bool {
+ for k, _ := range a.Runes() {
+ if _, ok := b.Runes()[k]; ok {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/yashtewari/glob-intersection/non_empty.go b/vendor/github.com/yashtewari/glob-intersection/non_empty.go
new file mode 100644
index 00000000..91cbdbde
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/non_empty.go
@@ -0,0 +1,154 @@
+package gintersect
+
+// NonEmpty is true if the intersection of lhs and rhs matches a non-empty set of non-empty strings.
+func NonEmpty(lhs string, rhs string) (bool, error) {
+ g1, err := NewGlob(lhs)
+ if err != nil {
+ return false, err
+ }
+
+ g2, err := NewGlob(rhs)
+ if err != nil {
+ return false, err
+ }
+
+ var match bool
+ g1, g2, match = trimGlobs(g1, g2)
+ if !match {
+ return false, nil
+ }
+
+ return intersectNormal(g1, g2), nil
+}
+
+// trimGlobs removes matching prefixes and suffixes from g1, g2, or returns false if prefixes/suffixes don't match.
+func trimGlobs(g1, g2 Glob) (Glob, Glob, bool) {
+ var l, r1, r2 int
+
+ // Trim from the beginning until a flagged Token or a mismatch is found.
+ for l = 0; l < len(g1) && l < len(g2) && g1[l].Flag() == FlagNone && g2[l].Flag() == FlagNone; l++ {
+ if !Match(g1[l], g2[l]) {
+ return nil, nil, false
+ }
+ }
+
+ // Leave one prefix Token untrimmed to avoid empty Globs because those will break the algorithm.
+ if l > 0 {
+ l--
+ }
+
+ // Trim from the end until a flagged Token or a mismatch is found.
+ for r1, r2 = len(g1)-1, len(g2)-1; r1 >= 0 && r1 >= l && r2 >= 0 && r2 >= l && g1[r1].Flag() == FlagNone && g2[r2].Flag() == FlagNone; r1, r2 = r1-1, r2-1 {
+ if !Match(g1[r1], g2[r2]) {
+ return nil, nil, false
+ }
+ }
+
+ // Leave one suffix Token untrimmed to avoid empty Globs because those will break the algorithm.
+ if r1 < len(g1)-1 {
+ r1++
+ r2++
+ }
+
+ return g1[l : r1+1], g2[l : r2+1], true
+}
+
+// All uses of `intersection exists` below mean that the intersection of the globs matches a non-empty set of non-empty strings.
+
+// intersectNormal accepts two globs and returns a boolean describing whether their intersection exists.
+// It traverses g1, g2 while ensuring that their Tokens match.
+// If a flagged Token is encountered, flow of control is handed off to intersectSpecial.
+func intersectNormal(g1, g2 Glob) bool {
+ var i, j int
+ for i, j = 0, 0; i < len(g1) && j < len(g2); i, j = i+1, j+1 {
+ if g1[i].Flag() == FlagNone && g2[j].Flag() == FlagNone {
+ if !Match(g1[i], g2[j]) {
+ return false
+ }
+ } else {
+ return intersectSpecial(g1[i:], g2[j:])
+ }
+ }
+
+ if i == len(g1) && j == len(g2) {
+ return true
+ }
+
+ return false
+}
+
+// intersectSpecial accepts two globs such that at least one starts with a flagged Token.
+// It returns a boolean describing whether their intersection exists.
+// It hands flow of control to intersectPlus or intersectStar correctly.
+func intersectSpecial(g1, g2 Glob) bool {
+ if g1[0].Flag() != FlagNone { // If g1 starts with a Token having a Flag.
+ switch g1[0].Flag() {
+ case FlagPlus:
+ return intersectPlus(g1, g2)
+ case FlagStar:
+ return intersectStar(g1, g2)
+ }
+ } else { // If g2 starts with a Token having a Flag.
+ switch g2[0].Flag() {
+ case FlagPlus:
+ return intersectPlus(g2, g1)
+ case FlagStar:
+ return intersectStar(g2, g1)
+ }
+ }
+
+ return false
+}
+
+// intersectPlus accepts two globs such that plussed[0].Flag() == FlagPlus.
+// It returns a boolean describing whether their intersection exists.
+// It ensures that at least one token in other matches plussed[0], before handing flow of control to intersectStar.
+func intersectPlus(plussed, other Glob) bool {
+ if !Match(plussed[0], other[0]) {
+ return false
+ }
+ return intersectStar(plussed, other[1:])
+}
+
+// intersectStar accepts two globs such that starred[0].Flag() == FlagStar.
+// It returns a boolean describing whether their intersection exists.
+// It gobbles up Tokens from other until the Tokens remaining in other intersect with starred[1:]
+func intersectStar(starred, other Glob) bool {
+ // starToken, nextToken are the token having FlagStar and the one that follows immediately after, respectively.
+ var starToken, nextToken Token
+
+ starToken = starred[0]
+ if len(starred) > 1 {
+ nextToken = starred[1]
+ }
+
+ for i, t := range other {
+		// Start gobbling up tokens in other while they match starToken.
+ if nextToken != nil && Match(t, nextToken) {
+			// When a token in other matches the token after starToken, stop gobbling and try to match the two all the way.
+ allTheWay := intersectNormal(starred[1:], other[i:])
+ // If they match all the way, the Globs intersect.
+ if allTheWay {
+ return true
+ } else {
+ // If they don't match all the way, then the current token from other should still match starToken.
+ if !Match(t, starToken) {
+ return false
+ }
+ }
+ } else {
+ // Only move forward if this token can be gobbled up by starToken.
+ if !Match(t, starToken) {
+ return false
+ }
+ }
+ }
+
+ // If there was no token following starToken, and everything from other was gobbled, the Globs intersect.
+ if nextToken == nil {
+ return true
+ }
+
+	// If everything from other was gobbled but there was a nextToken to match, they don't intersect.
+ return false
+}
diff --git a/vendor/github.com/yashtewari/glob-intersection/simplify.go b/vendor/github.com/yashtewari/glob-intersection/simplify.go
new file mode 100644
index 00000000..7704fb10
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/simplify.go
@@ -0,0 +1,43 @@
+package gintersect
+
+// Simplify accepts a Token slice and returns an equivalent Token slice that is shorter/simpler.
+// The only simplification currently applied is removing redundant flagged Tokens.
+// TODO: Remove unflagged Tokens next to equivalent Tokens with FlagPlus. Example: tt+t == t+
+func Simplify(tokens []Token) []Token {
+ if len(tokens) == 0 {
+ return tokens
+ }
+ simple := make([]Token, 1, len(tokens))
+ simple[0] = tokens[0]
+
+ latest := simple[0]
+
+ for i := 1; i < len(tokens); i++ {
+ handled := false
+ // Possible simplifications to apply if there is a flag.
+ if tokens[i].Flag() != FlagNone && latest.Flag() != FlagNone {
+ // If the token contents are the same, then apply simplification.
+ if tokens[i].Equal(latest) {
+ var flag Flag
+ // FlagPlus takes precedence, because:
+ // t+t* == t+
+ // t*t+ == t+
+ if tokens[i].Flag() == FlagPlus || latest.Flag() == FlagPlus {
+ flag = FlagPlus
+ } else {
+ flag = FlagStar
+ }
+
+ simple[len(simple)-1].SetFlag(flag)
+ handled = true
+ }
+ }
+
+ if !handled {
+ latest = tokens[i]
+ simple = append(simple, tokens[i])
+ }
+ }
+
+ return simple
+}
diff --git a/vendor/github.com/yashtewari/glob-intersection/test_samples.go b/vendor/github.com/yashtewari/glob-intersection/test_samples.go
new file mode 100644
index 00000000..5d2922c7
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/test_samples.go
@@ -0,0 +1,84 @@
+package gintersect
+
+var (
+ samplesInitialized = false
+
+ testCharacters map[rune]Token
+ testCharactersPlus map[rune]Token
+ testCharactersStar map[rune]Token
+
+ testDot, testDotPlus, testDotStar Token
+
+ testLowerAlphaSet, testLowerAlphaSetPlus, lowerAplhaSetStar Token
+ testUpperAlphaSet, testUpperAlphaSetPlus, testUpperAlphaSetStar Token
+ testNumSet, testNumSetPlus, testNumSetStar Token
+ testSymbolSet, testSymbolSetPlus, testSymbolSetStar Token
+
+ testEmptySet Token
+)
+
+func initializeTestSamples() {
+ if samplesInitialized {
+ return
+ }
+
+ testCharacters, testCharactersPlus, testCharactersStar = make(map[rune]Token), make(map[rune]Token), make(map[rune]Token)
+
+ testDot, testDotPlus, testDotStar = NewDot(), NewDot(), NewDot()
+ testDotPlus.SetFlag(FlagPlus)
+ testDotStar.SetFlag(FlagStar)
+
+ var runes []rune
+ runes = makeRunes('a', 'z')
+
+ testLowerAlphaSet, testLowerAlphaSetPlus, lowerAplhaSetStar = NewSet(runes), NewSet(runes), NewSet(runes)
+ testLowerAlphaSetPlus.SetFlag(FlagPlus)
+ lowerAplhaSetStar.SetFlag(FlagStar)
+
+ runes = makeRunes('A', 'Z')
+
+ testUpperAlphaSet, testUpperAlphaSetPlus, testUpperAlphaSetStar = NewSet(runes), NewSet(runes), NewSet(runes)
+ testUpperAlphaSetPlus.SetFlag(FlagPlus)
+ testUpperAlphaSetStar.SetFlag(FlagStar)
+
+ runes = makeRunes('0', '9')
+
+ testNumSet, testNumSetPlus, testNumSetStar = NewSet(runes), NewSet(runes), NewSet(runes)
+ testNumSetPlus.SetFlag(FlagPlus)
+ testNumSetStar.SetFlag(FlagStar)
+
+ runes = makeRunes('!', '/')
+
+ testSymbolSet, testSymbolSetPlus, testSymbolSetStar = NewSet(runes), NewSet(runes), NewSet(runes)
+ testSymbolSetPlus.SetFlag(FlagPlus)
+ testSymbolSetStar.SetFlag(FlagStar)
+
+ testEmptySet = NewSet([]rune{})
+
+ samplesInitialized = true
+}
+
+func makeRunes(from rune, to rune) []rune {
+ runes := make([]rune, 0, 30)
+ for r := from; r <= to; r++ {
+ runes = append(runes, r)
+ addToCharacters(r)
+ }
+
+ return runes
+}
+
+func addToCharacters(r rune) {
+ var t Token
+
+ t = NewCharacter(r)
+ testCharacters[r] = t
+
+ t = NewCharacter(r)
+ t.SetFlag(FlagPlus)
+ testCharactersPlus[r] = t
+
+ t = NewCharacter(r)
+ t.SetFlag(FlagStar)
+ testCharactersStar[r] = t
+}
diff --git a/vendor/github.com/yashtewari/glob-intersection/tokenize.go b/vendor/github.com/yashtewari/glob-intersection/tokenize.go
new file mode 100644
index 00000000..0b674316
--- /dev/null
+++ b/vendor/github.com/yashtewari/glob-intersection/tokenize.go
@@ -0,0 +1,251 @@
+package gintersect
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+// Modifier is a special character that affects lexical analysis.
+type Modifier uint
+
+const (
+ ModifierBackslash Modifier = iota
+)
+
+var (
+ // Special runes.
+ tokenTypeRunes = map[rune]TokenType{
+ '.': TTDot,
+ '[': TTSet,
+ ']': TTSet,
+ }
+ flagRunes = map[rune]Flag{
+ '+': FlagPlus,
+ '*': FlagStar,
+ }
+ modifierRunes = map[rune]Modifier{
+ '\\': ModifierBackslash,
+ }
+
+ // Errors.
+ ErrInvalidInput = errors.New("the input provided is invalid")
+ errEndOfInput = errors.New("reached end of input")
+)
+
+// Tokenize converts a rune slice into a Token slice.
+func Tokenize(input []rune) ([]Token, error) {
+ tokens := []Token{}
+ for i, t, err := nextToken(0, input); err != errEndOfInput; i, t, err = nextToken(i, input) {
+ if err != nil {
+ return nil, err
+ }
+
+ tokens = append(tokens, t)
+ }
+
+ return tokens, nil
+}
+
+// nextToken yields the Token starting at the given index of input, and newIndex at which the next Token should start.
+func nextToken(index int, input []rune) (newIndex int, token Token, err error) {
+ var r rune
+ var escaped bool
+
+ newIndex, r, escaped, err = nextRune(index, input)
+ if err != nil {
+ return
+ }
+
+ if !escaped {
+ if ttype, ok := tokenTypeRunes[r]; ok {
+ switch ttype {
+ case TTDot:
+ token = NewDot()
+
+ case TTSet:
+ if r == ']' {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "set-close ']' with no preceding '['"))
+ return
+ }
+
+ newIndex, token, err = nextTokenSet(newIndex, input)
+ if err != nil {
+ return
+ }
+
+ default:
+ panic(errors.Wrapf(errBadImplementation, "encountered unhandled token type: %v", ttype))
+ }
+
+ } else if _, ok := flagRunes[r]; ok {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "flag '%s' must be preceded by a non-flag", string(r)))
+ return
+
+ } else if m, ok := modifierRunes[r]; ok {
+ panic(errors.Wrapf(errBadImplementation, "encountered unhandled modifier: %v", m))
+ } else {
+ // Nothing special to do.
+ token = NewCharacter(r)
+ }
+ } else {
+ // Nothing special to do.
+ token = NewCharacter(r)
+ }
+
+ var f Flag
+ newIndex, f, err = nextFlag(newIndex, input)
+ if err == errEndOfInput {
+ // Let this err be passed in the next cycle, after the current token is consumed.
+ err = nil
+ } else if err != nil {
+ return
+ }
+
+ token.SetFlag(f)
+
+ return
+}
+
+// nextTokenSet yields a Token having type TokenSet and starting at the given index of input.
+// The next Token/Flag should start at newIndex.
+func nextTokenSet(index int, input []rune) (newIndex int, t Token, err error) {
+ var r, prev rune
+ var escaped bool
+
+ runes := make([]rune, 0, 30)
+ complete, prevExists := false, false
+
+ newIndex, r, escaped, err = nextRune(index, input)
+ // If errEndOfInput is encountered, flow of control proceeds to the end of the function,
+ // where the error is handled.
+ if err != nil && err != errEndOfInput {
+ return
+ }
+
+ for ; err != errEndOfInput; newIndex, r, escaped, err = nextRune(newIndex, input) {
+ if err != nil {
+ return
+ }
+
+ if !escaped {
+ // Handle symbols.
+ switch r {
+ case '-':
+ if !prevExists {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "range character '-' must be preceded by a Unicode character"))
+ return
+ }
+ if newIndex >= len(input)-1 {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "range character '-' must be followed by a Unicode character"))
+ return
+ }
+
+ // Get the next rune to know the extent of the range.
+ newIndex, r, escaped, err = nextRune(newIndex, input)
+
+ if !escaped {
+ if r == ']' || r == '-' {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "range character '-' cannot be followed by a special symbol"))
+ return
+ }
+ }
+ if r < prev {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "range is out of order: '%s' comes before '%s' in Unicode", string(r), string(prev)))
+ return
+ }
+
+ for x := prev; x <= r; x++ {
+ runes = append(runes, x)
+ }
+
+ prevExists = false
+
+ case ']':
+ complete = true
+
+ // Nothing special to do.
+ default:
+ runes = append(runes, r)
+ prev, prevExists = r, true
+ }
+ } else {
+ // Nothing special to do.
+ runes = append(runes, r)
+ prev, prevExists = r, true
+ }
+
+ // Don't move the index forward if the set is complete.
+ if complete {
+ break
+ }
+ }
+
+ // End of input is reached before the set completes.
+ if !complete {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, newIndex, "found [ without matching ]"))
+ } else {
+ t = NewSet(runes)
+ }
+
+ return
+}
+
+// nextFlag yields the Flag starting at the given index of input, if any.
+// The next Token should start at newIndex.
+func nextFlag(index int, input []rune) (newIndex int, f Flag, err error) {
+ var escaped, ok bool
+ var r rune
+
+ f = FlagNone
+
+ newIndex, r, escaped, err = nextRune(index, input)
+ if err != nil {
+ return
+ }
+
+ if !escaped {
+ // Revert back to index for later consumption.
+ if f, ok = flagRunes[r]; !ok {
+ newIndex = index
+ }
+ } else {
+ // Revert back to index for later consumption.
+ newIndex = index
+ }
+
+ return
+}
+
+// nextRune yields the rune starting (with modifiers) at the given index of input, with boolean escaped describing whether the rune is escaped.
+// The next rune should start at newIndex.
+func nextRune(index int, input []rune) (newIndex int, r rune, escaped bool, err error) {
+ if index >= len(input) {
+ newIndex = index
+ err = errEndOfInput
+ return
+ }
+
+ if m, ok := modifierRunes[input[index]]; ok {
+ switch m {
+
+ case ModifierBackslash:
+ if index < len(input)-1 {
+ newIndex, r, escaped = index+2, input[index+1], true
+ } else if index == len(input)-1 {
+ err = errors.Wrap(ErrInvalidInput, invalidInputMessage(input, index, "input ends with a \\ (escape) character"))
+ }
+ default:
+ panic(errors.Wrapf(errBadImplementation, "encountered unhandled modifier: %v", m))
+ }
+ } else {
+ newIndex, r, escaped = index+1, input[index], false
+ }
+
+ return
+}
+
+// invalidInputMessage wraps the message describing invalid input with the input itself and index at which it is invalid.
+func invalidInputMessage(input []rune, index int, message string, args ...interface{}) string {
+ return fmt.Sprintf("input:%s, pos:%d, %s", string(input), index, fmt.Sprintf(message, args...))
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index d1b4fca3..2ffb97bf 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
}
const (
+ keyCtrlC = 3
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
@@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
switch b[0] {
case 1: // ^A
return keyHome, b[1:]
+ case 2: // ^B
+ return keyLeft, b[1:]
case 5: // ^E
return keyEnd, b[1:]
+ case 6: // ^F
+ return keyRight, b[1:]
case 8: // ^H
return keyBackspace, b[1:]
case 11: // ^K
@@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) {
return "", io.EOF
}
}
+ if key == keyCtrlC {
+ return "", io.EOF
+ }
if key == keyPasteStart {
t.pasteActive = true
if len(t.line) == 0 {
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index f4d9b5ec..3a67636f 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
+ _ incomparable
p *clientConnPool
done chan struct{} // closed when done
res *ClientConn // valid after done is closed
@@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn)
}
type addConnCall struct {
+ _ incomparable
p *clientConnPool
done chan struct{} // closed when done
err error
@@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
close(c.done)
}
-func (p *clientConnPool) addConn(key string, cc *ClientConn) {
- p.mu.Lock()
- p.addConnLocked(key, cc)
- p.mu.Unlock()
-}
-
// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
for _, v := range p.conns[key] {
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
index cea601fc..b51f0e0c 100644
--- a/vendor/golang.org/x/net/http2/flow.go
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -8,6 +8,8 @@ package http2
// flow is the flow control window's size.
type flow struct {
+ _ incomparable
+
// n is the number of DATA bytes we're allowed to send.
// A flow is kept both on a conn and a per-stream.
n int32
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
index 1565cf27..97f17831 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -150,7 +150,7 @@ func appendIndexed(dst []byte, i uint64) []byte {
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Inremental Indexing"
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
index b412a96c..a1ab2f05 100644
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
return nil
}
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
+
type node struct {
+ _ incomparable
+
// children is non-nil for internal nodes
children *[256]*node
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index bdaba1d4..5571ccfd 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -19,7 +19,6 @@ package http2 // import "golang.org/x/net/http2"
import (
"bufio"
"crypto/tls"
- "errors"
"fmt"
"io"
"net/http"
@@ -173,11 +172,6 @@ func (s SettingID) String() string {
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
-var (
- errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
- errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
-)
-
// validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httpguts.ValidHeaderName for the base rules.
//
@@ -247,6 +241,7 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
+ _ incomparable
w io.Writer // immutable
bw *bufio.Writer // non-nil when data is buffered
}
@@ -319,6 +314,7 @@ func bodyAllowedForStatus(status int) bool {
}
type httpError struct {
+ _ incomparable
msg string
timeout bool
}
@@ -382,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) {
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
+
+// incomparable is a zero-width, non-comparable type. Adding it to a struct
+// makes that struct also non-comparable, and generally doesn't add
+// any size (as long as it's first).
+type incomparable [0]func()
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index a6140099..2a5399ec 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -17,6 +17,7 @@ type pipe struct {
mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu
b pipeBuffer // nil when done reading
+ unread int // bytes unread when done
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
@@ -33,7 +34,7 @@ func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.b == nil {
- return 0
+ return p.unread
}
return p.b.Len()
}
@@ -80,6 +81,7 @@ func (p *pipe) Write(d []byte) (n int, err error) {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
+ p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
@@ -117,6 +119,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
}
p.readFn = fn
if dst == &p.breakErr {
+ if p.b != nil {
+ p.unread += p.b.Len()
+ }
p.b = nil
}
*dst = err
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 5e01ce9a..2aa859f7 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -252,7 +252,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
}
}
if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).")
}
}
@@ -322,7 +322,7 @@ type ServeConnOpts struct {
}
func (o *ServeConnOpts) context() context.Context {
- if o.Context != nil {
+ if o != nil && o.Context != nil {
return o.Context
}
return context.Background()
@@ -581,13 +581,10 @@ type stream struct {
cancelCtx func()
// owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow flow // limits writing from Handler to client
- inflow flow // what the client is allowed to POST/etc to us
- parent *stream // or nil
- numTrailerValues int64
- weight uint8
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
state streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
@@ -764,6 +761,7 @@ func (sc *serverConn) readFrames() {
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
+ _ incomparable
wr FrameWriteRequest // what was written (or attempted)
err error // result of the writeFrame call
}
@@ -774,7 +772,7 @@ type frameWriteResult struct {
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
err := wr.write.writeFrame(sc)
- sc.wroteFrameCh <- frameWriteResult{wr, err}
+ sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
}
func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -1164,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
if wr.write.staysWithinBuffer(sc.bw.Available()) {
sc.writingFrameAsync = false
err := wr.write.writeFrame(sc)
- sc.wroteFrame(frameWriteResult{wr, err})
+ sc.wroteFrame(frameWriteResult{wr: wr, err: err})
} else {
sc.writingFrameAsync = true
go sc.writeFrameAsync(wr)
@@ -1696,6 +1694,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
if len(data) > 0 {
wrote, err := st.body.Write(data)
if err != nil {
+ sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return streamError(id, ErrCodeStreamClosed)
}
if wrote != len(data) {
@@ -2022,7 +2021,11 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
if bodyOpen {
if vv, ok := rp.header["Content-Length"]; ok {
- req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
+ req.ContentLength = int64(cl)
+ } else {
+ req.ContentLength = 0
+ }
} else {
req.ContentLength = -1
}
@@ -2060,7 +2063,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var trailer http.Header
for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ key = http.CanonicalHeaderKey(textproto.TrimString(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
// Bogus. (copy of http1 rules)
@@ -2278,6 +2281,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
// requestBody is the Handler's Request.Body type.
// Read and Close may be called concurrently.
type requestBody struct {
+ _ incomparable
stream *stream
conn *serverConn
closed bool // for use by Close only
@@ -2404,9 +2408,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var ctype, clen string
if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
rws.snapHeader.Del("Content-Length")
- clen64, err := strconv.ParseInt(clen, 10, 64)
- if err == nil && clen64 >= 0 {
- rws.sentContentLen = clen64
+ if cl, err := strconv.ParseUint(clen, 10, 63); err == nil {
+ rws.sentContentLen = int64(cl)
} else {
clen = ""
}
@@ -2415,7 +2418,11 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
- if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
+ // If the Content-Encoding is non-blank, we shouldn't
+ // sniff the body. See Issue golang.org/issue/31753.
+ ce := rws.snapHeader.Get("Content-Encoding")
+ hasCE := len(ce) > 0
+ if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
ctype = http.DetectContentType(p)
}
var date string
@@ -2524,7 +2531,7 @@ const TrailerPrefix = "Trailer:"
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
// and gRPC libraries permit setting trailers mid-stream without
-// predeclarnig them. So: change of plans. We still permit the old
+// predeclaring them. So: change of plans. We still permit the old
// way, but we also permit this hack: if a Header() key begins with
// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
// invalid token byte anyway, there is no ambiguity. (And it's already
@@ -2824,7 +2831,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
// is in either the "open" or "half-closed (remote)" state.
if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
- // responseWriter.Push checks that the stream is peer-initiaed.
+ // responseWriter.Push checks that the stream is peer-initiated.
msg.done <- errStreamClosed
return
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index aeac7d8a..4ec32669 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -93,7 +93,7 @@ type Transport struct {
// send in the initial settings frame. It is how many bytes
// of response headers are allowed. Unlike the http2 spec, zero here
// means to use a default limit (currently 10MB). If you actually
- // want to advertise an ulimited value to the peer, Transport
+ // want to advertise an unlimited value to the peer, Transport
// interprets the highest possible value here (0xffffffff or 1<<32-1)
// to mean no limit.
MaxHeaderListSize uint32
@@ -108,6 +108,19 @@ type Transport struct {
// waiting for their turn.
StrictMaxConcurrentStreams bool
+ // ReadIdleTimeout is the timeout after which a health check using ping
+ // frame will be carried out if no frame is received on the connection.
+ // Note that a ping response will is considered a received frame, so if
+ // there is no other traffic on the connection, the health check will
+ // be performed every ReadIdleTimeout interval.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to Ping is not received.
+ // Defaults to 15s.
+ PingTimeout time.Duration
+
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
@@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
+func (t *Transport) pingTimeout() time.Duration {
+ if t.PingTimeout == 0 {
+ return 15 * time.Second
+ }
+ return t.PingTimeout
+
+}
+
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
func ConfigureTransport(t1 *http.Transport) error {
@@ -227,6 +248,7 @@ type ClientConn struct {
br *bufio.Reader
fr *Framer
lastActive time.Time
+ lastIdle time.Time // time last idle
// Settings from peer: (also guarded by mu)
maxFrameSize uint32
maxConcurrentStreams uint32
@@ -603,7 +625,7 @@ func (t *Transport) expectContinueTimeout() time.Duration {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, false)
+ return t.newClientConn(c, t.disableKeepAlives())
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
@@ -674,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
return cc, nil
}
+func (cc *ClientConn) healthCheck() {
+ pingTimeout := cc.t.pingTimeout()
+ // We don't need to periodically ping in the health check, because the readLoop of ClientConn will
+ // trigger the healthCheck again if there is no frame received.
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+ defer cancel()
+ err := cc.Ping(ctx)
+ if err != nil {
+ cc.closeForLostPing()
+ cc.t.connPool().MarkDead(cc)
+ return
+ }
+}
+
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -736,7 +772,8 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
- int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
@@ -746,6 +783,16 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
return st.canTakeNewRequest
}
+// tooIdleLocked reports whether this connection has been been sitting idle
+// for too much wall time.
+func (cc *ClientConn) tooIdleLocked() bool {
+ // The Round(0) strips the monontonic clock reading so the
+ // times are compared based on their wall time. We don't want
+ // to reuse a connection that's been sitting idle during
+ // VM/laptop suspend if monotonic time was also frozen.
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+}
+
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
@@ -834,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error {
return nil
}
-// Close closes the client connection immediately.
-//
-// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
-func (cc *ClientConn) Close() error {
+// closes the client connection immediately. In-flight requests are interrupted.
+// err is sent to streams.
+func (cc *ClientConn) closeForError(err error) error {
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
- err := errors.New("http2: client connection force closed via ClientConn.Close")
for id, cs := range cc.streams {
select {
case cs.resc <- resAndError{err: err}:
@@ -854,6 +899,20 @@ func (cc *ClientConn) Close() error {
return cc.tconn.Close()
}
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+ err := errors.New("http2: client connection force closed via ClientConn.Close")
+ return cc.closeForError(err)
+}
+
+// closes the client connection immediately. In-flight requests are interrupted.
+func (cc *ClientConn) closeForLostPing() error {
+ err := errors.New("http2: client connection lost")
+ return cc.closeForError(err)
+}
+
const maxAllocFrameSize = 512 << 10
// frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -904,7 +963,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
k = http.CanonicalHeaderKey(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", &badStringError{"invalid Trailer key", k}
+ return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
@@ -1150,6 +1209,7 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
}
return errClientConnUnusable
}
+ cc.lastIdle = time.Time{}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
if waitingForConn != nil {
close(waitingForConn)
@@ -1216,6 +1276,8 @@ var (
// abort request body write, but send stream reset of cancel.
errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+ errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
@@ -1238,10 +1300,32 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
req := cs.req
hasTrailers := req.Trailer != nil
+ remainLen := actualContentLength(req)
+ hasContentLen := remainLen != -1
var sawEOF bool
for !sawEOF {
- n, err := body.Read(buf)
+ n, err := body.Read(buf[:len(buf)-1])
+ if hasContentLen {
+ remainLen -= int64(n)
+ if remainLen == 0 && err == nil {
+ // The request body's Content-Length was predeclared and
+ // we just finished reading it all, but the underlying io.Reader
+ // returned the final chunk with a nil error (which is one of
+ // the two valid things a Reader can do at EOF). Because we'd prefer
+ // to send the END_STREAM bit early, double-check that we're actually
+ // at EOF. Subsequent reads should return (0, EOF) at this point.
+ // If either value is different, we return an error in one of two ways below.
+ var n1 int
+ n1, err = body.Read(buf[n:])
+ remainLen -= int64(n1)
+ }
+ if remainLen < 0 {
+ err = errReqBodyTooLong
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+ return err
+ }
+ }
if err == io.EOF {
sawEOF = true
err = nil
@@ -1357,13 +1441,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
-type badStringError struct {
- what string
- str string
-}
-
-func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
-
// requires cc.mu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
@@ -1454,7 +1531,29 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if vv[0] == "" {
continue
}
-
+ } else if strings.EqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
}
for _, v := range vv {
@@ -1557,6 +1656,7 @@ func (cc *ClientConn) writeHeader(name, value string) {
}
type resAndError struct {
+ _ incomparable
res *http.Response
err error
}
@@ -1592,6 +1692,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
delete(cc.streams, id)
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
}
close(cs.done)
// Wake up checkResetOrDone via clientStream.awaitFlowControl and
@@ -1603,6 +1704,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
+ _ incomparable
cc *ClientConn
closeWhenIdle bool
}
@@ -1682,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error {
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
gotReply := false // ever saw a HEADERS reply
gotSettings := false
+ readIdleTimeout := cc.t.ReadIdleTimeout
+ var t *time.Timer
+ if readIdleTimeout != 0 {
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
+ defer t.Stop()
+ }
for {
f, err := cc.fr.ReadFrame()
+ if t != nil {
+ t.Reset(readIdleTimeout)
+ }
if err != nil {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
@@ -1832,7 +1943,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
}
- header := make(http.Header)
+ regularFields := f.RegularFields()
+ strs := make([]string, len(regularFields))
+ header := make(http.Header, len(regularFields))
res := &http.Response{
Proto: "HTTP/2.0",
ProtoMajor: 2,
@@ -1840,7 +1953,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
StatusCode: statusCode,
Status: status + " " + http.StatusText(statusCode),
}
- for _, hf := range f.RegularFields() {
+ for _, hf := range regularFields {
key := http.CanonicalHeaderKey(hf.Name)
if key == "Trailer" {
t := res.Trailer
@@ -1852,7 +1965,18 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
t[http.CanonicalHeaderKey(v)] = nil
})
} else {
- header[key] = append(header[key], hf.Value)
+ vv := header[key]
+ if vv == nil && len(strs) > 0 {
+ // More than likely this will be a single-element key.
+ // Most headers aren't multi-valued.
+ // Set the capacity on strs[0] to 1, so any future append
+ // won't extend the slice into the other strings.
+ vv, strs = strs[:1:1], strs[1:]
+ vv[0] = hf.Value
+ header[key] = vv
+ } else {
+ header[key] = append(vv, hf.Value)
+ }
}
}
@@ -1882,8 +2006,8 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
if !streamEnded || isHead {
res.ContentLength = -1
if clens := res.Header["Content-Length"]; len(clens) == 1 {
- if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
- res.ContentLength = clen64
+ if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
+ res.ContentLength = int64(cl)
} else {
// TODO: care? unlike http/1, it won't mess up our framing, so it's
// more safe smuggling-wise to ignore.
@@ -2138,8 +2262,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
return nil
}
-var errInvalidTrailers = errors.New("http2: invalid trailers")
-
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
// TODO: check that any declared content-length matches, like
// server.go's (*stream).endStream method.
@@ -2370,7 +2492,6 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error)
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
- errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
)
func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -2404,11 +2525,13 @@ func strSliceContains(ss []string, s string) bool {
type erringRoundTripper struct{ err error }
+func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
// gzipReader wraps a response body so it can lazily
// call gzip.NewReader on the first call to Read
type gzipReader struct {
+ _ incomparable
body io.ReadCloser // underlying Response.Body
zr *gzip.Reader // lazily-initialized gzip reader
zerr error // sticky error
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
index 848fed6e..2618b2c1 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -149,7 +149,7 @@ func (n *priorityNode) addBytes(b int64) {
}
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this funcion returns true and the
+// with a non-empty write queue. When f returns true, this function returns true and the
// walk halts. tmp is used as scratch space for sorting.
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go
index c515d7ad..8ce0811f 100644
--- a/vendor/golang.org/x/net/idna/tables11.0.0.go
+++ b/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-// +build go1.13
+// +build go1.13,!go1.14
package idna
diff --git a/vendor/golang.org/x/net/idna/tables12.00.go b/vendor/golang.org/x/net/idna/tables12.00.go
new file mode 100644
index 00000000..f4b8ea36
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/tables12.00.go
@@ -0,0 +1,4733 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.14
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "12.0.0"
+
+var mappings string = "" + // Size: 8178 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多" +
+ "\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販" +
+ "\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打" +
+ "\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕" +
+ "\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你" +
+ "\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內" +
+ "\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉" +
+ "\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟" +
+ "\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙" +
+ "\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型" +
+ "\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮" +
+ "\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍" +
+ "\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰" +
+ "\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹" +
+ "\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞" +
+ "\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢" +
+ "\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙" +
+ "\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓" +
+ "\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛" +
+ "\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派" +
+ "\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆" +
+ "\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀" +
+ "\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾" +
+ "\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌" +
+ "\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒" +
+ "\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺" +
+ "\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋" +
+ "\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著" +
+ "\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜" +
+ "\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠" +
+ "\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁" +
+ "\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘" +
+ "\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲" +
+ "\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭" +
+ "\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4862 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" +
+ "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" +
+ "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" +
+ "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" +
+ "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" +
+ "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" +
+ "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" +
+ "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" +
+ "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" +
+ "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" +
+ "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" +
+ "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" +
+ "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" +
+ "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" +
+ "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" +
+ "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" +
+ "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" +
+ "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" +
+ "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" +
+ "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" +
+ "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" +
+ "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" +
+ "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" +
+ "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" +
+ "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" +
+ "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" +
+ "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" +
+ "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" +
+ "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" +
+ "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" +
+ "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" +
+ "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" +
+ "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" +
+ "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" +
+ "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" +
+ "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" +
+ "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" +
+ "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" +
+ "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" +
+ "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" +
+ "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" +
+ "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" +
+ "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" +
+ "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" +
+ "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" +
+ "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" +
+ "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" +
+ "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" +
+ "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." +
+ "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" +
+ "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" +
+ "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" +
+ "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" +
+ "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" +
+ "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" +
+ "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" +
+ "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03" +
+ "\x09\x0c\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06" +
+ "!3\x03\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05" +
+ "\x03\x07<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" +
+ "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" +
+ "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" +
+ "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" +
+ "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" +
+ "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" +
+ ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" +
+ "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" +
+ "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" +
+ "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" +
+ "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" +
+ "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" +
+ "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" +
+ "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" +
+ "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" +
+ "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" +
+ "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" +
+ "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" +
+ ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29708 bytes (29.01 KiB). Checksum: c3ecc76d8fffa6e6.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866,
+ 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e,
+ 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe,
+ 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e,
+ 0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e,
+ 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde,
+ 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e,
+ 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5,
+ 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875,
+ 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935,
+ 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5,
+ 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15,
+ 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75,
+ 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95,
+ 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75,
+ 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5,
+ 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55,
+ 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15,
+ 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95,
+ 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5,
+ 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5,
+ 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5,
+ 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5,
+ 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275,
+ 0xf1e: 0x3295, 0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5,
+ 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5,
+ 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475,
+ 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535,
+ 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5,
+ 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795,
+ 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855,
+ 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915,
+ 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5,
+ 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95,
+ 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55,
+ 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5,
+ 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95,
+ 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d,
+ 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d,
+ 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05,
+ 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95,
+ 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd,
+ 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55,
+ 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5,
+ 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015,
+ 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x4045, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x4065, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x40a5, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40c5, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x4105,
+ 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x41a5, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6419, 0x10a2: 0x4205, 0x10a3: 0x4225,
+ 0x10a4: 0x4245, 0x10a5: 0x6431, 0x10a6: 0x4265, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x4305, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x4325, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x4345, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x43a5, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x4425,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008,
+ 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad,
+ 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d,
+ 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008,
+ 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d,
+ 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d,
+ 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d,
+ 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d,
+ 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed,
+ 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d,
+ 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d,
+ 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d8d,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7dad,
+ 0x1558: 0x7dcd, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7ded, 0x1572: 0x7e0d, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7e2d, 0x157a: 0x7e4d, 0x157b: 0x7e6d,
+ 0x157c: 0x7e2d, 0x157d: 0x7e8d, 0x157e: 0x7ead, 0x157f: 0x7e8d,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7ecd, 0x1581: 0x7eed, 0x1582: 0x7f0d, 0x1583: 0x7eed, 0x1584: 0x7f2d, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f4e, 0x158a: 0x7f6e, 0x158b: 0x7f8e,
+ 0x158c: 0x7fae, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7fcd,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fed, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7ecd,
+ 0x159e: 0x7f2d, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x800e, 0x15b1: 0xb009, 0x15b2: 0x802e, 0x15b3: 0x0808, 0x15b4: 0x804e, 0x15b5: 0x0040,
+ 0x15b6: 0x806e, 0x15b7: 0xb031, 0x15b8: 0x808e, 0x15b9: 0xb059, 0x15ba: 0x80ae, 0x15bb: 0xb081,
+ 0x15bc: 0x80ce, 0x15bd: 0xb0a9, 0x15be: 0x80ee, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x810d, 0x16a0: 0x812d, 0x16a1: 0x29d1, 0x16a2: 0x814d, 0x16a3: 0x814d,
+ 0x16a4: 0x816d, 0x16a5: 0x818d, 0x16a6: 0x81ad, 0x16a7: 0x81cd, 0x16a8: 0x81ed, 0x16a9: 0x820d,
+ 0x16aa: 0x822d, 0x16ab: 0x824d, 0x16ac: 0x826d, 0x16ad: 0x828d, 0x16ae: 0x82ad, 0x16af: 0x82cd,
+ 0x16b0: 0x82ed, 0x16b1: 0x830d, 0x16b2: 0x832d, 0x16b3: 0x834d, 0x16b4: 0x836d, 0x16b5: 0x838d,
+ 0x16b6: 0x83ad, 0x16b7: 0x83cd, 0x16b8: 0x83ed, 0x16b9: 0x840d, 0x16ba: 0x842d, 0x16bb: 0x844d,
+ 0x16bc: 0x81ed, 0x16bd: 0x846d, 0x16be: 0x848d, 0x16bf: 0x824d,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x84ad, 0x16c1: 0x84cd, 0x16c2: 0x84ed, 0x16c3: 0x850d, 0x16c4: 0x852d, 0x16c5: 0x854d,
+ 0x16c6: 0x856d, 0x16c7: 0x858d, 0x16c8: 0x850d, 0x16c9: 0x85ad, 0x16ca: 0x850d, 0x16cb: 0x85cd,
+ 0x16cc: 0x85cd, 0x16cd: 0x85ed, 0x16ce: 0x85ed, 0x16cf: 0x860d, 0x16d0: 0x854d, 0x16d1: 0x862d,
+ 0x16d2: 0x864d, 0x16d3: 0x862d, 0x16d4: 0x866d, 0x16d5: 0x864d, 0x16d6: 0x868d, 0x16d7: 0x868d,
+ 0x16d8: 0x86ad, 0x16d9: 0x86ad, 0x16da: 0x86cd, 0x16db: 0x86cd, 0x16dc: 0x864d, 0x16dd: 0x814d,
+ 0x16de: 0x86ed, 0x16df: 0x870d, 0x16e0: 0x0040, 0x16e1: 0x872d, 0x16e2: 0x874d, 0x16e3: 0x876d,
+ 0x16e4: 0x878d, 0x16e5: 0x876d, 0x16e6: 0x87ad, 0x16e7: 0x87cd, 0x16e8: 0x87ed, 0x16e9: 0x87ed,
+ 0x16ea: 0x880d, 0x16eb: 0x880d, 0x16ec: 0x882d, 0x16ed: 0x882d, 0x16ee: 0x880d, 0x16ef: 0x880d,
+ 0x16f0: 0x884d, 0x16f1: 0x886d, 0x16f2: 0x888d, 0x16f3: 0x88ad, 0x16f4: 0x88cd, 0x16f5: 0x88ed,
+ 0x16f6: 0x88ed, 0x16f7: 0x88ed, 0x16f8: 0x890d, 0x16f9: 0x890d, 0x16fa: 0x890d, 0x16fb: 0x890d,
+ 0x16fc: 0x87ed, 0x16fd: 0x87ed, 0x16fe: 0x87ed, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x874d, 0x1703: 0x872d, 0x1704: 0x892d, 0x1705: 0x872d,
+ 0x1706: 0x874d, 0x1707: 0x872d, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x894d, 0x170b: 0x874d,
+ 0x170c: 0x896d, 0x170d: 0x892d, 0x170e: 0x896d, 0x170f: 0x874d, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x898d, 0x1713: 0x89ad, 0x1714: 0x88ad, 0x1715: 0x896d, 0x1716: 0x892d, 0x1717: 0x896d,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x89cd, 0x171b: 0x89ed, 0x171c: 0x89cd, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x8a0e,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x8a2d, 0x1727: 0x0040, 0x1728: 0x8a4d, 0x1729: 0x8a6d,
+ 0x172a: 0x8a8d, 0x172b: 0x8a6d, 0x172c: 0x8aad, 0x172d: 0x8acd, 0x172e: 0x8aed, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b3d, 0x1c5f: 0x8b3d, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0xc1c1, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1f1, 0x1dc1: 0xc229, 0x1dc2: 0xc261, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc281, 0x1dd1: 0xc2a1,
+ 0x1dd2: 0xc2c1, 0x1dd3: 0xc2e1, 0x1dd4: 0xc301, 0x1dd5: 0xc321, 0x1dd6: 0xc341, 0x1dd7: 0xc361,
+ 0x1dd8: 0xc381, 0x1dd9: 0xc3a1, 0x1dda: 0xc3c1, 0x1ddb: 0xc3e1, 0x1ddc: 0xc401, 0x1ddd: 0xc421,
+ 0x1dde: 0xc441, 0x1ddf: 0xc461, 0x1de0: 0xc481, 0x1de1: 0xc4a1, 0x1de2: 0xc4c1, 0x1de3: 0xc4e1,
+ 0x1de4: 0xc501, 0x1de5: 0xc521, 0x1de6: 0xc541, 0x1de7: 0xc561, 0x1de8: 0xc581, 0x1de9: 0xc5a1,
+ 0x1dea: 0xc5c1, 0x1deb: 0xc5e1, 0x1dec: 0xc601, 0x1ded: 0xc621, 0x1dee: 0xc641, 0x1def: 0xc661,
+ 0x1df0: 0xc681, 0x1df1: 0xc6a1, 0x1df2: 0xc6c1, 0x1df3: 0xc6e1, 0x1df4: 0xc701, 0x1df5: 0xc721,
+ 0x1df6: 0xc741, 0x1df7: 0xc761, 0x1df8: 0xc781, 0x1df9: 0xc7a1, 0x1dfa: 0xc7c1, 0x1dfb: 0xc7e1,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcb11, 0x1e01: 0xcb31, 0x1e02: 0xcb51, 0x1e03: 0x8b55, 0x1e04: 0xcb71, 0x1e05: 0xcb91,
+ 0x1e06: 0xcbb1, 0x1e07: 0xcbd1, 0x1e08: 0xcbf1, 0x1e09: 0xcc11, 0x1e0a: 0xcc31, 0x1e0b: 0xcc51,
+ 0x1e0c: 0xcc71, 0x1e0d: 0x8b75, 0x1e0e: 0xcc91, 0x1e0f: 0xccb1, 0x1e10: 0xccd1, 0x1e11: 0xccf1,
+ 0x1e12: 0x8b95, 0x1e13: 0xcd11, 0x1e14: 0xcd31, 0x1e15: 0xc441, 0x1e16: 0x8bb5, 0x1e17: 0xcd51,
+ 0x1e18: 0xcd71, 0x1e19: 0xcd91, 0x1e1a: 0xcdb1, 0x1e1b: 0xcdd1, 0x1e1c: 0x8bd5, 0x1e1d: 0xcdf1,
+ 0x1e1e: 0xce11, 0x1e1f: 0xce31, 0x1e20: 0xce51, 0x1e21: 0xce71, 0x1e22: 0xc7a1, 0x1e23: 0xce91,
+ 0x1e24: 0xceb1, 0x1e25: 0xced1, 0x1e26: 0xcef1, 0x1e27: 0xcf11, 0x1e28: 0xcf31, 0x1e29: 0xcf51,
+ 0x1e2a: 0xcf71, 0x1e2b: 0xcf91, 0x1e2c: 0xcfb1, 0x1e2d: 0xcfd1, 0x1e2e: 0xcff1, 0x1e2f: 0xd011,
+ 0x1e30: 0xd031, 0x1e31: 0xd051, 0x1e32: 0xd051, 0x1e33: 0xd051, 0x1e34: 0x8bf5, 0x1e35: 0xd071,
+ 0x1e36: 0xd091, 0x1e37: 0xd0b1, 0x1e38: 0x8c15, 0x1e39: 0xd0d1, 0x1e3a: 0xd0f1, 0x1e3b: 0xd111,
+ 0x1e3c: 0xd131, 0x1e3d: 0xd151, 0x1e3e: 0xd171, 0x1e3f: 0xd191,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd1b1, 0x1e41: 0xd1d1, 0x1e42: 0xd1f1, 0x1e43: 0xd211, 0x1e44: 0xd231, 0x1e45: 0xd251,
+ 0x1e46: 0xd251, 0x1e47: 0xd271, 0x1e48: 0xd291, 0x1e49: 0xd2b1, 0x1e4a: 0xd2d1, 0x1e4b: 0xd2f1,
+ 0x1e4c: 0xd311, 0x1e4d: 0xd331, 0x1e4e: 0xd351, 0x1e4f: 0xd371, 0x1e50: 0xd391, 0x1e51: 0xd3b1,
+ 0x1e52: 0xd3d1, 0x1e53: 0xd3f1, 0x1e54: 0xd411, 0x1e55: 0xd431, 0x1e56: 0xd451, 0x1e57: 0xd471,
+ 0x1e58: 0xd491, 0x1e59: 0x8c35, 0x1e5a: 0xd4b1, 0x1e5b: 0xd4d1, 0x1e5c: 0xd4f1, 0x1e5d: 0xc321,
+ 0x1e5e: 0xd511, 0x1e5f: 0xd531, 0x1e60: 0x8c55, 0x1e61: 0x8c75, 0x1e62: 0xd551, 0x1e63: 0xd571,
+ 0x1e64: 0xd591, 0x1e65: 0xd5b1, 0x1e66: 0xd5d1, 0x1e67: 0xd5f1, 0x1e68: 0x2040, 0x1e69: 0xd611,
+ 0x1e6a: 0xd631, 0x1e6b: 0xd631, 0x1e6c: 0x8c95, 0x1e6d: 0xd651, 0x1e6e: 0xd671, 0x1e6f: 0xd691,
+ 0x1e70: 0xd6b1, 0x1e71: 0x8cb5, 0x1e72: 0xd6d1, 0x1e73: 0xd6f1, 0x1e74: 0x2040, 0x1e75: 0xd711,
+ 0x1e76: 0xd731, 0x1e77: 0xd751, 0x1e78: 0xd771, 0x1e79: 0xd791, 0x1e7a: 0xd7b1, 0x1e7b: 0x8cd5,
+ 0x1e7c: 0xd7d1, 0x1e7d: 0x8cf5, 0x1e7e: 0xd7f1, 0x1e7f: 0xd811,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd831, 0x1e81: 0xd851, 0x1e82: 0xd871, 0x1e83: 0xd891, 0x1e84: 0xd8b1, 0x1e85: 0xd8d1,
+ 0x1e86: 0xd8f1, 0x1e87: 0xd911, 0x1e88: 0xd931, 0x1e89: 0x8d15, 0x1e8a: 0xd951, 0x1e8b: 0xd971,
+ 0x1e8c: 0xd991, 0x1e8d: 0xd9b1, 0x1e8e: 0xd9d1, 0x1e8f: 0x8d35, 0x1e90: 0xd9f1, 0x1e91: 0x8d55,
+ 0x1e92: 0x8d75, 0x1e93: 0xda11, 0x1e94: 0xda31, 0x1e95: 0xda31, 0x1e96: 0xda51, 0x1e97: 0x8d95,
+ 0x1e98: 0x8db5, 0x1e99: 0xda71, 0x1e9a: 0xda91, 0x1e9b: 0xdab1, 0x1e9c: 0xdad1, 0x1e9d: 0xdaf1,
+ 0x1e9e: 0xdb11, 0x1e9f: 0xdb31, 0x1ea0: 0xdb51, 0x1ea1: 0xdb71, 0x1ea2: 0xdb91, 0x1ea3: 0xdbb1,
+ 0x1ea4: 0x8dd5, 0x1ea5: 0xdbd1, 0x1ea6: 0xdbf1, 0x1ea7: 0xdc11, 0x1ea8: 0xdc31, 0x1ea9: 0xdc11,
+ 0x1eaa: 0xdc51, 0x1eab: 0xdc71, 0x1eac: 0xdc91, 0x1ead: 0xdcb1, 0x1eae: 0xdcd1, 0x1eaf: 0xdcf1,
+ 0x1eb0: 0xdd11, 0x1eb1: 0xdd31, 0x1eb2: 0xdd51, 0x1eb3: 0xdd71, 0x1eb4: 0xdd91, 0x1eb5: 0xddb1,
+ 0x1eb6: 0xddd1, 0x1eb7: 0xddf1, 0x1eb8: 0x8df5, 0x1eb9: 0xde11, 0x1eba: 0xde31, 0x1ebb: 0xde51,
+ 0x1ebc: 0xde71, 0x1ebd: 0xde91, 0x1ebe: 0x8e15, 0x1ebf: 0xdeb1,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe5b1, 0x1ec1: 0xe5d1, 0x1ec2: 0xe5f1, 0x1ec3: 0xe611, 0x1ec4: 0xe631, 0x1ec5: 0xe651,
+ 0x1ec6: 0x8f35, 0x1ec7: 0xe671, 0x1ec8: 0xe691, 0x1ec9: 0xe6b1, 0x1eca: 0xe6d1, 0x1ecb: 0xe6f1,
+ 0x1ecc: 0xe711, 0x1ecd: 0x8f55, 0x1ece: 0xe731, 0x1ecf: 0xe751, 0x1ed0: 0x8f75, 0x1ed1: 0x8f95,
+ 0x1ed2: 0xe771, 0x1ed3: 0xe791, 0x1ed4: 0xe7b1, 0x1ed5: 0xe7d1, 0x1ed6: 0xe7f1, 0x1ed7: 0xe811,
+ 0x1ed8: 0xe831, 0x1ed9: 0xe851, 0x1eda: 0xe871, 0x1edb: 0x8fb5, 0x1edc: 0xe891, 0x1edd: 0x8fd5,
+ 0x1ede: 0xe8b1, 0x1edf: 0x2040, 0x1ee0: 0xe8d1, 0x1ee1: 0xe8f1, 0x1ee2: 0xe911, 0x1ee3: 0x8ff5,
+ 0x1ee4: 0xe931, 0x1ee5: 0xe951, 0x1ee6: 0x9015, 0x1ee7: 0x9035, 0x1ee8: 0xe971, 0x1ee9: 0xe991,
+ 0x1eea: 0xe9b1, 0x1eeb: 0xe9d1, 0x1eec: 0xe9f1, 0x1eed: 0xe9f1, 0x1eee: 0xea11, 0x1eef: 0xea31,
+ 0x1ef0: 0xea51, 0x1ef1: 0xea71, 0x1ef2: 0xea91, 0x1ef3: 0xeab1, 0x1ef4: 0xead1, 0x1ef5: 0x9055,
+ 0x1ef6: 0xeaf1, 0x1ef7: 0x9075, 0x1ef8: 0xeb11, 0x1ef9: 0x9095, 0x1efa: 0xeb31, 0x1efb: 0x90b5,
+ 0x1efc: 0x90d5, 0x1efd: 0x90f5, 0x1efe: 0xeb51, 0x1eff: 0xeb71,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb91, 0x1f01: 0x9115, 0x1f02: 0x9135, 0x1f03: 0x9155, 0x1f04: 0x9175, 0x1f05: 0xebb1,
+ 0x1f06: 0xebd1, 0x1f07: 0xebd1, 0x1f08: 0xebf1, 0x1f09: 0xec11, 0x1f0a: 0xec31, 0x1f0b: 0xec51,
+ 0x1f0c: 0xec71, 0x1f0d: 0x9195, 0x1f0e: 0xec91, 0x1f0f: 0xecb1, 0x1f10: 0xecd1, 0x1f11: 0xecf1,
+ 0x1f12: 0x91b5, 0x1f13: 0xed11, 0x1f14: 0x91d5, 0x1f15: 0x91f5, 0x1f16: 0xed31, 0x1f17: 0xed51,
+ 0x1f18: 0xed71, 0x1f19: 0xed91, 0x1f1a: 0xedb1, 0x1f1b: 0xedd1, 0x1f1c: 0x9215, 0x1f1d: 0x9235,
+ 0x1f1e: 0x9255, 0x1f1f: 0x2040, 0x1f20: 0xedf1, 0x1f21: 0x9275, 0x1f22: 0xee11, 0x1f23: 0xee31,
+ 0x1f24: 0xee51, 0x1f25: 0x9295, 0x1f26: 0xee71, 0x1f27: 0xee91, 0x1f28: 0xeeb1, 0x1f29: 0xeed1,
+ 0x1f2a: 0xeef1, 0x1f2b: 0x92b5, 0x1f2c: 0xef11, 0x1f2d: 0xef31, 0x1f2e: 0xef51, 0x1f2f: 0xef71,
+ 0x1f30: 0xef91, 0x1f31: 0xefb1, 0x1f32: 0x92d5, 0x1f33: 0x92f5, 0x1f34: 0xefd1, 0x1f35: 0x9315,
+ 0x1f36: 0xeff1, 0x1f37: 0x9335, 0x1f38: 0xf011, 0x1f39: 0xf031, 0x1f3a: 0xf051, 0x1f3b: 0x9355,
+ 0x1f3c: 0x9375, 0x1f3d: 0xf071, 0x1f3e: 0x9395, 0x1f3f: 0xf091,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6d1, 0x1f41: 0xf6f1, 0x1f42: 0xf711, 0x1f43: 0xf731, 0x1f44: 0xf751, 0x1f45: 0x9555,
+ 0x1f46: 0xf771, 0x1f47: 0xf791, 0x1f48: 0xf7b1, 0x1f49: 0xf7d1, 0x1f4a: 0xf7f1, 0x1f4b: 0x9575,
+ 0x1f4c: 0x9595, 0x1f4d: 0xf811, 0x1f4e: 0xf831, 0x1f4f: 0xf851, 0x1f50: 0xf871, 0x1f51: 0xf891,
+ 0x1f52: 0xf8b1, 0x1f53: 0x95b5, 0x1f54: 0xf8d1, 0x1f55: 0xf8f1, 0x1f56: 0xf911, 0x1f57: 0xf931,
+ 0x1f58: 0x95d5, 0x1f59: 0x95f5, 0x1f5a: 0xf951, 0x1f5b: 0xf971, 0x1f5c: 0xf991, 0x1f5d: 0x9615,
+ 0x1f5e: 0xf9b1, 0x1f5f: 0xf9d1, 0x1f60: 0x684d, 0x1f61: 0x9635, 0x1f62: 0xf9f1, 0x1f63: 0xfa11,
+ 0x1f64: 0xfa31, 0x1f65: 0x9655, 0x1f66: 0xfa51, 0x1f67: 0xfa71, 0x1f68: 0xfa91, 0x1f69: 0xfab1,
+ 0x1f6a: 0xfad1, 0x1f6b: 0xfaf1, 0x1f6c: 0xfb11, 0x1f6d: 0x9675, 0x1f6e: 0xfb31, 0x1f6f: 0xfb51,
+ 0x1f70: 0xfb71, 0x1f71: 0x9695, 0x1f72: 0xfb91, 0x1f73: 0xfbb1, 0x1f74: 0xfbd1, 0x1f75: 0xfbf1,
+ 0x1f76: 0x7b6d, 0x1f77: 0x96b5, 0x1f78: 0xfc11, 0x1f79: 0xfc31, 0x1f7a: 0xfc51, 0x1f7b: 0x96d5,
+ 0x1f7c: 0xfc71, 0x1f7d: 0x96f5, 0x1f7e: 0xfc91, 0x1f7f: 0xfc91,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfcb1, 0x1f81: 0x9715, 0x1f82: 0xfcd1, 0x1f83: 0xfcf1, 0x1f84: 0xfd11, 0x1f85: 0xfd31,
+ 0x1f86: 0xfd51, 0x1f87: 0xfd71, 0x1f88: 0xfd91, 0x1f89: 0x9735, 0x1f8a: 0xfdb1, 0x1f8b: 0xfdd1,
+ 0x1f8c: 0xfdf1, 0x1f8d: 0xfe11, 0x1f8e: 0xfe31, 0x1f8f: 0xfe51, 0x1f90: 0x9755, 0x1f91: 0xfe71,
+ 0x1f92: 0x9775, 0x1f93: 0x9795, 0x1f94: 0x97b5, 0x1f95: 0xfe91, 0x1f96: 0xfeb1, 0x1f97: 0xfed1,
+ 0x1f98: 0xfef1, 0x1f99: 0xff11, 0x1f9a: 0xff31, 0x1f9b: 0xff51, 0x1f9c: 0xff71, 0x1f9d: 0x97d5,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0x9b,
+ 0x1b0: 0xd0, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4,
+ 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe0,
+ 0x1c8: 0xe1, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe2,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe5, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe6, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe7,
+ 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef,
+ 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf8, 0x31f: 0xf9,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd,
+ 0x3a8: 0x47, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x100, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x101, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9f, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d,
+ 0x3d0: 0x10e, 0x3d1: 0x9f, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xba, 0x3e6: 0x11a, 0x3e7: 0x11b,
+ 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x5b, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x121, 0x3f1: 0x122, 0x3f2: 0x123, 0x3f3: 0x124, 0x3f4: 0x125, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x127, 0x3fd: 0x128, 0x3fe: 0xba, 0x3ff: 0x129,
+ // Block 0x10, offset 0x400
+ 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131,
+ 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a,
+ 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0x143, 0x427: 0x144,
+ 0x428: 0x145, 0x429: 0x146, 0x42a: 0x147, 0x42b: 0x148, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x149, 0x431: 0x14a, 0x432: 0x14b, 0x433: 0xba, 0x434: 0x14c, 0x435: 0x14d, 0x436: 0x14e, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14f, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0x150,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x151, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x152, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x153, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x154, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x155, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x148, 0x529: 0x156, 0x52a: 0xba, 0x52b: 0x157, 0x52c: 0x158, 0x52d: 0x159, 0x52e: 0x15a, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0x15b, 0x53a: 0x15c, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15d, 0x53e: 0x15e, 0x53f: 0x15f,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x160,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x161, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x162, 0x585: 0x163, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x164, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x165, 0x5b2: 0x166, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x167, 0x5c4: 0x168, 0x5c5: 0x169, 0x5c6: 0x16a, 0x5c7: 0x16b,
+ 0x5c8: 0x9b, 0x5c9: 0x16c, 0x5ca: 0xba, 0x5cb: 0x16d, 0x5cc: 0x9b, 0x5cd: 0x16e, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x16f, 0x5e9: 0x170, 0x5ea: 0x171, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x172, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0x173, 0x605: 0x174, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0x175, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x121, 0x621: 0x121, 0x622: 0x121, 0x623: 0x176, 0x624: 0x6f, 0x625: 0x177, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0x178, 0x632: 0x179, 0x633: 0xba, 0x634: 0x17a, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x17b, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x17c, 0x641: 0x9b, 0x642: 0x17d, 0x643: 0x17e, 0x644: 0x73, 0x645: 0x74, 0x646: 0x17f, 0x647: 0x180,
+ 0x648: 0x75, 0x649: 0x181, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x182, 0x65c: 0x9b, 0x65d: 0x183, 0x65e: 0x9b, 0x65f: 0x184,
+ 0x660: 0x185, 0x661: 0x186, 0x662: 0x187, 0x663: 0xba, 0x664: 0x188, 0x665: 0x189, 0x666: 0x18a, 0x667: 0x18b,
+ 0x668: 0x9b, 0x669: 0x18c, 0x66a: 0x18d, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x18e, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x18f, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x190, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x191, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x192,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x193, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x194, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x195, 0x841: 0x196, 0x842: 0xba, 0x843: 0xba, 0x844: 0x197, 0x845: 0x197, 0x846: 0x197, 0x847: 0x198,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 284 entries, 568 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x26c, 0x27d, 0x281, 0x28c, 0x290, 0x299, 0x2a1, 0x2a7, 0x2ac, 0x2af, 0x2b3, 0x2b9, 0x2bd, 0x2c1, 0x2c5, 0x2cb, 0x2d3, 0x2da, 0x2e5, 0x2ef, 0x2f3, 0x2f6, 0x2fc, 0x300, 0x302, 0x305, 0x307, 0x30a, 0x314, 0x317, 0x326, 0x32a, 0x32f, 0x332, 0x336, 0x33b, 0x340, 0x346, 0x352, 0x361, 0x367, 0x36b, 0x37a, 0x37f, 0x387, 0x391, 0x39c, 0x3a4, 0x3b5, 0x3be, 0x3ce, 0x3db, 0x3e5, 0x3ea, 0x3f7, 0x3fb, 0x400, 0x402, 0x406, 0x408, 0x40c, 0x415, 0x41b, 0x41f, 0x42f, 0x439, 0x43e, 0x441, 0x447, 0x44e, 0x453, 0x457, 0x45d, 0x462, 0x46b, 0x470, 0x476, 0x47d, 0x484, 0x48b, 0x48f, 0x494, 0x497, 0x49c, 0x4a8, 0x4ae, 0x4b3, 0x4ba, 0x4c2, 0x4c7, 0x4cb, 0x4db, 0x4e2, 0x4e6, 0x4ea, 0x4f1, 0x4f3, 0x4f6, 0x4f9, 0x4fd, 0x506, 0x50a, 0x512, 0x51a, 0x51e, 0x524, 0x52d, 0x539, 0x540, 0x549, 0x553, 0x55a, 0x568, 0x575, 0x582, 0x58b, 0x58f, 0x59f, 0x5a7, 0x5b2, 0x5bb, 0x5c1, 0x5c9, 0x5d2, 0x5dd, 0x5e0, 0x5ec, 0x5f5, 0x5f8, 0x5fd, 0x602, 0x60f, 0x61a, 0x623, 0x62d, 0x630, 0x63a, 0x643, 0x64f, 0x65c, 0x669, 0x677, 0x67e, 0x682, 0x685, 0x68a, 0x68d, 0x692, 0x695, 0x69c, 0x6a3, 0x6a7, 0x6b2, 0x6b5, 0x6b8, 0x6bb, 0x6c1, 0x6c7, 0x6cd, 0x6d0, 0x6d3, 0x6d6, 0x6dd, 0x6e0, 0x6e5, 0x6ef, 0x6f2, 0x6f6, 0x705, 0x711, 0x715, 0x71a, 0x71e, 0x723, 0x727, 0x72c, 0x735, 0x740, 0x746, 0x74c, 0x752, 0x758, 0x761, 0x764, 0x767, 0x76b, 0x76f, 0x773, 0x779, 0x77f, 0x784, 0x787, 0x797, 0x79e, 0x7a1, 0x7a6, 0x7aa, 0x7b0, 0x7b5, 0x7b9, 0x7bf, 0x7c5, 0x7c9, 0x7d2, 0x7d7, 0x7da, 0x7dd, 0x7e1, 0x7e5, 0x7e8, 0x7f8, 0x809, 0x80e, 0x810, 0x812}
+
+// idnaSparseValues: 2069 entries, 8276 bytes
+var idnaSparseValues = [2069]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x86
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x224
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x246
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x252
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25f
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x059d, lo: 0x90, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x059d, lo: 0xbd, hi: 0xbf},
+ // Block 0x45, offset 0x26c
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x46, offset 0x27d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x281
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x28c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x290
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x299
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x2a1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09dd, lo: 0xa9, hi: 0xa9},
+ {value: 0x09fd, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2ac
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2af
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e7e, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e9e, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2c1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xbf},
+ // Block 0x53, offset 0x2c5
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ebd, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x54, offset 0x2cb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2d3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x56, offset 0x2da
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2e5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x58, offset 0x2ef
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x59, offset 0x2f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0x5a, offset 0x2f6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0ef5, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x2fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0f15, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5c, offset 0x300
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f35, lo: 0x80, hi: 0xbf},
+ // Block 0x5d, offset 0x302
+ {value: 0x0020, lo: 0x02},
+ {value: 0x1735, lo: 0x80, hi: 0x8f},
+ {value: 0x1915, lo: 0x90, hi: 0xbf},
+ // Block 0x5e, offset 0x305
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1f15, lo: 0x80, hi: 0xbf},
+ // Block 0x5f, offset 0x307
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x60, offset 0x30a
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x61, offset 0x314
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x62, offset 0x317
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a35, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a55, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a75, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a95, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a75, lo: 0xb5, hi: 0xb5},
+ {value: 0x2ab5, lo: 0xb6, hi: 0xb6},
+ {value: 0x2ad5, lo: 0xb7, hi: 0xb7},
+ {value: 0x2af5, lo: 0xb8, hi: 0xb9},
+ {value: 0x2b15, lo: 0xba, hi: 0xbb},
+ {value: 0x2b35, lo: 0xbc, hi: 0xbd},
+ {value: 0x2b15, lo: 0xbe, hi: 0xbf},
+ // Block 0x63, offset 0x326
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x64, offset 0x32a
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x65, offset 0x32f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x66, offset 0x332
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x67, offset 0x336
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x68, offset 0x33b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x69, offset 0x340
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6a, offset 0x346
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0xe00d, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x83},
+ {value: 0x03f5, lo: 0x84, hi: 0x84},
+ {value: 0x1329, lo: 0x85, hi: 0x85},
+ {value: 0x447d, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6b, offset 0x352
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x361
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6d, offset 0x367
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6e, offset 0x36b
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x6f, offset 0x37a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x70, offset 0x37f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x71, offset 0x387
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x72, offset 0x391
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x73, offset 0x39c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x74, offset 0x3a4
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x75, offset 0x3b5
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3be
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x77, offset 0x3ce
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x78, offset 0x3db
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x449d, lo: 0x9c, hi: 0x9c},
+ {value: 0x44b5, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x44cd, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3e5
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44ed, lo: 0x80, hi: 0x8f},
+ {value: 0x450d, lo: 0x90, hi: 0x9f},
+ {value: 0x452d, lo: 0xa0, hi: 0xaf},
+ {value: 0x450d, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3ea
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7b, offset 0x3f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3fb
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7d, offset 0x400
+ {value: 0x0020, lo: 0x01},
+ {value: 0x454d, lo: 0x80, hi: 0xbf},
+ // Block 0x7e, offset 0x402
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d4d, lo: 0x80, hi: 0x94},
+ {value: 0x4b0d, lo: 0x95, hi: 0x95},
+ {value: 0x4fed, lo: 0x96, hi: 0xbf},
+ // Block 0x7f, offset 0x406
+ {value: 0x0020, lo: 0x01},
+ {value: 0x552d, lo: 0x80, hi: 0xbf},
+ // Block 0x80, offset 0x408
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5d2d, lo: 0x80, hi: 0x84},
+ {value: 0x568d, lo: 0x85, hi: 0x85},
+ {value: 0x5dcd, lo: 0x86, hi: 0xbf},
+ // Block 0x81, offset 0x40c
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b8d, lo: 0x80, hi: 0x8f},
+ {value: 0x6d4d, lo: 0x90, hi: 0x90},
+ {value: 0x6d8d, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70ed, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x710d, lo: 0xb0, hi: 0xbf},
+ // Block 0x82, offset 0x415
+ {value: 0x0020, lo: 0x05},
+ {value: 0x730d, lo: 0x80, hi: 0xad},
+ {value: 0x656d, lo: 0xae, hi: 0xae},
+ {value: 0x78cd, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f8d, lo: 0xb6, hi: 0xb6},
+ {value: 0x79ad, lo: 0xb7, hi: 0xbf},
+ // Block 0x83, offset 0x41b
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x84, offset 0x41f
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x85, offset 0x42f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x86, offset 0x439
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x87, offset 0x43e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x88, offset 0x441
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x89, offset 0x447
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8a, offset 0x44e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8b, offset 0x453
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8c, offset 0x457
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8d, offset 0x45d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8e, offset 0x462
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8f, offset 0x46b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x90, offset 0x470
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x91, offset 0x476
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8b0d, lo: 0x98, hi: 0x9f},
+ {value: 0x8b25, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x92, offset 0x47d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8b25, lo: 0xb0, hi: 0xb7},
+ {value: 0x8b0d, lo: 0xb8, hi: 0xbf},
+ // Block 0x93, offset 0x484
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x94, offset 0x48b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x95, offset 0x48f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x494
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x97, offset 0x497
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x98, offset 0x49c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x99, offset 0x4a8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4ae
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9b, offset 0x4b3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9c, offset 0x4ba
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9d, offset 0x4c2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9e, offset 0x4c7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0x9f, offset 0x4cb
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4db
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa1, offset 0x4e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa2, offset 0x4e6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa3, offset 0x4ea
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa4, offset 0x4f1
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa5, offset 0x4f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa6, offset 0x4f6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa7, offset 0x4f9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa8, offset 0x4fd
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x506
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xaa, offset 0x50a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xab, offset 0x512
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xac, offset 0x51a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xad, offset 0x51e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xae, offset 0x524
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xaf, offset 0x52d
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb0, offset 0x539
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb1, offset 0x540
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb2, offset 0x549
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb3, offset 0x553
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x55a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb5, offset 0x568
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb6, offset 0x575
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb7, offset 0x582
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb8, offset 0x58b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb9, offset 0x58f
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xba, offset 0x59f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x5a7
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbc, offset 0x5b2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5bb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbe, offset 0x5c1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5c9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc0, offset 0x5d2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc1, offset 0x5dd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc2, offset 0x5e0
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5ec
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc4, offset 0x5f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc5, offset 0x5f8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5fd
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xc7, offset 0x602
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x99},
+ {value: 0x3308, lo: 0x9a, hi: 0x9b},
+ {value: 0x3008, lo: 0x9c, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xbf},
+ // Block 0xc8, offset 0x60f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc9, offset 0x61a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xca, offset 0x623
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xcb, offset 0x62d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xcc, offset 0x630
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xcd, offset 0x63a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xce, offset 0x643
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xcf, offset 0x64f
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xd0, offset 0x65c
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xd1, offset 0x669
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd2, offset 0x677
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd3, offset 0x67e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xd4, offset 0x682
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xd5, offset 0x685
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xd6, offset 0x68a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xd7, offset 0x68d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0340, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd8, offset 0x692
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd9, offset 0x695
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xda, offset 0x69c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xdb, offset 0x6a3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xdc, offset 0x6a7
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xdd, offset 0x6b2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xde, offset 0x6b5
+ {value: 0x0000, lo: 0x02},
+ {value: 0xe105, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xdf, offset 0x6b8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xe0, offset 0x6bb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbf},
+ // Block 0xe1, offset 0x6c1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe2, offset 0x6c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0018, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xe3, offset 0x6cd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xe4, offset 0x6d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe5, offset 0x6d3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe6, offset 0x6d6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xa3},
+ {value: 0x0008, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe7, offset 0x6dd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xe8, offset 0x6e0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xe9, offset 0x6e5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xea, offset 0x6ef
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xeb, offset 0x6f2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xec, offset 0x6f6
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xed, offset 0x705
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xee, offset 0x711
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xef, offset 0x715
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xf0, offset 0x71a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf1, offset 0x71e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xf2, offset 0x723
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xf3, offset 0x727
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xf4, offset 0x72c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xf5, offset 0x735
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xf6, offset 0x740
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xf7, offset 0x746
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xf8, offset 0x74c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xf9, offset 0x752
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xfa, offset 0x758
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0b08, lo: 0x8b, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xfb, offset 0x761
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0xfc, offset 0x764
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xfd, offset 0x767
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0818, lo: 0x81, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xfe, offset 0x76b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xff, offset 0x76f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x100, offset 0x773
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x101, offset 0x779
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x102, offset 0x77f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1d9, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0x103, offset 0x784
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0x104, offset 0x787
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc801, lo: 0x80, hi: 0x80},
+ {value: 0xc851, lo: 0x81, hi: 0x81},
+ {value: 0xc8a1, lo: 0x82, hi: 0x82},
+ {value: 0xc8f1, lo: 0x83, hi: 0x83},
+ {value: 0xc941, lo: 0x84, hi: 0x84},
+ {value: 0xc991, lo: 0x85, hi: 0x85},
+ {value: 0xc9e1, lo: 0x86, hi: 0x86},
+ {value: 0xca31, lo: 0x87, hi: 0x87},
+ {value: 0xca81, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcad1, lo: 0x90, hi: 0x90},
+ {value: 0xcaf1, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0x105, offset 0x797
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x106, offset 0x79e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x107, offset 0x7a1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x108, offset 0x7a6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x109, offset 0x7aa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x10a, offset 0x7b0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x10b, offset 0x7b5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x10c, offset 0x7b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0x10d, offset 0x7bf
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xa4},
+ {value: 0x0018, lo: 0xa5, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xbf},
+ // Block 0x10e, offset 0x7c5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x10f, offset 0x7c9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x110, offset 0x7d2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x111, offset 0x7d7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x112, offset 0x7da
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x113, offset 0x7dd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x114, offset 0x7e1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x115, offset 0x7e5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x116, offset 0x7e8
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xded1, lo: 0x80, hi: 0x89},
+ {value: 0x8e35, lo: 0x8a, hi: 0x8a},
+ {value: 0xe011, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e55, lo: 0x9d, hi: 0x9d},
+ {value: 0xe251, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e75, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2f1, lo: 0xa4, hi: 0xab},
+ {value: 0x7f0d, lo: 0xac, hi: 0xac},
+ {value: 0xe3f1, lo: 0xad, hi: 0xaf},
+ {value: 0x8e95, lo: 0xb0, hi: 0xb0},
+ {value: 0xe451, lo: 0xb1, hi: 0xb6},
+ {value: 0x8eb5, lo: 0xb7, hi: 0xb9},
+ {value: 0xe511, lo: 0xba, hi: 0xba},
+ {value: 0x8f15, lo: 0xbb, hi: 0xbb},
+ {value: 0xe531, lo: 0xbc, hi: 0xbf},
+ // Block 0x117, offset 0x7f8
+ {value: 0x0020, lo: 0x10},
+ {value: 0x93b5, lo: 0x80, hi: 0x80},
+ {value: 0xf0b1, lo: 0x81, hi: 0x86},
+ {value: 0x93d5, lo: 0x87, hi: 0x8a},
+ {value: 0xda11, lo: 0x8b, hi: 0x8b},
+ {value: 0xf171, lo: 0x8c, hi: 0x96},
+ {value: 0x9455, lo: 0x97, hi: 0x97},
+ {value: 0xf2d1, lo: 0x98, hi: 0xa3},
+ {value: 0x9475, lo: 0xa4, hi: 0xa6},
+ {value: 0xf451, lo: 0xa7, hi: 0xaa},
+ {value: 0x94d5, lo: 0xab, hi: 0xab},
+ {value: 0xf4d1, lo: 0xac, hi: 0xac},
+ {value: 0x94f5, lo: 0xad, hi: 0xad},
+ {value: 0xf4f1, lo: 0xae, hi: 0xaf},
+ {value: 0x9515, lo: 0xb0, hi: 0xb1},
+ {value: 0xf531, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x118, offset 0x809
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x119, offset 0x80e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x11a, offset 0x810
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x11b, offset 0x812
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42780 bytes (41KiB); checksum: 29936AB9
diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go
index e409d76f..1adb6073 100644
--- a/vendor/golang.org/x/sys/windows/memory_windows.go
+++ b/vendor/golang.org/x/sys/windows/memory_windows.go
@@ -16,13 +16,19 @@ const (
MEM_RESET_UNDO = 0x01000000
MEM_LARGE_PAGES = 0x20000000
- PAGE_NOACCESS = 0x01
- PAGE_READONLY = 0x02
- PAGE_READWRITE = 0x04
- PAGE_WRITECOPY = 0x08
- PAGE_EXECUTE_READ = 0x20
- PAGE_EXECUTE_READWRITE = 0x40
- PAGE_EXECUTE_WRITECOPY = 0x80
+ PAGE_NOACCESS = 0x00000001
+ PAGE_READONLY = 0x00000002
+ PAGE_READWRITE = 0x00000004
+ PAGE_WRITECOPY = 0x00000008
+ PAGE_EXECUTE = 0x00000010
+ PAGE_EXECUTE_READ = 0x00000020
+ PAGE_EXECUTE_READWRITE = 0x00000040
+ PAGE_EXECUTE_WRITECOPY = 0x00000080
+ PAGE_GUARD = 0x00000100
+ PAGE_NOCACHE = 0x00000200
+ PAGE_WRITECOMBINE = 0x00000400
+ PAGE_TARGETS_INVALID = 0x40000000
+ PAGE_TARGETS_NO_UPDATE = 0x40000000
QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002
QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 88cff4e0..598e8ce5 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -259,6 +259,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore
//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore
//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore
+//sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore
//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain
//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain
//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index a1c801d1..5d0a54e6 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -138,6 +138,7 @@ var (
procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore")
procCertCloseStore = modcrypt32.NewProc("CertCloseStore")
procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext")
+ procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore")
procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore")
procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain")
procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext")
@@ -1125,6 +1126,14 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en
return
}
+func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
+ r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0)
context = (*CertContext)(unsafe.Pointer(r0))
diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go
index 520b9ada..48ec64b4 100644
--- a/vendor/golang.org/x/text/transform/transform.go
+++ b/vendor/golang.org/x/text/transform/transform.go
@@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) {
// Transform the remaining input, growing dst and src buffers as necessary.
for {
n := copy(src, s[pSrc:])
- nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
+ atEOF := pSrc+n == len(s)
+ nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF)
pDst += nDst
pSrc += nSrc
@@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) {
dst = grow(dst, pDst)
}
} else if err == ErrShortSrc {
+ if atEOF {
+ return string(dst[:pDst]), pSrc, err
+ }
if nSrc == 0 {
src = grow(src, 0)
}
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 48d14400..50deb660 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() {
// Rule W1.
// Changes all NSMs.
- preceedingCharacterType := s.sos
+ precedingCharacterType := s.sos
for i, t := range s.types {
if t == NSM {
- s.types[i] = preceedingCharacterType
+ s.types[i] = precedingCharacterType
} else {
if t.in(LRI, RLI, FSI, PDI) {
- preceedingCharacterType = ON
+ precedingCharacterType = ON
}
- preceedingCharacterType = t
+ precedingCharacterType = t
}
}
diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
index 022e3c69..16b11db5 100644
--- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
+++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go
@@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-// +build go1.13
+// +build go1.13,!go1.14
package bidi
diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go
new file mode 100644
index 00000000..7ffa3651
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go
@@ -0,0 +1,1923 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.14
+
+package bidi
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "12.0.0"
+
+// xorMasks contains masks to be xor-ed with brackets to get the reverse
+// version.
+var xorMasks = []int32{ // 8 elements
+ 0, 1, 6, 7, 3, 15, 29, 63,
+} // Size: 56 bytes
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return bidiValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = bidiIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return bidiValues[c0]
+ }
+ i := bidiIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *bidiTrie) lookupString(s string) (v uint8, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return bidiValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := bidiIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = bidiIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = bidiIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *bidiTrie) lookupStringUnsafe(s string) uint8 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return bidiValues[c0]
+ }
+ i := bidiIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = bidiIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// bidiTrie. Total size: 16896 bytes (16.50 KiB). Checksum: 6f0927067913dc6d.
+type bidiTrie struct{}
+
+func newBidiTrie(i int) *bidiTrie {
+ return &bidiTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 {
+ switch {
+ default:
+ return uint8(bidiValues[n<<6+uint32(b)])
+ }
+}
+
+// bidiValues: 240 blocks, 15360 entries, 15360 bytes
+// The third block is the zero block.
+var bidiValues = [15360]uint8{
+ // Block 0x0, offset 0x0
+ 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b,
+ 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008,
+ 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b,
+ 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b,
+ 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007,
+ 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004,
+ 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a,
+ 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006,
+ 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002,
+ 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a,
+ 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a,
+ // Block 0x1, offset 0x40
+ 0x40: 0x000a,
+ 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a,
+ 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a,
+ 0x7b: 0x005a,
+ 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007,
+ 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b,
+ 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b,
+ 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b,
+ 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b,
+ 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004,
+ 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a,
+ 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a,
+ 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a,
+ 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a,
+ 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a,
+ // Block 0x4, offset 0x100
+ 0x117: 0x000a,
+ 0x137: 0x000a,
+ // Block 0x5, offset 0x140
+ 0x179: 0x000a, 0x17a: 0x000a,
+ // Block 0x6, offset 0x180
+ 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a,
+ 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a,
+ 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a,
+ 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a,
+ 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a,
+ 0x19e: 0x000a, 0x19f: 0x000a,
+ 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a,
+ 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a,
+ 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a,
+ 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a,
+ 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c,
+ 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c,
+ 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c,
+ 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c,
+ 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c,
+ 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c,
+ 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c,
+ 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c,
+ 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c,
+ 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c,
+ 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c,
+ // Block 0x8, offset 0x200
+ 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c,
+ 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c,
+ 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c,
+ 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c,
+ 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c,
+ 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c,
+ 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c,
+ 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c,
+ 0x234: 0x000a, 0x235: 0x000a,
+ 0x23e: 0x000a,
+ // Block 0x9, offset 0x240
+ 0x244: 0x000a, 0x245: 0x000a,
+ 0x247: 0x000a,
+ // Block 0xa, offset 0x280
+ 0x2b6: 0x000a,
+ // Block 0xb, offset 0x2c0
+ 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c,
+ 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c,
+ // Block 0xc, offset 0x300
+ 0x30a: 0x000a,
+ 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c,
+ 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c,
+ 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c,
+ 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c,
+ 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c,
+ 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c,
+ 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c,
+ 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c,
+ 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c,
+ 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001,
+ 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001,
+ 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001,
+ 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001,
+ 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001,
+ 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001,
+ 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001,
+ 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001,
+ 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001,
+ 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001,
+ // Block 0xe, offset 0x380
+ 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005,
+ 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d,
+ 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c,
+ 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c,
+ 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d,
+ 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d,
+ 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d,
+ 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d,
+ 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d,
+ 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d,
+ 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d,
+ 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c,
+ 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c,
+ 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c,
+ 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c,
+ 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005,
+ 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005,
+ 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d,
+ 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d,
+ 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d,
+ 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d,
+ // Block 0x10, offset 0x400
+ 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d,
+ 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d,
+ 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d,
+ 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d,
+ 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d,
+ 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d,
+ 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d,
+ 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d,
+ 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d,
+ 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d,
+ 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d,
+ // Block 0x11, offset 0x440
+ 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d,
+ 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d,
+ 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d,
+ 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c,
+ 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005,
+ 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c,
+ 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a,
+ 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d,
+ 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002,
+ 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d,
+ 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d,
+ // Block 0x12, offset 0x480
+ 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d,
+ 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d,
+ 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c,
+ 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d,
+ 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d,
+ 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d,
+ 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d,
+ 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d,
+ 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c,
+ 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c,
+ 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c,
+ 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d,
+ 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d,
+ 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d,
+ 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d,
+ 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d,
+ 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d,
+ 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d,
+ 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d,
+ 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d,
+ 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d,
+ // Block 0x14, offset 0x500
+ 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d,
+ 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d,
+ 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d,
+ 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d,
+ 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d,
+ 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d,
+ 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c,
+ 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c,
+ 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d,
+ 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d,
+ 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001,
+ 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001,
+ 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001,
+ 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001,
+ 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001,
+ 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001,
+ 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001,
+ 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c,
+ 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001,
+ 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001,
+ 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001,
+ // Block 0x16, offset 0x580
+ 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001,
+ 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001,
+ 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001,
+ 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c,
+ 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c,
+ 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c,
+ 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c,
+ 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001,
+ 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001,
+ 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001,
+ 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001,
+ 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001,
+ 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001,
+ 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001,
+ 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001,
+ 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d,
+ 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d,
+ 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d,
+ 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001,
+ 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001,
+ 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001,
+ // Block 0x18, offset 0x600
+ 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001,
+ 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001,
+ 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001,
+ 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001,
+ 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001,
+ 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d,
+ 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d,
+ 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d,
+ 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d,
+ 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d,
+ 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d,
+ // Block 0x19, offset 0x640
+ 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d,
+ 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d,
+ 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d,
+ 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c,
+ 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c,
+ 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c,
+ 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c,
+ 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c,
+ 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c,
+ 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c,
+ 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c,
+ 0x6ba: 0x000c,
+ 0x6bc: 0x000c,
+ // Block 0x1b, offset 0x6c0
+ 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c,
+ 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c,
+ 0x6cd: 0x000c, 0x6d1: 0x000c,
+ 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c,
+ 0x6e2: 0x000c, 0x6e3: 0x000c,
+ // Block 0x1c, offset 0x700
+ 0x701: 0x000c,
+ 0x73c: 0x000c,
+ // Block 0x1d, offset 0x740
+ 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c,
+ 0x74d: 0x000c,
+ 0x762: 0x000c, 0x763: 0x000c,
+ 0x772: 0x0004, 0x773: 0x0004,
+ 0x77b: 0x0004,
+ 0x77e: 0x000c,
+ // Block 0x1e, offset 0x780
+ 0x781: 0x000c, 0x782: 0x000c,
+ 0x7bc: 0x000c,
+ // Block 0x1f, offset 0x7c0
+ 0x7c1: 0x000c, 0x7c2: 0x000c,
+ 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c,
+ 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c,
+ 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c,
+ // Block 0x20, offset 0x800
+ 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c,
+ 0x807: 0x000c, 0x808: 0x000c,
+ 0x80d: 0x000c,
+ 0x822: 0x000c, 0x823: 0x000c,
+ 0x831: 0x0004,
+ 0x83a: 0x000c, 0x83b: 0x000c,
+ 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c,
+ // Block 0x21, offset 0x840
+ 0x841: 0x000c,
+ 0x87c: 0x000c, 0x87f: 0x000c,
+ // Block 0x22, offset 0x880
+ 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c,
+ 0x88d: 0x000c,
+ 0x896: 0x000c,
+ 0x8a2: 0x000c, 0x8a3: 0x000c,
+ // Block 0x23, offset 0x8c0
+ 0x8c2: 0x000c,
+ // Block 0x24, offset 0x900
+ 0x900: 0x000c,
+ 0x90d: 0x000c,
+ 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a,
+ 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a,
+ // Block 0x25, offset 0x940
+ 0x940: 0x000c, 0x944: 0x000c,
+ 0x97e: 0x000c, 0x97f: 0x000c,
+ // Block 0x26, offset 0x980
+ 0x980: 0x000c,
+ 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c,
+ 0x98c: 0x000c, 0x98d: 0x000c,
+ 0x995: 0x000c, 0x996: 0x000c,
+ 0x9a2: 0x000c, 0x9a3: 0x000c,
+ 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a,
+ 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a,
+ // Block 0x27, offset 0x9c0
+ 0x9cc: 0x000c, 0x9cd: 0x000c,
+ 0x9e2: 0x000c, 0x9e3: 0x000c,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x000c, 0xa01: 0x000c,
+ 0xa3b: 0x000c,
+ 0xa3c: 0x000c,
+ // Block 0x29, offset 0xa40
+ 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c,
+ 0xa4d: 0x000c,
+ 0xa62: 0x000c, 0xa63: 0x000c,
+ // Block 0x2a, offset 0xa80
+ 0xa8a: 0x000c,
+ 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c,
+ // Block 0x2b, offset 0xac0
+ 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c,
+ 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c,
+ 0xaff: 0x0004,
+ // Block 0x2c, offset 0xb00
+ 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c,
+ 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c,
+ // Block 0x2d, offset 0xb40
+ 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c,
+ 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7a: 0x000c, 0xb7b: 0x000c,
+ 0xb7c: 0x000c,
+ // Block 0x2e, offset 0xb80
+ 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c,
+ 0xb8c: 0x000c, 0xb8d: 0x000c,
+ // Block 0x2f, offset 0xbc0
+ 0xbd8: 0x000c, 0xbd9: 0x000c,
+ 0xbf5: 0x000c,
+ 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a,
+ 0xbfc: 0x003a, 0xbfd: 0x002a,
+ // Block 0x30, offset 0xc00
+ 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c,
+ 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c,
+ 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c,
+ 0xc46: 0x000c, 0xc47: 0x000c,
+ 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c,
+ 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c,
+ 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c,
+ 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c,
+ 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c,
+ 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c,
+ 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c,
+ 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c,
+ 0xc7c: 0x000c,
+ // Block 0x32, offset 0xc80
+ 0xc86: 0x000c,
+ // Block 0x33, offset 0xcc0
+ 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c,
+ 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c,
+ 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c,
+ 0xcfd: 0x000c, 0xcfe: 0x000c,
+ // Block 0x34, offset 0xd00
+ 0xd18: 0x000c, 0xd19: 0x000c,
+ 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c,
+ 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c,
+ // Block 0x35, offset 0xd40
+ 0xd42: 0x000c, 0xd45: 0x000c,
+ 0xd46: 0x000c,
+ 0xd4d: 0x000c,
+ 0xd5d: 0x000c,
+ // Block 0x36, offset 0xd80
+ 0xd9d: 0x000c,
+ 0xd9e: 0x000c, 0xd9f: 0x000c,
+ // Block 0x37, offset 0xdc0
+ 0xdd0: 0x000a, 0xdd1: 0x000a,
+ 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a,
+ 0xdd8: 0x000a, 0xdd9: 0x000a,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x000a,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x0009,
+ 0xe5b: 0x007a, 0xe5c: 0x006a,
+ // Block 0x3a, offset 0xe80
+ 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c,
+ 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c,
+ // Block 0x3b, offset 0xec0
+ 0xed2: 0x000c, 0xed3: 0x000c,
+ 0xef2: 0x000c, 0xef3: 0x000c,
+ // Block 0x3c, offset 0xf00
+ 0xf34: 0x000c, 0xf35: 0x000c,
+ 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c,
+ 0xf3c: 0x000c, 0xf3d: 0x000c,
+ // Block 0x3d, offset 0xf40
+ 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c,
+ 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c,
+ 0xf52: 0x000c, 0xf53: 0x000c,
+ 0xf5b: 0x0004, 0xf5d: 0x000c,
+ 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a,
+ 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a,
+ 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c,
+ 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b,
+ // Block 0x3f, offset 0xfc0
+ 0xfc5: 0x000c,
+ 0xfc6: 0x000c,
+ 0xfe9: 0x000c,
+ // Block 0x40, offset 0x1000
+ 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c,
+ 0x1027: 0x000c, 0x1028: 0x000c,
+ 0x1032: 0x000c,
+ 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a,
+ // Block 0x42, offset 0x1080
+ 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a,
+ 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a,
+ 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a,
+ 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a,
+ 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a,
+ 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a,
+ // Block 0x43, offset 0x10c0
+ 0x10d7: 0x000c,
+ 0x10d8: 0x000c, 0x10db: 0x000c,
+ // Block 0x44, offset 0x1100
+ 0x1116: 0x000c,
+ 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c,
+ 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c,
+ 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c,
+ 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c,
+ 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c,
+ 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c,
+ 0x113c: 0x000c, 0x113f: 0x000c,
+ // Block 0x45, offset 0x1140
+ 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c,
+ 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c,
+ 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c,
+ 0x11b4: 0x000c,
+ 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c,
+ 0x11bc: 0x000c,
+ // Block 0x47, offset 0x11c0
+ 0x11c2: 0x000c,
+ 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c,
+ 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x000c, 0x1201: 0x000c,
+ 0x1222: 0x000c, 0x1223: 0x000c,
+ 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c,
+ 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c,
+ // Block 0x49, offset 0x1240
+ 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c,
+ 0x126d: 0x000c, 0x126f: 0x000c,
+ 0x1270: 0x000c, 0x1271: 0x000c,
+ // Block 0x4a, offset 0x1280
+ 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c,
+ 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c,
+ 0x12b6: 0x000c, 0x12b7: 0x000c,
+ // Block 0x4b, offset 0x12c0
+ 0x12d0: 0x000c, 0x12d1: 0x000c,
+ 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c,
+ 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c,
+ 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c,
+ 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c,
+ 0x12ed: 0x000c,
+ 0x12f4: 0x000c,
+ 0x12f8: 0x000c, 0x12f9: 0x000c,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c,
+ 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c,
+ 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c,
+ 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c,
+ 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c,
+ 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c,
+ 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c,
+ 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c,
+ 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c,
+ 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c,
+ 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c,
+ // Block 0x4d, offset 0x1340
+ 0x137d: 0x000a, 0x137f: 0x000a,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x000a, 0x1381: 0x000a,
+ 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a,
+ 0x139d: 0x000a,
+ 0x139e: 0x000a, 0x139f: 0x000a,
+ 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a,
+ 0x13bd: 0x000a, 0x13be: 0x000a,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009,
+ 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b,
+ 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a,
+ 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a,
+ 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a,
+ 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a,
+ 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007,
+ 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006,
+ 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a,
+ 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a,
+ 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a,
+ 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a,
+ 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a,
+ 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a,
+ 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a,
+ 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b,
+ 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e,
+ 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b,
+ 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002,
+ 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003,
+ 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002,
+ 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003,
+ 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a,
+ 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004,
+ 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004,
+ 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004,
+ 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004,
+ 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004,
+ 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004,
+ 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004,
+ 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c,
+ 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c,
+ 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c,
+ 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c,
+ 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c,
+ 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c,
+ 0x14b0: 0x000c,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a,
+ 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a,
+ 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a,
+ 0x14d8: 0x000a,
+ 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a,
+ 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a,
+ 0x14ee: 0x0004,
+ 0x14fa: 0x000a, 0x14fb: 0x000a,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a,
+ 0x150a: 0x000a, 0x150b: 0x000a,
+ 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a,
+ 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a,
+ 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a,
+ 0x151e: 0x000a, 0x151f: 0x000a,
+ // Block 0x55, offset 0x1540
+ 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a,
+ 0x1550: 0x000a, 0x1551: 0x000a,
+ 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a,
+ 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a,
+ 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a,
+ 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a,
+ 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a,
+ 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a,
+ 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a,
+ 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a,
+ 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a,
+ 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a,
+ 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a,
+ 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a,
+ 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a,
+ 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a,
+ 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a,
+ 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a,
+ 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a,
+ 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a,
+ 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a,
+ 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a,
+ 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a,
+ 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a,
+ 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a,
+ 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a,
+ 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a,
+ 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a,
+ 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a,
+ 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a,
+ 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a,
+ 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a,
+ 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a,
+ 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a,
+ 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a,
+ 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a,
+ 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a,
+ 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a,
+ // Block 0x59, offset 0x1640
+ 0x167b: 0x000a,
+ 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a,
+ 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a,
+ 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a,
+ 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a,
+ 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a,
+ 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a,
+ 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a,
+ 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a,
+ 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a,
+ 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a,
+ 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a,
+ 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a,
+ 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a,
+ 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a,
+ 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a,
+ 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a,
+ 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a,
+ 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a,
+ 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a,
+ 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a,
+ 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a,
+ 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a,
+ 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a,
+ 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a,
+ 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002,
+ 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002,
+ 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002,
+ 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002,
+ // Block 0x5e, offset 0x1780
+ 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a,
+ 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a,
+ 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a,
+ 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a,
+ 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a,
+ 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a,
+ 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a,
+ 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a,
+ 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a,
+ 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a,
+ 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a,
+ 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a,
+ 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a,
+ 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a,
+ 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a,
+ 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a,
+ 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a,
+ 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a,
+ 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a,
+ 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a,
+ 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a,
+ 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a,
+ 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a,
+ 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a,
+ 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a,
+ 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a,
+ 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a,
+ 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a,
+ 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a,
+ 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a,
+ 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a,
+ 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a,
+ 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a,
+ 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a,
+ 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a,
+ 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a,
+ 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba,
+ 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a,
+ 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a,
+ 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a,
+ 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a,
+ 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a,
+ 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a,
+ 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a,
+ 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a,
+ 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a,
+ 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a,
+ 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a,
+ 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a,
+ 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a,
+ 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a,
+ 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a,
+ 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a,
+ 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a,
+ 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a,
+ 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a,
+ 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a,
+ 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a,
+ 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a,
+ 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a,
+ 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a,
+ 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a,
+ 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a,
+ 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a,
+ 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a,
+ 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a,
+ 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a,
+ 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a,
+ 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a,
+ 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a,
+ 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a,
+ 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a,
+ 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a,
+ 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a,
+ // Block 0x66, offset 0x1980
+ 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a,
+ 0x19aa: 0x000a, 0x19af: 0x000c,
+ 0x19b0: 0x000c, 0x19b1: 0x000c,
+ 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a,
+ 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a,
+ // Block 0x67, offset 0x19c0
+ 0x19ff: 0x000c,
+ // Block 0x68, offset 0x1a00
+ 0x1a20: 0x000c, 0x1a21: 0x000c, 0x1a22: 0x000c, 0x1a23: 0x000c,
+ 0x1a24: 0x000c, 0x1a25: 0x000c, 0x1a26: 0x000c, 0x1a27: 0x000c, 0x1a28: 0x000c, 0x1a29: 0x000c,
+ 0x1a2a: 0x000c, 0x1a2b: 0x000c, 0x1a2c: 0x000c, 0x1a2d: 0x000c, 0x1a2e: 0x000c, 0x1a2f: 0x000c,
+ 0x1a30: 0x000c, 0x1a31: 0x000c, 0x1a32: 0x000c, 0x1a33: 0x000c, 0x1a34: 0x000c, 0x1a35: 0x000c,
+ 0x1a36: 0x000c, 0x1a37: 0x000c, 0x1a38: 0x000c, 0x1a39: 0x000c, 0x1a3a: 0x000c, 0x1a3b: 0x000c,
+ 0x1a3c: 0x000c, 0x1a3d: 0x000c, 0x1a3e: 0x000c, 0x1a3f: 0x000c,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x000a, 0x1a41: 0x000a, 0x1a42: 0x000a, 0x1a43: 0x000a, 0x1a44: 0x000a, 0x1a45: 0x000a,
+ 0x1a46: 0x000a, 0x1a47: 0x000a, 0x1a48: 0x000a, 0x1a49: 0x000a, 0x1a4a: 0x000a, 0x1a4b: 0x000a,
+ 0x1a4c: 0x000a, 0x1a4d: 0x000a, 0x1a4e: 0x000a, 0x1a4f: 0x000a, 0x1a50: 0x000a, 0x1a51: 0x000a,
+ 0x1a52: 0x000a, 0x1a53: 0x000a, 0x1a54: 0x000a, 0x1a55: 0x000a, 0x1a56: 0x000a, 0x1a57: 0x000a,
+ 0x1a58: 0x000a, 0x1a59: 0x000a, 0x1a5a: 0x000a, 0x1a5b: 0x000a, 0x1a5c: 0x000a, 0x1a5d: 0x000a,
+ 0x1a5e: 0x000a, 0x1a5f: 0x000a, 0x1a60: 0x000a, 0x1a61: 0x000a, 0x1a62: 0x003a, 0x1a63: 0x002a,
+ 0x1a64: 0x003a, 0x1a65: 0x002a, 0x1a66: 0x003a, 0x1a67: 0x002a, 0x1a68: 0x003a, 0x1a69: 0x002a,
+ 0x1a6a: 0x000a, 0x1a6b: 0x000a, 0x1a6c: 0x000a, 0x1a6d: 0x000a, 0x1a6e: 0x000a, 0x1a6f: 0x000a,
+ 0x1a70: 0x000a, 0x1a71: 0x000a, 0x1a72: 0x000a, 0x1a73: 0x000a, 0x1a74: 0x000a, 0x1a75: 0x000a,
+ 0x1a76: 0x000a, 0x1a77: 0x000a, 0x1a78: 0x000a, 0x1a79: 0x000a, 0x1a7a: 0x000a, 0x1a7b: 0x000a,
+ 0x1a7c: 0x000a, 0x1a7d: 0x000a, 0x1a7e: 0x000a, 0x1a7f: 0x000a,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a,
+ 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a,
+ 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a,
+ 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a,
+ 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a,
+ 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a,
+ 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a,
+ 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x000a, 0x1ae3: 0x000a,
+ 0x1ae4: 0x000a, 0x1ae5: 0x000a, 0x1ae6: 0x000a, 0x1ae7: 0x000a, 0x1ae8: 0x000a, 0x1ae9: 0x000a,
+ 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a,
+ 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a,
+ 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a,
+ 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a,
+ 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a,
+ 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a,
+ 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a,
+ 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1a: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a,
+ 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a,
+ 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a,
+ 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a,
+ 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a,
+ 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a,
+ 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a,
+ 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a,
+ 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a,
+ 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x0009, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a,
+ 0x1b88: 0x003a, 0x1b89: 0x002a, 0x1b8a: 0x003a, 0x1b8b: 0x002a,
+ 0x1b8c: 0x003a, 0x1b8d: 0x002a, 0x1b8e: 0x003a, 0x1b8f: 0x002a, 0x1b90: 0x003a, 0x1b91: 0x002a,
+ 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x003a, 0x1b95: 0x002a, 0x1b96: 0x003a, 0x1b97: 0x002a,
+ 0x1b98: 0x003a, 0x1b99: 0x002a, 0x1b9a: 0x003a, 0x1b9b: 0x002a, 0x1b9c: 0x000a, 0x1b9d: 0x000a,
+ 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a,
+ 0x1baa: 0x000c, 0x1bab: 0x000c, 0x1bac: 0x000c, 0x1bad: 0x000c,
+ 0x1bb0: 0x000a,
+ 0x1bb6: 0x000a, 0x1bb7: 0x000a,
+ 0x1bbd: 0x000a, 0x1bbe: 0x000a, 0x1bbf: 0x000a,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bd9: 0x000c, 0x1bda: 0x000c, 0x1bdb: 0x000a, 0x1bdc: 0x000a,
+ 0x1be0: 0x000a,
+ // Block 0x70, offset 0x1c00
+ 0x1c3b: 0x000a,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0x000a, 0x1c41: 0x000a, 0x1c42: 0x000a, 0x1c43: 0x000a, 0x1c44: 0x000a, 0x1c45: 0x000a,
+ 0x1c46: 0x000a, 0x1c47: 0x000a, 0x1c48: 0x000a, 0x1c49: 0x000a, 0x1c4a: 0x000a, 0x1c4b: 0x000a,
+ 0x1c4c: 0x000a, 0x1c4d: 0x000a, 0x1c4e: 0x000a, 0x1c4f: 0x000a, 0x1c50: 0x000a, 0x1c51: 0x000a,
+ 0x1c52: 0x000a, 0x1c53: 0x000a, 0x1c54: 0x000a, 0x1c55: 0x000a, 0x1c56: 0x000a, 0x1c57: 0x000a,
+ 0x1c58: 0x000a, 0x1c59: 0x000a, 0x1c5a: 0x000a, 0x1c5b: 0x000a, 0x1c5c: 0x000a, 0x1c5d: 0x000a,
+ 0x1c5e: 0x000a, 0x1c5f: 0x000a, 0x1c60: 0x000a, 0x1c61: 0x000a, 0x1c62: 0x000a, 0x1c63: 0x000a,
+ // Block 0x72, offset 0x1c80
+ 0x1c9d: 0x000a,
+ 0x1c9e: 0x000a,
+ // Block 0x73, offset 0x1cc0
+ 0x1cd0: 0x000a, 0x1cd1: 0x000a,
+ 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a,
+ 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a,
+ 0x1cde: 0x000a, 0x1cdf: 0x000a,
+ 0x1cfc: 0x000a, 0x1cfd: 0x000a, 0x1cfe: 0x000a,
+ // Block 0x74, offset 0x1d00
+ 0x1d31: 0x000a, 0x1d32: 0x000a, 0x1d33: 0x000a, 0x1d34: 0x000a, 0x1d35: 0x000a,
+ 0x1d36: 0x000a, 0x1d37: 0x000a, 0x1d38: 0x000a, 0x1d39: 0x000a, 0x1d3a: 0x000a, 0x1d3b: 0x000a,
+ 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, 0x1d3f: 0x000a,
+ // Block 0x75, offset 0x1d40
+ 0x1d4c: 0x000a, 0x1d4d: 0x000a, 0x1d4e: 0x000a, 0x1d4f: 0x000a,
+ // Block 0x76, offset 0x1d80
+ 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a,
+ // Block 0x77, offset 0x1dc0
+ 0x1dde: 0x000a, 0x1ddf: 0x000a,
+ 0x1dff: 0x000a,
+ // Block 0x78, offset 0x1e00
+ 0x1e10: 0x000a, 0x1e11: 0x000a,
+ 0x1e12: 0x000a, 0x1e13: 0x000a, 0x1e14: 0x000a, 0x1e15: 0x000a, 0x1e16: 0x000a, 0x1e17: 0x000a,
+ 0x1e18: 0x000a, 0x1e19: 0x000a, 0x1e1a: 0x000a, 0x1e1b: 0x000a, 0x1e1c: 0x000a, 0x1e1d: 0x000a,
+ 0x1e1e: 0x000a, 0x1e1f: 0x000a, 0x1e20: 0x000a, 0x1e21: 0x000a, 0x1e22: 0x000a, 0x1e23: 0x000a,
+ 0x1e24: 0x000a, 0x1e25: 0x000a, 0x1e26: 0x000a, 0x1e27: 0x000a, 0x1e28: 0x000a, 0x1e29: 0x000a,
+ 0x1e2a: 0x000a, 0x1e2b: 0x000a, 0x1e2c: 0x000a, 0x1e2d: 0x000a, 0x1e2e: 0x000a, 0x1e2f: 0x000a,
+ 0x1e30: 0x000a, 0x1e31: 0x000a, 0x1e32: 0x000a, 0x1e33: 0x000a, 0x1e34: 0x000a, 0x1e35: 0x000a,
+ 0x1e36: 0x000a, 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, 0x1e3b: 0x000a,
+ 0x1e3c: 0x000a, 0x1e3d: 0x000a, 0x1e3e: 0x000a, 0x1e3f: 0x000a,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0x000a, 0x1e41: 0x000a, 0x1e42: 0x000a, 0x1e43: 0x000a, 0x1e44: 0x000a, 0x1e45: 0x000a,
+ 0x1e46: 0x000a,
+ // Block 0x7a, offset 0x1e80
+ 0x1e8d: 0x000a, 0x1e8e: 0x000a, 0x1e8f: 0x000a,
+ // Block 0x7b, offset 0x1ec0
+ 0x1eef: 0x000c,
+ 0x1ef0: 0x000c, 0x1ef1: 0x000c, 0x1ef2: 0x000c, 0x1ef3: 0x000a, 0x1ef4: 0x000c, 0x1ef5: 0x000c,
+ 0x1ef6: 0x000c, 0x1ef7: 0x000c, 0x1ef8: 0x000c, 0x1ef9: 0x000c, 0x1efa: 0x000c, 0x1efb: 0x000c,
+ 0x1efc: 0x000c, 0x1efd: 0x000c, 0x1efe: 0x000a, 0x1eff: 0x000a,
+ // Block 0x7c, offset 0x1f00
+ 0x1f1e: 0x000c, 0x1f1f: 0x000c,
+ // Block 0x7d, offset 0x1f40
+ 0x1f70: 0x000c, 0x1f71: 0x000c,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0x000a, 0x1f81: 0x000a, 0x1f82: 0x000a, 0x1f83: 0x000a, 0x1f84: 0x000a, 0x1f85: 0x000a,
+ 0x1f86: 0x000a, 0x1f87: 0x000a, 0x1f88: 0x000a, 0x1f89: 0x000a, 0x1f8a: 0x000a, 0x1f8b: 0x000a,
+ 0x1f8c: 0x000a, 0x1f8d: 0x000a, 0x1f8e: 0x000a, 0x1f8f: 0x000a, 0x1f90: 0x000a, 0x1f91: 0x000a,
+ 0x1f92: 0x000a, 0x1f93: 0x000a, 0x1f94: 0x000a, 0x1f95: 0x000a, 0x1f96: 0x000a, 0x1f97: 0x000a,
+ 0x1f98: 0x000a, 0x1f99: 0x000a, 0x1f9a: 0x000a, 0x1f9b: 0x000a, 0x1f9c: 0x000a, 0x1f9d: 0x000a,
+ 0x1f9e: 0x000a, 0x1f9f: 0x000a, 0x1fa0: 0x000a, 0x1fa1: 0x000a,
+ // Block 0x7f, offset 0x1fc0
+ 0x1fc8: 0x000a,
+ // Block 0x80, offset 0x2000
+ 0x2002: 0x000c,
+ 0x2006: 0x000c, 0x200b: 0x000c,
+ 0x2025: 0x000c, 0x2026: 0x000c, 0x2028: 0x000a, 0x2029: 0x000a,
+ 0x202a: 0x000a, 0x202b: 0x000a,
+ 0x2038: 0x0004, 0x2039: 0x0004,
+ // Block 0x81, offset 0x2040
+ 0x2074: 0x000a, 0x2075: 0x000a,
+ 0x2076: 0x000a, 0x2077: 0x000a,
+ // Block 0x82, offset 0x2080
+ 0x2084: 0x000c, 0x2085: 0x000c,
+ 0x20a0: 0x000c, 0x20a1: 0x000c, 0x20a2: 0x000c, 0x20a3: 0x000c,
+ 0x20a4: 0x000c, 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a7: 0x000c, 0x20a8: 0x000c, 0x20a9: 0x000c,
+ 0x20aa: 0x000c, 0x20ab: 0x000c, 0x20ac: 0x000c, 0x20ad: 0x000c, 0x20ae: 0x000c, 0x20af: 0x000c,
+ 0x20b0: 0x000c, 0x20b1: 0x000c,
+ 0x20bf: 0x000c,
+ // Block 0x83, offset 0x20c0
+ 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c,
+ 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c,
+ // Block 0x84, offset 0x2100
+ 0x2107: 0x000c, 0x2108: 0x000c, 0x2109: 0x000c, 0x210a: 0x000c, 0x210b: 0x000c,
+ 0x210c: 0x000c, 0x210d: 0x000c, 0x210e: 0x000c, 0x210f: 0x000c, 0x2110: 0x000c, 0x2111: 0x000c,
+ // Block 0x85, offset 0x2140
+ 0x2140: 0x000c, 0x2141: 0x000c, 0x2142: 0x000c,
+ 0x2173: 0x000c,
+ 0x2176: 0x000c, 0x2177: 0x000c, 0x2178: 0x000c, 0x2179: 0x000c,
+ 0x217c: 0x000c, 0x217d: 0x000c,
+ // Block 0x86, offset 0x2180
+ 0x21a5: 0x000c,
+ // Block 0x87, offset 0x21c0
+ 0x21e9: 0x000c,
+ 0x21ea: 0x000c, 0x21eb: 0x000c, 0x21ec: 0x000c, 0x21ed: 0x000c, 0x21ee: 0x000c,
+ 0x21f1: 0x000c, 0x21f2: 0x000c, 0x21f5: 0x000c,
+ 0x21f6: 0x000c,
+ // Block 0x88, offset 0x2200
+ 0x2203: 0x000c,
+ 0x220c: 0x000c,
+ 0x223c: 0x000c,
+ // Block 0x89, offset 0x2240
+ 0x2270: 0x000c, 0x2272: 0x000c, 0x2273: 0x000c, 0x2274: 0x000c,
+ 0x2277: 0x000c, 0x2278: 0x000c,
+ 0x227e: 0x000c, 0x227f: 0x000c,
+ // Block 0x8a, offset 0x2280
+ 0x2281: 0x000c,
+ 0x22ac: 0x000c, 0x22ad: 0x000c,
+ 0x22b6: 0x000c,
+ // Block 0x8b, offset 0x22c0
+ 0x22e5: 0x000c, 0x22e8: 0x000c,
+ 0x22ed: 0x000c,
+ // Block 0x8c, offset 0x2300
+ 0x231d: 0x0001,
+ 0x231e: 0x000c, 0x231f: 0x0001, 0x2320: 0x0001, 0x2321: 0x0001, 0x2322: 0x0001, 0x2323: 0x0001,
+ 0x2324: 0x0001, 0x2325: 0x0001, 0x2326: 0x0001, 0x2327: 0x0001, 0x2328: 0x0001, 0x2329: 0x0003,
+ 0x232a: 0x0001, 0x232b: 0x0001, 0x232c: 0x0001, 0x232d: 0x0001, 0x232e: 0x0001, 0x232f: 0x0001,
+ 0x2330: 0x0001, 0x2331: 0x0001, 0x2332: 0x0001, 0x2333: 0x0001, 0x2334: 0x0001, 0x2335: 0x0001,
+ 0x2336: 0x0001, 0x2337: 0x0001, 0x2338: 0x0001, 0x2339: 0x0001, 0x233a: 0x0001, 0x233b: 0x0001,
+ 0x233c: 0x0001, 0x233d: 0x0001, 0x233e: 0x0001, 0x233f: 0x0001,
+ // Block 0x8d, offset 0x2340
+ 0x2340: 0x0001, 0x2341: 0x0001, 0x2342: 0x0001, 0x2343: 0x0001, 0x2344: 0x0001, 0x2345: 0x0001,
+ 0x2346: 0x0001, 0x2347: 0x0001, 0x2348: 0x0001, 0x2349: 0x0001, 0x234a: 0x0001, 0x234b: 0x0001,
+ 0x234c: 0x0001, 0x234d: 0x0001, 0x234e: 0x0001, 0x234f: 0x0001, 0x2350: 0x000d, 0x2351: 0x000d,
+ 0x2352: 0x000d, 0x2353: 0x000d, 0x2354: 0x000d, 0x2355: 0x000d, 0x2356: 0x000d, 0x2357: 0x000d,
+ 0x2358: 0x000d, 0x2359: 0x000d, 0x235a: 0x000d, 0x235b: 0x000d, 0x235c: 0x000d, 0x235d: 0x000d,
+ 0x235e: 0x000d, 0x235f: 0x000d, 0x2360: 0x000d, 0x2361: 0x000d, 0x2362: 0x000d, 0x2363: 0x000d,
+ 0x2364: 0x000d, 0x2365: 0x000d, 0x2366: 0x000d, 0x2367: 0x000d, 0x2368: 0x000d, 0x2369: 0x000d,
+ 0x236a: 0x000d, 0x236b: 0x000d, 0x236c: 0x000d, 0x236d: 0x000d, 0x236e: 0x000d, 0x236f: 0x000d,
+ 0x2370: 0x000d, 0x2371: 0x000d, 0x2372: 0x000d, 0x2373: 0x000d, 0x2374: 0x000d, 0x2375: 0x000d,
+ 0x2376: 0x000d, 0x2377: 0x000d, 0x2378: 0x000d, 0x2379: 0x000d, 0x237a: 0x000d, 0x237b: 0x000d,
+ 0x237c: 0x000d, 0x237d: 0x000d, 0x237e: 0x000d, 0x237f: 0x000d,
+ // Block 0x8e, offset 0x2380
+ 0x2380: 0x000d, 0x2381: 0x000d, 0x2382: 0x000d, 0x2383: 0x000d, 0x2384: 0x000d, 0x2385: 0x000d,
+ 0x2386: 0x000d, 0x2387: 0x000d, 0x2388: 0x000d, 0x2389: 0x000d, 0x238a: 0x000d, 0x238b: 0x000d,
+ 0x238c: 0x000d, 0x238d: 0x000d, 0x238e: 0x000d, 0x238f: 0x000d, 0x2390: 0x000d, 0x2391: 0x000d,
+ 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d,
+ 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d,
+ 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d,
+ 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d,
+ 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d,
+ 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d,
+ 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d,
+ 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000a, 0x23bf: 0x000a,
+ // Block 0x8f, offset 0x23c0
+ 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d,
+ 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d,
+ 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000b, 0x23d1: 0x000b,
+ 0x23d2: 0x000b, 0x23d3: 0x000b, 0x23d4: 0x000b, 0x23d5: 0x000b, 0x23d6: 0x000b, 0x23d7: 0x000b,
+ 0x23d8: 0x000b, 0x23d9: 0x000b, 0x23da: 0x000b, 0x23db: 0x000b, 0x23dc: 0x000b, 0x23dd: 0x000b,
+ 0x23de: 0x000b, 0x23df: 0x000b, 0x23e0: 0x000b, 0x23e1: 0x000b, 0x23e2: 0x000b, 0x23e3: 0x000b,
+ 0x23e4: 0x000b, 0x23e5: 0x000b, 0x23e6: 0x000b, 0x23e7: 0x000b, 0x23e8: 0x000b, 0x23e9: 0x000b,
+ 0x23ea: 0x000b, 0x23eb: 0x000b, 0x23ec: 0x000b, 0x23ed: 0x000b, 0x23ee: 0x000b, 0x23ef: 0x000b,
+ 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d,
+ 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d,
+ 0x23fc: 0x000d, 0x23fd: 0x000a, 0x23fe: 0x000d, 0x23ff: 0x000d,
+ // Block 0x90, offset 0x2400
+ 0x2400: 0x000c, 0x2401: 0x000c, 0x2402: 0x000c, 0x2403: 0x000c, 0x2404: 0x000c, 0x2405: 0x000c,
+ 0x2406: 0x000c, 0x2407: 0x000c, 0x2408: 0x000c, 0x2409: 0x000c, 0x240a: 0x000c, 0x240b: 0x000c,
+ 0x240c: 0x000c, 0x240d: 0x000c, 0x240e: 0x000c, 0x240f: 0x000c, 0x2410: 0x000a, 0x2411: 0x000a,
+ 0x2412: 0x000a, 0x2413: 0x000a, 0x2414: 0x000a, 0x2415: 0x000a, 0x2416: 0x000a, 0x2417: 0x000a,
+ 0x2418: 0x000a, 0x2419: 0x000a,
+ 0x2420: 0x000c, 0x2421: 0x000c, 0x2422: 0x000c, 0x2423: 0x000c,
+ 0x2424: 0x000c, 0x2425: 0x000c, 0x2426: 0x000c, 0x2427: 0x000c, 0x2428: 0x000c, 0x2429: 0x000c,
+ 0x242a: 0x000c, 0x242b: 0x000c, 0x242c: 0x000c, 0x242d: 0x000c, 0x242e: 0x000c, 0x242f: 0x000c,
+ 0x2430: 0x000a, 0x2431: 0x000a, 0x2432: 0x000a, 0x2433: 0x000a, 0x2434: 0x000a, 0x2435: 0x000a,
+ 0x2436: 0x000a, 0x2437: 0x000a, 0x2438: 0x000a, 0x2439: 0x000a, 0x243a: 0x000a, 0x243b: 0x000a,
+ 0x243c: 0x000a, 0x243d: 0x000a, 0x243e: 0x000a, 0x243f: 0x000a,
+ // Block 0x91, offset 0x2440
+ 0x2440: 0x000a, 0x2441: 0x000a, 0x2442: 0x000a, 0x2443: 0x000a, 0x2444: 0x000a, 0x2445: 0x000a,
+ 0x2446: 0x000a, 0x2447: 0x000a, 0x2448: 0x000a, 0x2449: 0x000a, 0x244a: 0x000a, 0x244b: 0x000a,
+ 0x244c: 0x000a, 0x244d: 0x000a, 0x244e: 0x000a, 0x244f: 0x000a, 0x2450: 0x0006, 0x2451: 0x000a,
+ 0x2452: 0x0006, 0x2454: 0x000a, 0x2455: 0x0006, 0x2456: 0x000a, 0x2457: 0x000a,
+ 0x2458: 0x000a, 0x2459: 0x009a, 0x245a: 0x008a, 0x245b: 0x007a, 0x245c: 0x006a, 0x245d: 0x009a,
+ 0x245e: 0x008a, 0x245f: 0x0004, 0x2460: 0x000a, 0x2461: 0x000a, 0x2462: 0x0003, 0x2463: 0x0003,
+ 0x2464: 0x000a, 0x2465: 0x000a, 0x2466: 0x000a, 0x2468: 0x000a, 0x2469: 0x0004,
+ 0x246a: 0x0004, 0x246b: 0x000a,
+ 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d,
+ 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d,
+ 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000d, 0x247f: 0x000d,
+ // Block 0x92, offset 0x2480
+ 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d,
+ 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d,
+ 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000d, 0x2490: 0x000d, 0x2491: 0x000d,
+ 0x2492: 0x000d, 0x2493: 0x000d, 0x2494: 0x000d, 0x2495: 0x000d, 0x2496: 0x000d, 0x2497: 0x000d,
+ 0x2498: 0x000d, 0x2499: 0x000d, 0x249a: 0x000d, 0x249b: 0x000d, 0x249c: 0x000d, 0x249d: 0x000d,
+ 0x249e: 0x000d, 0x249f: 0x000d, 0x24a0: 0x000d, 0x24a1: 0x000d, 0x24a2: 0x000d, 0x24a3: 0x000d,
+ 0x24a4: 0x000d, 0x24a5: 0x000d, 0x24a6: 0x000d, 0x24a7: 0x000d, 0x24a8: 0x000d, 0x24a9: 0x000d,
+ 0x24aa: 0x000d, 0x24ab: 0x000d, 0x24ac: 0x000d, 0x24ad: 0x000d, 0x24ae: 0x000d, 0x24af: 0x000d,
+ 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d,
+ 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d,
+ 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000b,
+ // Block 0x93, offset 0x24c0
+ 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x0004, 0x24c4: 0x0004, 0x24c5: 0x0004,
+ 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x003a, 0x24c9: 0x002a, 0x24ca: 0x000a, 0x24cb: 0x0003,
+ 0x24cc: 0x0006, 0x24cd: 0x0003, 0x24ce: 0x0006, 0x24cf: 0x0006, 0x24d0: 0x0002, 0x24d1: 0x0002,
+ 0x24d2: 0x0002, 0x24d3: 0x0002, 0x24d4: 0x0002, 0x24d5: 0x0002, 0x24d6: 0x0002, 0x24d7: 0x0002,
+ 0x24d8: 0x0002, 0x24d9: 0x0002, 0x24da: 0x0006, 0x24db: 0x000a, 0x24dc: 0x000a, 0x24dd: 0x000a,
+ 0x24de: 0x000a, 0x24df: 0x000a, 0x24e0: 0x000a,
+ 0x24fb: 0x005a,
+ 0x24fc: 0x000a, 0x24fd: 0x004a, 0x24fe: 0x000a, 0x24ff: 0x000a,
+ // Block 0x94, offset 0x2500
+ 0x2500: 0x000a,
+ 0x251b: 0x005a, 0x251c: 0x000a, 0x251d: 0x004a,
+ 0x251e: 0x000a, 0x251f: 0x00fa, 0x2520: 0x00ea, 0x2521: 0x000a, 0x2522: 0x003a, 0x2523: 0x002a,
+ 0x2524: 0x000a, 0x2525: 0x000a,
+ // Block 0x95, offset 0x2540
+ 0x2560: 0x0004, 0x2561: 0x0004, 0x2562: 0x000a, 0x2563: 0x000a,
+ 0x2564: 0x000a, 0x2565: 0x0004, 0x2566: 0x0004, 0x2568: 0x000a, 0x2569: 0x000a,
+ 0x256a: 0x000a, 0x256b: 0x000a, 0x256c: 0x000a, 0x256d: 0x000a, 0x256e: 0x000a,
+ 0x2570: 0x000b, 0x2571: 0x000b, 0x2572: 0x000b, 0x2573: 0x000b, 0x2574: 0x000b, 0x2575: 0x000b,
+ 0x2576: 0x000b, 0x2577: 0x000b, 0x2578: 0x000b, 0x2579: 0x000a, 0x257a: 0x000a, 0x257b: 0x000a,
+ 0x257c: 0x000a, 0x257d: 0x000a, 0x257e: 0x000b, 0x257f: 0x000b,
+ // Block 0x96, offset 0x2580
+ 0x2581: 0x000a,
+ // Block 0x97, offset 0x25c0
+ 0x25c0: 0x000a, 0x25c1: 0x000a, 0x25c2: 0x000a, 0x25c3: 0x000a, 0x25c4: 0x000a, 0x25c5: 0x000a,
+ 0x25c6: 0x000a, 0x25c7: 0x000a, 0x25c8: 0x000a, 0x25c9: 0x000a, 0x25ca: 0x000a, 0x25cb: 0x000a,
+ 0x25cc: 0x000a, 0x25d0: 0x000a, 0x25d1: 0x000a,
+ 0x25d2: 0x000a, 0x25d3: 0x000a, 0x25d4: 0x000a, 0x25d5: 0x000a, 0x25d6: 0x000a, 0x25d7: 0x000a,
+ 0x25d8: 0x000a, 0x25d9: 0x000a, 0x25da: 0x000a, 0x25db: 0x000a,
+ 0x25e0: 0x000a,
+ // Block 0x98, offset 0x2600
+ 0x263d: 0x000c,
+ // Block 0x99, offset 0x2640
+ 0x2660: 0x000c, 0x2661: 0x0002, 0x2662: 0x0002, 0x2663: 0x0002,
+ 0x2664: 0x0002, 0x2665: 0x0002, 0x2666: 0x0002, 0x2667: 0x0002, 0x2668: 0x0002, 0x2669: 0x0002,
+ 0x266a: 0x0002, 0x266b: 0x0002, 0x266c: 0x0002, 0x266d: 0x0002, 0x266e: 0x0002, 0x266f: 0x0002,
+ 0x2670: 0x0002, 0x2671: 0x0002, 0x2672: 0x0002, 0x2673: 0x0002, 0x2674: 0x0002, 0x2675: 0x0002,
+ 0x2676: 0x0002, 0x2677: 0x0002, 0x2678: 0x0002, 0x2679: 0x0002, 0x267a: 0x0002, 0x267b: 0x0002,
+ // Block 0x9a, offset 0x2680
+ 0x26b6: 0x000c, 0x26b7: 0x000c, 0x26b8: 0x000c, 0x26b9: 0x000c, 0x26ba: 0x000c,
+ // Block 0x9b, offset 0x26c0
+ 0x26c0: 0x0001, 0x26c1: 0x0001, 0x26c2: 0x0001, 0x26c3: 0x0001, 0x26c4: 0x0001, 0x26c5: 0x0001,
+ 0x26c6: 0x0001, 0x26c7: 0x0001, 0x26c8: 0x0001, 0x26c9: 0x0001, 0x26ca: 0x0001, 0x26cb: 0x0001,
+ 0x26cc: 0x0001, 0x26cd: 0x0001, 0x26ce: 0x0001, 0x26cf: 0x0001, 0x26d0: 0x0001, 0x26d1: 0x0001,
+ 0x26d2: 0x0001, 0x26d3: 0x0001, 0x26d4: 0x0001, 0x26d5: 0x0001, 0x26d6: 0x0001, 0x26d7: 0x0001,
+ 0x26d8: 0x0001, 0x26d9: 0x0001, 0x26da: 0x0001, 0x26db: 0x0001, 0x26dc: 0x0001, 0x26dd: 0x0001,
+ 0x26de: 0x0001, 0x26df: 0x0001, 0x26e0: 0x0001, 0x26e1: 0x0001, 0x26e2: 0x0001, 0x26e3: 0x0001,
+ 0x26e4: 0x0001, 0x26e5: 0x0001, 0x26e6: 0x0001, 0x26e7: 0x0001, 0x26e8: 0x0001, 0x26e9: 0x0001,
+ 0x26ea: 0x0001, 0x26eb: 0x0001, 0x26ec: 0x0001, 0x26ed: 0x0001, 0x26ee: 0x0001, 0x26ef: 0x0001,
+ 0x26f0: 0x0001, 0x26f1: 0x0001, 0x26f2: 0x0001, 0x26f3: 0x0001, 0x26f4: 0x0001, 0x26f5: 0x0001,
+ 0x26f6: 0x0001, 0x26f7: 0x0001, 0x26f8: 0x0001, 0x26f9: 0x0001, 0x26fa: 0x0001, 0x26fb: 0x0001,
+ 0x26fc: 0x0001, 0x26fd: 0x0001, 0x26fe: 0x0001, 0x26ff: 0x0001,
+ // Block 0x9c, offset 0x2700
+ 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001,
+ 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001,
+ 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001,
+ 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001,
+ 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001,
+ 0x271e: 0x0001, 0x271f: 0x000a, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001,
+ 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001,
+ 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001,
+ 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001,
+ 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001,
+ 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001,
+ // Block 0x9d, offset 0x2740
+ 0x2740: 0x0001, 0x2741: 0x000c, 0x2742: 0x000c, 0x2743: 0x000c, 0x2744: 0x0001, 0x2745: 0x000c,
+ 0x2746: 0x000c, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001,
+ 0x274c: 0x000c, 0x274d: 0x000c, 0x274e: 0x000c, 0x274f: 0x000c, 0x2750: 0x0001, 0x2751: 0x0001,
+ 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001,
+ 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001,
+ 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001,
+ 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001,
+ 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001,
+ 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001,
+ 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, 0x277b: 0x0001,
+ 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x000c,
+ // Block 0x9e, offset 0x2780
+ 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001,
+ 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001,
+ 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001,
+ 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001,
+ 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001,
+ 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001,
+ 0x27a4: 0x0001, 0x27a5: 0x000c, 0x27a6: 0x000c, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001,
+ 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001,
+ 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001,
+ 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001,
+ 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001,
+ // Block 0x9f, offset 0x27c0
+ 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001,
+ 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001,
+ 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001,
+ 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001,
+ 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001,
+ 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001,
+ 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001,
+ 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001,
+ 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001,
+ 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x000a, 0x27fa: 0x000a, 0x27fb: 0x000a,
+ 0x27fc: 0x000a, 0x27fd: 0x000a, 0x27fe: 0x000a, 0x27ff: 0x000a,
+ // Block 0xa0, offset 0x2800
+ 0x2800: 0x000d, 0x2801: 0x000d, 0x2802: 0x000d, 0x2803: 0x000d, 0x2804: 0x000d, 0x2805: 0x000d,
+ 0x2806: 0x000d, 0x2807: 0x000d, 0x2808: 0x000d, 0x2809: 0x000d, 0x280a: 0x000d, 0x280b: 0x000d,
+ 0x280c: 0x000d, 0x280d: 0x000d, 0x280e: 0x000d, 0x280f: 0x000d, 0x2810: 0x000d, 0x2811: 0x000d,
+ 0x2812: 0x000d, 0x2813: 0x000d, 0x2814: 0x000d, 0x2815: 0x000d, 0x2816: 0x000d, 0x2817: 0x000d,
+ 0x2818: 0x000d, 0x2819: 0x000d, 0x281a: 0x000d, 0x281b: 0x000d, 0x281c: 0x000d, 0x281d: 0x000d,
+ 0x281e: 0x000d, 0x281f: 0x000d, 0x2820: 0x000d, 0x2821: 0x000d, 0x2822: 0x000d, 0x2823: 0x000d,
+ 0x2824: 0x000c, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x000c, 0x2828: 0x000d, 0x2829: 0x000d,
+ 0x282a: 0x000d, 0x282b: 0x000d, 0x282c: 0x000d, 0x282d: 0x000d, 0x282e: 0x000d, 0x282f: 0x000d,
+ 0x2830: 0x0005, 0x2831: 0x0005, 0x2832: 0x0005, 0x2833: 0x0005, 0x2834: 0x0005, 0x2835: 0x0005,
+ 0x2836: 0x0005, 0x2837: 0x0005, 0x2838: 0x0005, 0x2839: 0x0005, 0x283a: 0x000d, 0x283b: 0x000d,
+ 0x283c: 0x000d, 0x283d: 0x000d, 0x283e: 0x000d, 0x283f: 0x000d,
+ // Block 0xa1, offset 0x2840
+ 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001,
+ 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001,
+ 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001,
+ 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001,
+ 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001,
+ 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0005, 0x2861: 0x0005, 0x2862: 0x0005, 0x2863: 0x0005,
+ 0x2864: 0x0005, 0x2865: 0x0005, 0x2866: 0x0005, 0x2867: 0x0005, 0x2868: 0x0005, 0x2869: 0x0005,
+ 0x286a: 0x0005, 0x286b: 0x0005, 0x286c: 0x0005, 0x286d: 0x0005, 0x286e: 0x0005, 0x286f: 0x0005,
+ 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005,
+ 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x0005, 0x287b: 0x0005,
+ 0x287c: 0x0005, 0x287d: 0x0005, 0x287e: 0x0005, 0x287f: 0x0001,
+ // Block 0xa2, offset 0x2880
+ 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001,
+ 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001,
+ 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001,
+ 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001,
+ 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001,
+ 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001,
+ 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001,
+ 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001,
+ 0x28b0: 0x000d, 0x28b1: 0x000d, 0x28b2: 0x000d, 0x28b3: 0x000d, 0x28b4: 0x000d, 0x28b5: 0x000d,
+ 0x28b6: 0x000d, 0x28b7: 0x000d, 0x28b8: 0x000d, 0x28b9: 0x000d, 0x28ba: 0x000d, 0x28bb: 0x000d,
+ 0x28bc: 0x000d, 0x28bd: 0x000d, 0x28be: 0x000d, 0x28bf: 0x000d,
+ // Block 0xa3, offset 0x28c0
+ 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d,
+ 0x28c6: 0x000c, 0x28c7: 0x000c, 0x28c8: 0x000c, 0x28c9: 0x000c, 0x28ca: 0x000c, 0x28cb: 0x000c,
+ 0x28cc: 0x000c, 0x28cd: 0x000c, 0x28ce: 0x000c, 0x28cf: 0x000c, 0x28d0: 0x000c, 0x28d1: 0x000d,
+ 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d,
+ 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d,
+ 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d,
+ 0x28e4: 0x000d, 0x28e5: 0x000d, 0x28e6: 0x000d, 0x28e7: 0x000d, 0x28e8: 0x000d, 0x28e9: 0x000d,
+ 0x28ea: 0x000d, 0x28eb: 0x000d, 0x28ec: 0x000d, 0x28ed: 0x000d, 0x28ee: 0x000d, 0x28ef: 0x000d,
+ 0x28f0: 0x0001, 0x28f1: 0x0001, 0x28f2: 0x0001, 0x28f3: 0x0001, 0x28f4: 0x0001, 0x28f5: 0x0001,
+ 0x28f6: 0x0001, 0x28f7: 0x0001, 0x28f8: 0x0001, 0x28f9: 0x0001, 0x28fa: 0x0001, 0x28fb: 0x0001,
+ 0x28fc: 0x0001, 0x28fd: 0x0001, 0x28fe: 0x0001, 0x28ff: 0x0001,
+ // Block 0xa4, offset 0x2900
+ 0x2901: 0x000c,
+ 0x2938: 0x000c, 0x2939: 0x000c, 0x293a: 0x000c, 0x293b: 0x000c,
+ 0x293c: 0x000c, 0x293d: 0x000c, 0x293e: 0x000c, 0x293f: 0x000c,
+ // Block 0xa5, offset 0x2940
+ 0x2940: 0x000c, 0x2941: 0x000c, 0x2942: 0x000c, 0x2943: 0x000c, 0x2944: 0x000c, 0x2945: 0x000c,
+ 0x2946: 0x000c,
+ 0x2952: 0x000a, 0x2953: 0x000a, 0x2954: 0x000a, 0x2955: 0x000a, 0x2956: 0x000a, 0x2957: 0x000a,
+ 0x2958: 0x000a, 0x2959: 0x000a, 0x295a: 0x000a, 0x295b: 0x000a, 0x295c: 0x000a, 0x295d: 0x000a,
+ 0x295e: 0x000a, 0x295f: 0x000a, 0x2960: 0x000a, 0x2961: 0x000a, 0x2962: 0x000a, 0x2963: 0x000a,
+ 0x2964: 0x000a, 0x2965: 0x000a,
+ 0x297f: 0x000c,
+ // Block 0xa6, offset 0x2980
+ 0x2980: 0x000c, 0x2981: 0x000c,
+ 0x29b3: 0x000c, 0x29b4: 0x000c, 0x29b5: 0x000c,
+ 0x29b6: 0x000c, 0x29b9: 0x000c, 0x29ba: 0x000c,
+ // Block 0xa7, offset 0x29c0
+ 0x29c0: 0x000c, 0x29c1: 0x000c, 0x29c2: 0x000c,
+ 0x29e7: 0x000c, 0x29e8: 0x000c, 0x29e9: 0x000c,
+ 0x29ea: 0x000c, 0x29eb: 0x000c, 0x29ed: 0x000c, 0x29ee: 0x000c, 0x29ef: 0x000c,
+ 0x29f0: 0x000c, 0x29f1: 0x000c, 0x29f2: 0x000c, 0x29f3: 0x000c, 0x29f4: 0x000c,
+ // Block 0xa8, offset 0x2a00
+ 0x2a33: 0x000c,
+ // Block 0xa9, offset 0x2a40
+ 0x2a40: 0x000c, 0x2a41: 0x000c,
+ 0x2a76: 0x000c, 0x2a77: 0x000c, 0x2a78: 0x000c, 0x2a79: 0x000c, 0x2a7a: 0x000c, 0x2a7b: 0x000c,
+ 0x2a7c: 0x000c, 0x2a7d: 0x000c, 0x2a7e: 0x000c,
+ // Block 0xaa, offset 0x2a80
+ 0x2a89: 0x000c, 0x2a8a: 0x000c, 0x2a8b: 0x000c,
+ 0x2a8c: 0x000c,
+ // Block 0xab, offset 0x2ac0
+ 0x2aef: 0x000c,
+ 0x2af0: 0x000c, 0x2af1: 0x000c, 0x2af4: 0x000c,
+ 0x2af6: 0x000c, 0x2af7: 0x000c,
+ 0x2afe: 0x000c,
+ // Block 0xac, offset 0x2b00
+ 0x2b1f: 0x000c, 0x2b23: 0x000c,
+ 0x2b24: 0x000c, 0x2b25: 0x000c, 0x2b26: 0x000c, 0x2b27: 0x000c, 0x2b28: 0x000c, 0x2b29: 0x000c,
+ 0x2b2a: 0x000c,
+ // Block 0xad, offset 0x2b40
+ 0x2b40: 0x000c,
+ 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c,
+ 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c,
+ 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c,
+ // Block 0xae, offset 0x2b80
+ 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c,
+ 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c,
+ // Block 0xaf, offset 0x2bc0
+ 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c,
+ 0x2bc6: 0x000c,
+ 0x2bde: 0x000c,
+ // Block 0xb0, offset 0x2c00
+ 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c,
+ 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c,
+ 0x2c3f: 0x000c,
+ // Block 0xb1, offset 0x2c40
+ 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c,
+ // Block 0xb2, offset 0x2c80
+ 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c,
+ 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c,
+ // Block 0xb3, offset 0x2cc0
+ 0x2cc0: 0x000c,
+ 0x2cdc: 0x000c, 0x2cdd: 0x000c,
+ // Block 0xb4, offset 0x2d00
+ 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c,
+ 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c,
+ 0x2d3d: 0x000c, 0x2d3f: 0x000c,
+ // Block 0xb5, offset 0x2d40
+ 0x2d40: 0x000c,
+ 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a,
+ 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a,
+ 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a,
+ // Block 0xb6, offset 0x2d80
+ 0x2dab: 0x000c, 0x2dad: 0x000c,
+ 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c,
+ 0x2db7: 0x000c,
+ // Block 0xb7, offset 0x2dc0
+ 0x2ddd: 0x000c,
+ 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c,
+ 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c,
+ 0x2dea: 0x000c, 0x2deb: 0x000c,
+ // Block 0xb8, offset 0x2e00
+ 0x2e2f: 0x000c,
+ 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c,
+ 0x2e36: 0x000c, 0x2e37: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c,
+ // Block 0xb9, offset 0x2e40
+ 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c,
+ 0x2e5a: 0x000c, 0x2e5b: 0x000c,
+ 0x2e60: 0x000c,
+ // Block 0xba, offset 0x2e80
+ 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 0x2e85: 0x000c,
+ 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c,
+ 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c,
+ 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c,
+ 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c,
+ // Block 0xbb, offset 0x2ec0
+ 0x2ec7: 0x000c,
+ 0x2ed1: 0x000c,
+ 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c,
+ 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c,
+ // Block 0xbc, offset 0x2f00
+ 0x2f0a: 0x000c, 0x2f0b: 0x000c,
+ 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c,
+ 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c,
+ 0x2f18: 0x000c, 0x2f19: 0x000c,
+ // Block 0xbd, offset 0x2f40
+ 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c,
+ 0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c,
+ 0x2f7c: 0x000c, 0x2f7d: 0x000c,
+ // Block 0xbe, offset 0x2f80
+ 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c,
+ 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c,
+ 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c,
+ 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c,
+ 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c,
+ 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c,
+ 0x2fb6: 0x000c,
+ // Block 0xbf, offset 0x2fc0
+ 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c,
+ 0x2ff6: 0x000c, 0x2ffa: 0x000c,
+ 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c,
+ // Block 0xc0, offset 0x3000
+ 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c,
+ 0x3007: 0x000c,
+ // Block 0xc1, offset 0x3040
+ 0x3050: 0x000c, 0x3051: 0x000c,
+ 0x3055: 0x000c, 0x3057: 0x000c,
+ // Block 0xc2, offset 0x3080
+ 0x30b3: 0x000c, 0x30b4: 0x000c,
+ // Block 0xc3, offset 0x30c0
+ 0x30d5: 0x000a, 0x30d6: 0x000a, 0x30d7: 0x000a,
+ 0x30d8: 0x000a, 0x30d9: 0x000a, 0x30da: 0x000a, 0x30db: 0x000a, 0x30dc: 0x000a, 0x30dd: 0x0004,
+ 0x30de: 0x0004, 0x30df: 0x0004, 0x30e0: 0x0004, 0x30e1: 0x000a, 0x30e2: 0x000a, 0x30e3: 0x000a,
+ 0x30e4: 0x000a, 0x30e5: 0x000a, 0x30e6: 0x000a, 0x30e7: 0x000a, 0x30e8: 0x000a, 0x30e9: 0x000a,
+ 0x30ea: 0x000a, 0x30eb: 0x000a, 0x30ec: 0x000a, 0x30ed: 0x000a, 0x30ee: 0x000a, 0x30ef: 0x000a,
+ 0x30f0: 0x000a, 0x30f1: 0x000a,
+ // Block 0xc4, offset 0x3100
+ 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c,
+ // Block 0xc5, offset 0x3140
+ 0x3170: 0x000c, 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c,
+ 0x3176: 0x000c,
+ // Block 0xc6, offset 0x3180
+ 0x318f: 0x000c,
+ // Block 0xc7, offset 0x31c0
+ 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c,
+ 0x31d2: 0x000c,
+ // Block 0xc8, offset 0x3200
+ 0x3222: 0x000a,
+ // Block 0xc9, offset 0x3240
+ 0x325d: 0x000c,
+ 0x325e: 0x000c, 0x3260: 0x000b, 0x3261: 0x000b, 0x3262: 0x000b, 0x3263: 0x000b,
+ // Block 0xca, offset 0x3280
+ 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c,
+ 0x32b3: 0x000b, 0x32b4: 0x000b, 0x32b5: 0x000b,
+ 0x32b6: 0x000b, 0x32b7: 0x000b, 0x32b8: 0x000b, 0x32b9: 0x000b, 0x32ba: 0x000b, 0x32bb: 0x000c,
+ 0x32bc: 0x000c, 0x32bd: 0x000c, 0x32be: 0x000c, 0x32bf: 0x000c,
+ // Block 0xcb, offset 0x32c0
+ 0x32c0: 0x000c, 0x32c1: 0x000c, 0x32c2: 0x000c, 0x32c5: 0x000c,
+ 0x32c6: 0x000c, 0x32c7: 0x000c, 0x32c8: 0x000c, 0x32c9: 0x000c, 0x32ca: 0x000c, 0x32cb: 0x000c,
+ 0x32ea: 0x000c, 0x32eb: 0x000c, 0x32ec: 0x000c, 0x32ed: 0x000c,
+ // Block 0xcc, offset 0x3300
+ 0x3300: 0x000a, 0x3301: 0x000a, 0x3302: 0x000c, 0x3303: 0x000c, 0x3304: 0x000c, 0x3305: 0x000a,
+ // Block 0xcd, offset 0x3340
+ 0x3340: 0x000a, 0x3341: 0x000a, 0x3342: 0x000a, 0x3343: 0x000a, 0x3344: 0x000a, 0x3345: 0x000a,
+ 0x3346: 0x000a, 0x3347: 0x000a, 0x3348: 0x000a, 0x3349: 0x000a, 0x334a: 0x000a, 0x334b: 0x000a,
+ 0x334c: 0x000a, 0x334d: 0x000a, 0x334e: 0x000a, 0x334f: 0x000a, 0x3350: 0x000a, 0x3351: 0x000a,
+ 0x3352: 0x000a, 0x3353: 0x000a, 0x3354: 0x000a, 0x3355: 0x000a, 0x3356: 0x000a,
+ // Block 0xce, offset 0x3380
+ 0x339b: 0x000a,
+ // Block 0xcf, offset 0x33c0
+ 0x33d5: 0x000a,
+ // Block 0xd0, offset 0x3400
+ 0x340f: 0x000a,
+ // Block 0xd1, offset 0x3440
+ 0x3449: 0x000a,
+ // Block 0xd2, offset 0x3480
+ 0x3483: 0x000a,
+ 0x348e: 0x0002, 0x348f: 0x0002, 0x3490: 0x0002, 0x3491: 0x0002,
+ 0x3492: 0x0002, 0x3493: 0x0002, 0x3494: 0x0002, 0x3495: 0x0002, 0x3496: 0x0002, 0x3497: 0x0002,
+ 0x3498: 0x0002, 0x3499: 0x0002, 0x349a: 0x0002, 0x349b: 0x0002, 0x349c: 0x0002, 0x349d: 0x0002,
+ 0x349e: 0x0002, 0x349f: 0x0002, 0x34a0: 0x0002, 0x34a1: 0x0002, 0x34a2: 0x0002, 0x34a3: 0x0002,
+ 0x34a4: 0x0002, 0x34a5: 0x0002, 0x34a6: 0x0002, 0x34a7: 0x0002, 0x34a8: 0x0002, 0x34a9: 0x0002,
+ 0x34aa: 0x0002, 0x34ab: 0x0002, 0x34ac: 0x0002, 0x34ad: 0x0002, 0x34ae: 0x0002, 0x34af: 0x0002,
+ 0x34b0: 0x0002, 0x34b1: 0x0002, 0x34b2: 0x0002, 0x34b3: 0x0002, 0x34b4: 0x0002, 0x34b5: 0x0002,
+ 0x34b6: 0x0002, 0x34b7: 0x0002, 0x34b8: 0x0002, 0x34b9: 0x0002, 0x34ba: 0x0002, 0x34bb: 0x0002,
+ 0x34bc: 0x0002, 0x34bd: 0x0002, 0x34be: 0x0002, 0x34bf: 0x0002,
+ // Block 0xd3, offset 0x34c0
+ 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c,
+ 0x34c6: 0x000c, 0x34c7: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c,
+ 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c,
+ 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c,
+ 0x34d8: 0x000c, 0x34d9: 0x000c, 0x34da: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c,
+ 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e2: 0x000c, 0x34e3: 0x000c,
+ 0x34e4: 0x000c, 0x34e5: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c,
+ 0x34ea: 0x000c, 0x34eb: 0x000c, 0x34ec: 0x000c, 0x34ed: 0x000c, 0x34ee: 0x000c, 0x34ef: 0x000c,
+ 0x34f0: 0x000c, 0x34f1: 0x000c, 0x34f2: 0x000c, 0x34f3: 0x000c, 0x34f4: 0x000c, 0x34f5: 0x000c,
+ 0x34f6: 0x000c, 0x34fb: 0x000c,
+ 0x34fc: 0x000c, 0x34fd: 0x000c, 0x34fe: 0x000c, 0x34ff: 0x000c,
+ // Block 0xd4, offset 0x3500
+ 0x3500: 0x000c, 0x3501: 0x000c, 0x3502: 0x000c, 0x3503: 0x000c, 0x3504: 0x000c, 0x3505: 0x000c,
+ 0x3506: 0x000c, 0x3507: 0x000c, 0x3508: 0x000c, 0x3509: 0x000c, 0x350a: 0x000c, 0x350b: 0x000c,
+ 0x350c: 0x000c, 0x350d: 0x000c, 0x350e: 0x000c, 0x350f: 0x000c, 0x3510: 0x000c, 0x3511: 0x000c,
+ 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x000c,
+ 0x3518: 0x000c, 0x3519: 0x000c, 0x351a: 0x000c, 0x351b: 0x000c, 0x351c: 0x000c, 0x351d: 0x000c,
+ 0x351e: 0x000c, 0x351f: 0x000c, 0x3520: 0x000c, 0x3521: 0x000c, 0x3522: 0x000c, 0x3523: 0x000c,
+ 0x3524: 0x000c, 0x3525: 0x000c, 0x3526: 0x000c, 0x3527: 0x000c, 0x3528: 0x000c, 0x3529: 0x000c,
+ 0x352a: 0x000c, 0x352b: 0x000c, 0x352c: 0x000c,
+ 0x3535: 0x000c,
+ // Block 0xd5, offset 0x3540
+ 0x3544: 0x000c,
+ 0x355b: 0x000c, 0x355c: 0x000c, 0x355d: 0x000c,
+ 0x355e: 0x000c, 0x355f: 0x000c, 0x3561: 0x000c, 0x3562: 0x000c, 0x3563: 0x000c,
+ 0x3564: 0x000c, 0x3565: 0x000c, 0x3566: 0x000c, 0x3567: 0x000c, 0x3568: 0x000c, 0x3569: 0x000c,
+ 0x356a: 0x000c, 0x356b: 0x000c, 0x356c: 0x000c, 0x356d: 0x000c, 0x356e: 0x000c, 0x356f: 0x000c,
+ // Block 0xd6, offset 0x3580
+ 0x3580: 0x000c, 0x3581: 0x000c, 0x3582: 0x000c, 0x3583: 0x000c, 0x3584: 0x000c, 0x3585: 0x000c,
+ 0x3586: 0x000c, 0x3588: 0x000c, 0x3589: 0x000c, 0x358a: 0x000c, 0x358b: 0x000c,
+ 0x358c: 0x000c, 0x358d: 0x000c, 0x358e: 0x000c, 0x358f: 0x000c, 0x3590: 0x000c, 0x3591: 0x000c,
+ 0x3592: 0x000c, 0x3593: 0x000c, 0x3594: 0x000c, 0x3595: 0x000c, 0x3596: 0x000c, 0x3597: 0x000c,
+ 0x3598: 0x000c, 0x359b: 0x000c, 0x359c: 0x000c, 0x359d: 0x000c,
+ 0x359e: 0x000c, 0x359f: 0x000c, 0x35a0: 0x000c, 0x35a1: 0x000c, 0x35a3: 0x000c,
+ 0x35a4: 0x000c, 0x35a6: 0x000c, 0x35a7: 0x000c, 0x35a8: 0x000c, 0x35a9: 0x000c,
+ 0x35aa: 0x000c,
+ // Block 0xd7, offset 0x35c0
+ 0x35ec: 0x000c, 0x35ed: 0x000c, 0x35ee: 0x000c, 0x35ef: 0x000c,
+ 0x35ff: 0x0004,
+ // Block 0xd8, offset 0x3600
+ 0x3600: 0x0001, 0x3601: 0x0001, 0x3602: 0x0001, 0x3603: 0x0001, 0x3604: 0x0001, 0x3605: 0x0001,
+ 0x3606: 0x0001, 0x3607: 0x0001, 0x3608: 0x0001, 0x3609: 0x0001, 0x360a: 0x0001, 0x360b: 0x0001,
+ 0x360c: 0x0001, 0x360d: 0x0001, 0x360e: 0x0001, 0x360f: 0x0001, 0x3610: 0x000c, 0x3611: 0x000c,
+ 0x3612: 0x000c, 0x3613: 0x000c, 0x3614: 0x000c, 0x3615: 0x000c, 0x3616: 0x000c, 0x3617: 0x0001,
+ 0x3618: 0x0001, 0x3619: 0x0001, 0x361a: 0x0001, 0x361b: 0x0001, 0x361c: 0x0001, 0x361d: 0x0001,
+ 0x361e: 0x0001, 0x361f: 0x0001, 0x3620: 0x0001, 0x3621: 0x0001, 0x3622: 0x0001, 0x3623: 0x0001,
+ 0x3624: 0x0001, 0x3625: 0x0001, 0x3626: 0x0001, 0x3627: 0x0001, 0x3628: 0x0001, 0x3629: 0x0001,
+ 0x362a: 0x0001, 0x362b: 0x0001, 0x362c: 0x0001, 0x362d: 0x0001, 0x362e: 0x0001, 0x362f: 0x0001,
+ 0x3630: 0x0001, 0x3631: 0x0001, 0x3632: 0x0001, 0x3633: 0x0001, 0x3634: 0x0001, 0x3635: 0x0001,
+ 0x3636: 0x0001, 0x3637: 0x0001, 0x3638: 0x0001, 0x3639: 0x0001, 0x363a: 0x0001, 0x363b: 0x0001,
+ 0x363c: 0x0001, 0x363d: 0x0001, 0x363e: 0x0001, 0x363f: 0x0001,
+ // Block 0xd9, offset 0x3640
+ 0x3640: 0x0001, 0x3641: 0x0001, 0x3642: 0x0001, 0x3643: 0x0001, 0x3644: 0x000c, 0x3645: 0x000c,
+ 0x3646: 0x000c, 0x3647: 0x000c, 0x3648: 0x000c, 0x3649: 0x000c, 0x364a: 0x000c, 0x364b: 0x0001,
+ 0x364c: 0x0001, 0x364d: 0x0001, 0x364e: 0x0001, 0x364f: 0x0001, 0x3650: 0x0001, 0x3651: 0x0001,
+ 0x3652: 0x0001, 0x3653: 0x0001, 0x3654: 0x0001, 0x3655: 0x0001, 0x3656: 0x0001, 0x3657: 0x0001,
+ 0x3658: 0x0001, 0x3659: 0x0001, 0x365a: 0x0001, 0x365b: 0x0001, 0x365c: 0x0001, 0x365d: 0x0001,
+ 0x365e: 0x0001, 0x365f: 0x0001, 0x3660: 0x0001, 0x3661: 0x0001, 0x3662: 0x0001, 0x3663: 0x0001,
+ 0x3664: 0x0001, 0x3665: 0x0001, 0x3666: 0x0001, 0x3667: 0x0001, 0x3668: 0x0001, 0x3669: 0x0001,
+ 0x366a: 0x0001, 0x366b: 0x0001, 0x366c: 0x0001, 0x366d: 0x0001, 0x366e: 0x0001, 0x366f: 0x0001,
+ 0x3670: 0x0001, 0x3671: 0x0001, 0x3672: 0x0001, 0x3673: 0x0001, 0x3674: 0x0001, 0x3675: 0x0001,
+ 0x3676: 0x0001, 0x3677: 0x0001, 0x3678: 0x0001, 0x3679: 0x0001, 0x367a: 0x0001, 0x367b: 0x0001,
+ 0x367c: 0x0001, 0x367d: 0x0001, 0x367e: 0x0001, 0x367f: 0x0001,
+ // Block 0xda, offset 0x3680
+ 0x3680: 0x000d, 0x3681: 0x000d, 0x3682: 0x000d, 0x3683: 0x000d, 0x3684: 0x000d, 0x3685: 0x000d,
+ 0x3686: 0x000d, 0x3687: 0x000d, 0x3688: 0x000d, 0x3689: 0x000d, 0x368a: 0x000d, 0x368b: 0x000d,
+ 0x368c: 0x000d, 0x368d: 0x000d, 0x368e: 0x000d, 0x368f: 0x000d, 0x3690: 0x0001, 0x3691: 0x0001,
+ 0x3692: 0x0001, 0x3693: 0x0001, 0x3694: 0x0001, 0x3695: 0x0001, 0x3696: 0x0001, 0x3697: 0x0001,
+ 0x3698: 0x0001, 0x3699: 0x0001, 0x369a: 0x0001, 0x369b: 0x0001, 0x369c: 0x0001, 0x369d: 0x0001,
+ 0x369e: 0x0001, 0x369f: 0x0001, 0x36a0: 0x0001, 0x36a1: 0x0001, 0x36a2: 0x0001, 0x36a3: 0x0001,
+ 0x36a4: 0x0001, 0x36a5: 0x0001, 0x36a6: 0x0001, 0x36a7: 0x0001, 0x36a8: 0x0001, 0x36a9: 0x0001,
+ 0x36aa: 0x0001, 0x36ab: 0x0001, 0x36ac: 0x0001, 0x36ad: 0x0001, 0x36ae: 0x0001, 0x36af: 0x0001,
+ 0x36b0: 0x0001, 0x36b1: 0x0001, 0x36b2: 0x0001, 0x36b3: 0x0001, 0x36b4: 0x0001, 0x36b5: 0x0001,
+ 0x36b6: 0x0001, 0x36b7: 0x0001, 0x36b8: 0x0001, 0x36b9: 0x0001, 0x36ba: 0x0001, 0x36bb: 0x0001,
+ 0x36bc: 0x0001, 0x36bd: 0x0001, 0x36be: 0x0001, 0x36bf: 0x0001,
+ // Block 0xdb, offset 0x36c0
+ 0x36c0: 0x000d, 0x36c1: 0x000d, 0x36c2: 0x000d, 0x36c3: 0x000d, 0x36c4: 0x000d, 0x36c5: 0x000d,
+ 0x36c6: 0x000d, 0x36c7: 0x000d, 0x36c8: 0x000d, 0x36c9: 0x000d, 0x36ca: 0x000d, 0x36cb: 0x000d,
+ 0x36cc: 0x000d, 0x36cd: 0x000d, 0x36ce: 0x000d, 0x36cf: 0x000d, 0x36d0: 0x000d, 0x36d1: 0x000d,
+ 0x36d2: 0x000d, 0x36d3: 0x000d, 0x36d4: 0x000d, 0x36d5: 0x000d, 0x36d6: 0x000d, 0x36d7: 0x000d,
+ 0x36d8: 0x000d, 0x36d9: 0x000d, 0x36da: 0x000d, 0x36db: 0x000d, 0x36dc: 0x000d, 0x36dd: 0x000d,
+ 0x36de: 0x000d, 0x36df: 0x000d, 0x36e0: 0x000d, 0x36e1: 0x000d, 0x36e2: 0x000d, 0x36e3: 0x000d,
+ 0x36e4: 0x000d, 0x36e5: 0x000d, 0x36e6: 0x000d, 0x36e7: 0x000d, 0x36e8: 0x000d, 0x36e9: 0x000d,
+ 0x36ea: 0x000d, 0x36eb: 0x000d, 0x36ec: 0x000d, 0x36ed: 0x000d, 0x36ee: 0x000d, 0x36ef: 0x000d,
+ 0x36f0: 0x000a, 0x36f1: 0x000a, 0x36f2: 0x000d, 0x36f3: 0x000d, 0x36f4: 0x000d, 0x36f5: 0x000d,
+ 0x36f6: 0x000d, 0x36f7: 0x000d, 0x36f8: 0x000d, 0x36f9: 0x000d, 0x36fa: 0x000d, 0x36fb: 0x000d,
+ 0x36fc: 0x000d, 0x36fd: 0x000d, 0x36fe: 0x000d, 0x36ff: 0x000d,
+ // Block 0xdc, offset 0x3700
+ 0x3700: 0x000a, 0x3701: 0x000a, 0x3702: 0x000a, 0x3703: 0x000a, 0x3704: 0x000a, 0x3705: 0x000a,
+ 0x3706: 0x000a, 0x3707: 0x000a, 0x3708: 0x000a, 0x3709: 0x000a, 0x370a: 0x000a, 0x370b: 0x000a,
+ 0x370c: 0x000a, 0x370d: 0x000a, 0x370e: 0x000a, 0x370f: 0x000a, 0x3710: 0x000a, 0x3711: 0x000a,
+ 0x3712: 0x000a, 0x3713: 0x000a, 0x3714: 0x000a, 0x3715: 0x000a, 0x3716: 0x000a, 0x3717: 0x000a,
+ 0x3718: 0x000a, 0x3719: 0x000a, 0x371a: 0x000a, 0x371b: 0x000a, 0x371c: 0x000a, 0x371d: 0x000a,
+ 0x371e: 0x000a, 0x371f: 0x000a, 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a,
+ 0x3724: 0x000a, 0x3725: 0x000a, 0x3726: 0x000a, 0x3727: 0x000a, 0x3728: 0x000a, 0x3729: 0x000a,
+ 0x372a: 0x000a, 0x372b: 0x000a,
+ 0x3730: 0x000a, 0x3731: 0x000a, 0x3732: 0x000a, 0x3733: 0x000a, 0x3734: 0x000a, 0x3735: 0x000a,
+ 0x3736: 0x000a, 0x3737: 0x000a, 0x3738: 0x000a, 0x3739: 0x000a, 0x373a: 0x000a, 0x373b: 0x000a,
+ 0x373c: 0x000a, 0x373d: 0x000a, 0x373e: 0x000a, 0x373f: 0x000a,
+ // Block 0xdd, offset 0x3740
+ 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a,
+ 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a,
+ 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a,
+ 0x3752: 0x000a, 0x3753: 0x000a,
+ 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a,
+ 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a,
+ 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, 0x376d: 0x000a, 0x376e: 0x000a,
+ 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a,
+ 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, 0x377a: 0x000a, 0x377b: 0x000a,
+ 0x377c: 0x000a, 0x377d: 0x000a, 0x377e: 0x000a, 0x377f: 0x000a,
+ // Block 0xde, offset 0x3780
+ 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a,
+ 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a,
+ 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3791: 0x000a,
+ 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a,
+ 0x3798: 0x000a, 0x3799: 0x000a, 0x379a: 0x000a, 0x379b: 0x000a, 0x379c: 0x000a, 0x379d: 0x000a,
+ 0x379e: 0x000a, 0x379f: 0x000a, 0x37a0: 0x000a, 0x37a1: 0x000a, 0x37a2: 0x000a, 0x37a3: 0x000a,
+ 0x37a4: 0x000a, 0x37a5: 0x000a, 0x37a6: 0x000a, 0x37a7: 0x000a, 0x37a8: 0x000a, 0x37a9: 0x000a,
+ 0x37aa: 0x000a, 0x37ab: 0x000a, 0x37ac: 0x000a, 0x37ad: 0x000a, 0x37ae: 0x000a, 0x37af: 0x000a,
+ 0x37b0: 0x000a, 0x37b1: 0x000a, 0x37b2: 0x000a, 0x37b3: 0x000a, 0x37b4: 0x000a, 0x37b5: 0x000a,
+ // Block 0xdf, offset 0x37c0
+ 0x37c0: 0x0002, 0x37c1: 0x0002, 0x37c2: 0x0002, 0x37c3: 0x0002, 0x37c4: 0x0002, 0x37c5: 0x0002,
+ 0x37c6: 0x0002, 0x37c7: 0x0002, 0x37c8: 0x0002, 0x37c9: 0x0002, 0x37ca: 0x0002, 0x37cb: 0x000a,
+ 0x37cc: 0x000a,
+ 0x37ef: 0x000a,
+ // Block 0xe0, offset 0x3800
+ 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a,
+ // Block 0xe1, offset 0x3840
+ 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a,
+ 0x3864: 0x000a, 0x3865: 0x000a,
+ // Block 0xe2, offset 0x3880
+ 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a,
+ 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a,
+ 0x388c: 0x000a, 0x388d: 0x000a, 0x388e: 0x000a, 0x388f: 0x000a, 0x3890: 0x000a, 0x3891: 0x000a,
+ 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a,
+ 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a,
+ 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a,
+ 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a,
+ 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a,
+ 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a,
+ // Block 0xe3, offset 0x38c0
+ 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a,
+ 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a,
+ 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a,
+ 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a,
+ 0x38d8: 0x000a,
+ 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a,
+ 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a,
+ 0x38ea: 0x000a, 0x38eb: 0x000a,
+ // Block 0xe4, offset 0x3900
+ 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a,
+ 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a,
+ 0x3910: 0x000a, 0x3911: 0x000a,
+ 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a,
+ 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a,
+ 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, 0x3923: 0x000a,
+ 0x3924: 0x000a, 0x3925: 0x000a, 0x3926: 0x000a, 0x3927: 0x000a, 0x3928: 0x000a, 0x3929: 0x000a,
+ 0x392a: 0x000a, 0x392b: 0x000a, 0x392c: 0x000a, 0x392d: 0x000a, 0x392e: 0x000a, 0x392f: 0x000a,
+ 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a,
+ 0x3936: 0x000a, 0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, 0x393a: 0x000a, 0x393b: 0x000a,
+ 0x393c: 0x000a, 0x393d: 0x000a, 0x393e: 0x000a, 0x393f: 0x000a,
+ // Block 0xe5, offset 0x3940
+ 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, 0x3943: 0x000a, 0x3944: 0x000a, 0x3945: 0x000a,
+ 0x3946: 0x000a, 0x3947: 0x000a,
+ 0x3950: 0x000a, 0x3951: 0x000a,
+ 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a,
+ 0x3958: 0x000a, 0x3959: 0x000a,
+ 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a,
+ 0x3964: 0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a,
+ 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a,
+ 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a,
+ 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a,
+ 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a,
+ // Block 0xe6, offset 0x3980
+ 0x3980: 0x000a, 0x3981: 0x000a, 0x3982: 0x000a, 0x3983: 0x000a, 0x3984: 0x000a, 0x3985: 0x000a,
+ 0x3986: 0x000a, 0x3987: 0x000a,
+ 0x3990: 0x000a, 0x3991: 0x000a,
+ 0x3992: 0x000a, 0x3993: 0x000a, 0x3994: 0x000a, 0x3995: 0x000a, 0x3996: 0x000a, 0x3997: 0x000a,
+ 0x3998: 0x000a, 0x3999: 0x000a, 0x399a: 0x000a, 0x399b: 0x000a, 0x399c: 0x000a, 0x399d: 0x000a,
+ 0x399e: 0x000a, 0x399f: 0x000a, 0x39a0: 0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a,
+ 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a,
+ 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a,
+ // Block 0xe7, offset 0x39c0
+ 0x39c0: 0x000a, 0x39c1: 0x000a, 0x39c2: 0x000a, 0x39c3: 0x000a, 0x39c4: 0x000a, 0x39c5: 0x000a,
+ 0x39c6: 0x000a, 0x39c7: 0x000a, 0x39c8: 0x000a, 0x39c9: 0x000a, 0x39ca: 0x000a, 0x39cb: 0x000a,
+ 0x39cd: 0x000a, 0x39ce: 0x000a, 0x39cf: 0x000a, 0x39d0: 0x000a, 0x39d1: 0x000a,
+ 0x39d2: 0x000a, 0x39d3: 0x000a, 0x39d4: 0x000a, 0x39d5: 0x000a, 0x39d6: 0x000a, 0x39d7: 0x000a,
+ 0x39d8: 0x000a, 0x39d9: 0x000a, 0x39da: 0x000a, 0x39db: 0x000a, 0x39dc: 0x000a, 0x39dd: 0x000a,
+ 0x39de: 0x000a, 0x39df: 0x000a, 0x39e0: 0x000a, 0x39e1: 0x000a, 0x39e2: 0x000a, 0x39e3: 0x000a,
+ 0x39e4: 0x000a, 0x39e5: 0x000a, 0x39e6: 0x000a, 0x39e7: 0x000a, 0x39e8: 0x000a, 0x39e9: 0x000a,
+ 0x39ea: 0x000a, 0x39eb: 0x000a, 0x39ec: 0x000a, 0x39ed: 0x000a, 0x39ee: 0x000a, 0x39ef: 0x000a,
+ 0x39f0: 0x000a, 0x39f1: 0x000a, 0x39f2: 0x000a, 0x39f3: 0x000a, 0x39f4: 0x000a, 0x39f5: 0x000a,
+ 0x39f6: 0x000a, 0x39f7: 0x000a, 0x39f8: 0x000a, 0x39f9: 0x000a, 0x39fa: 0x000a, 0x39fb: 0x000a,
+ 0x39fc: 0x000a, 0x39fd: 0x000a, 0x39fe: 0x000a, 0x39ff: 0x000a,
+ // Block 0xe8, offset 0x3a00
+ 0x3a00: 0x000a, 0x3a01: 0x000a, 0x3a02: 0x000a, 0x3a03: 0x000a, 0x3a04: 0x000a, 0x3a05: 0x000a,
+ 0x3a06: 0x000a, 0x3a07: 0x000a, 0x3a08: 0x000a, 0x3a09: 0x000a, 0x3a0a: 0x000a, 0x3a0b: 0x000a,
+ 0x3a0c: 0x000a, 0x3a0d: 0x000a, 0x3a0e: 0x000a, 0x3a0f: 0x000a, 0x3a10: 0x000a, 0x3a11: 0x000a,
+ 0x3a12: 0x000a, 0x3a13: 0x000a, 0x3a14: 0x000a, 0x3a15: 0x000a, 0x3a16: 0x000a, 0x3a17: 0x000a,
+ 0x3a18: 0x000a, 0x3a19: 0x000a, 0x3a1a: 0x000a, 0x3a1b: 0x000a, 0x3a1c: 0x000a, 0x3a1d: 0x000a,
+ 0x3a1e: 0x000a, 0x3a1f: 0x000a, 0x3a20: 0x000a, 0x3a21: 0x000a, 0x3a22: 0x000a, 0x3a23: 0x000a,
+ 0x3a24: 0x000a, 0x3a25: 0x000a, 0x3a26: 0x000a, 0x3a27: 0x000a, 0x3a28: 0x000a, 0x3a29: 0x000a,
+ 0x3a2a: 0x000a, 0x3a2b: 0x000a, 0x3a2c: 0x000a, 0x3a2d: 0x000a, 0x3a2e: 0x000a, 0x3a2f: 0x000a,
+ 0x3a30: 0x000a, 0x3a31: 0x000a, 0x3a33: 0x000a, 0x3a34: 0x000a, 0x3a35: 0x000a,
+ 0x3a36: 0x000a, 0x3a3a: 0x000a, 0x3a3b: 0x000a,
+ 0x3a3c: 0x000a, 0x3a3d: 0x000a, 0x3a3e: 0x000a, 0x3a3f: 0x000a,
+ // Block 0xe9, offset 0x3a40
+ 0x3a40: 0x000a, 0x3a41: 0x000a, 0x3a42: 0x000a, 0x3a43: 0x000a, 0x3a44: 0x000a, 0x3a45: 0x000a,
+ 0x3a46: 0x000a, 0x3a47: 0x000a, 0x3a48: 0x000a, 0x3a49: 0x000a, 0x3a4a: 0x000a, 0x3a4b: 0x000a,
+ 0x3a4c: 0x000a, 0x3a4d: 0x000a, 0x3a4e: 0x000a, 0x3a4f: 0x000a, 0x3a50: 0x000a, 0x3a51: 0x000a,
+ 0x3a52: 0x000a, 0x3a53: 0x000a, 0x3a54: 0x000a, 0x3a55: 0x000a, 0x3a56: 0x000a, 0x3a57: 0x000a,
+ 0x3a58: 0x000a, 0x3a59: 0x000a, 0x3a5a: 0x000a, 0x3a5b: 0x000a, 0x3a5c: 0x000a, 0x3a5d: 0x000a,
+ 0x3a5e: 0x000a, 0x3a5f: 0x000a, 0x3a60: 0x000a, 0x3a61: 0x000a, 0x3a62: 0x000a,
+ 0x3a65: 0x000a, 0x3a66: 0x000a, 0x3a67: 0x000a, 0x3a68: 0x000a, 0x3a69: 0x000a,
+ 0x3a6a: 0x000a, 0x3a6e: 0x000a, 0x3a6f: 0x000a,
+ 0x3a70: 0x000a, 0x3a71: 0x000a, 0x3a72: 0x000a, 0x3a73: 0x000a, 0x3a74: 0x000a, 0x3a75: 0x000a,
+ 0x3a76: 0x000a, 0x3a77: 0x000a, 0x3a78: 0x000a, 0x3a79: 0x000a, 0x3a7a: 0x000a, 0x3a7b: 0x000a,
+ 0x3a7c: 0x000a, 0x3a7d: 0x000a, 0x3a7e: 0x000a, 0x3a7f: 0x000a,
+ // Block 0xea, offset 0x3a80
+ 0x3a80: 0x000a, 0x3a81: 0x000a, 0x3a82: 0x000a, 0x3a83: 0x000a, 0x3a84: 0x000a, 0x3a85: 0x000a,
+ 0x3a86: 0x000a, 0x3a87: 0x000a, 0x3a88: 0x000a, 0x3a89: 0x000a, 0x3a8a: 0x000a,
+ 0x3a8d: 0x000a, 0x3a8e: 0x000a, 0x3a8f: 0x000a, 0x3a90: 0x000a, 0x3a91: 0x000a,
+ 0x3a92: 0x000a, 0x3a93: 0x000a, 0x3a94: 0x000a, 0x3a95: 0x000a, 0x3a96: 0x000a, 0x3a97: 0x000a,
+ 0x3a98: 0x000a, 0x3a99: 0x000a, 0x3a9a: 0x000a, 0x3a9b: 0x000a, 0x3a9c: 0x000a, 0x3a9d: 0x000a,
+ 0x3a9e: 0x000a, 0x3a9f: 0x000a, 0x3aa0: 0x000a, 0x3aa1: 0x000a, 0x3aa2: 0x000a, 0x3aa3: 0x000a,
+ 0x3aa4: 0x000a, 0x3aa5: 0x000a, 0x3aa6: 0x000a, 0x3aa7: 0x000a, 0x3aa8: 0x000a, 0x3aa9: 0x000a,
+ 0x3aaa: 0x000a, 0x3aab: 0x000a, 0x3aac: 0x000a, 0x3aad: 0x000a, 0x3aae: 0x000a, 0x3aaf: 0x000a,
+ 0x3ab0: 0x000a, 0x3ab1: 0x000a, 0x3ab2: 0x000a, 0x3ab3: 0x000a, 0x3ab4: 0x000a, 0x3ab5: 0x000a,
+ 0x3ab6: 0x000a, 0x3ab7: 0x000a, 0x3ab8: 0x000a, 0x3ab9: 0x000a, 0x3aba: 0x000a, 0x3abb: 0x000a,
+ 0x3abc: 0x000a, 0x3abd: 0x000a, 0x3abe: 0x000a, 0x3abf: 0x000a,
+ // Block 0xeb, offset 0x3ac0
+ 0x3ac0: 0x000a, 0x3ac1: 0x000a, 0x3ac2: 0x000a, 0x3ac3: 0x000a, 0x3ac4: 0x000a, 0x3ac5: 0x000a,
+ 0x3ac6: 0x000a, 0x3ac7: 0x000a, 0x3ac8: 0x000a, 0x3ac9: 0x000a, 0x3aca: 0x000a, 0x3acb: 0x000a,
+ 0x3acc: 0x000a, 0x3acd: 0x000a, 0x3ace: 0x000a, 0x3acf: 0x000a, 0x3ad0: 0x000a, 0x3ad1: 0x000a,
+ 0x3ad2: 0x000a, 0x3ad3: 0x000a,
+ 0x3ae0: 0x000a, 0x3ae1: 0x000a, 0x3ae2: 0x000a, 0x3ae3: 0x000a,
+ 0x3ae4: 0x000a, 0x3ae5: 0x000a, 0x3ae6: 0x000a, 0x3ae7: 0x000a, 0x3ae8: 0x000a, 0x3ae9: 0x000a,
+ 0x3aea: 0x000a, 0x3aeb: 0x000a, 0x3aec: 0x000a, 0x3aed: 0x000a,
+ 0x3af0: 0x000a, 0x3af1: 0x000a, 0x3af2: 0x000a, 0x3af3: 0x000a,
+ 0x3af8: 0x000a, 0x3af9: 0x000a, 0x3afa: 0x000a,
+ // Block 0xec, offset 0x3b00
+ 0x3b00: 0x000a, 0x3b01: 0x000a, 0x3b02: 0x000a,
+ 0x3b10: 0x000a, 0x3b11: 0x000a,
+ 0x3b12: 0x000a, 0x3b13: 0x000a, 0x3b14: 0x000a, 0x3b15: 0x000a,
+ // Block 0xed, offset 0x3b40
+ 0x3b7e: 0x000b, 0x3b7f: 0x000b,
+ // Block 0xee, offset 0x3b80
+ 0x3b80: 0x000b, 0x3b81: 0x000b, 0x3b82: 0x000b, 0x3b83: 0x000b, 0x3b84: 0x000b, 0x3b85: 0x000b,
+ 0x3b86: 0x000b, 0x3b87: 0x000b, 0x3b88: 0x000b, 0x3b89: 0x000b, 0x3b8a: 0x000b, 0x3b8b: 0x000b,
+ 0x3b8c: 0x000b, 0x3b8d: 0x000b, 0x3b8e: 0x000b, 0x3b8f: 0x000b, 0x3b90: 0x000b, 0x3b91: 0x000b,
+ 0x3b92: 0x000b, 0x3b93: 0x000b, 0x3b94: 0x000b, 0x3b95: 0x000b, 0x3b96: 0x000b, 0x3b97: 0x000b,
+ 0x3b98: 0x000b, 0x3b99: 0x000b, 0x3b9a: 0x000b, 0x3b9b: 0x000b, 0x3b9c: 0x000b, 0x3b9d: 0x000b,
+ 0x3b9e: 0x000b, 0x3b9f: 0x000b, 0x3ba0: 0x000b, 0x3ba1: 0x000b, 0x3ba2: 0x000b, 0x3ba3: 0x000b,
+ 0x3ba4: 0x000b, 0x3ba5: 0x000b, 0x3ba6: 0x000b, 0x3ba7: 0x000b, 0x3ba8: 0x000b, 0x3ba9: 0x000b,
+ 0x3baa: 0x000b, 0x3bab: 0x000b, 0x3bac: 0x000b, 0x3bad: 0x000b, 0x3bae: 0x000b, 0x3baf: 0x000b,
+ 0x3bb0: 0x000b, 0x3bb1: 0x000b, 0x3bb2: 0x000b, 0x3bb3: 0x000b, 0x3bb4: 0x000b, 0x3bb5: 0x000b,
+ 0x3bb6: 0x000b, 0x3bb7: 0x000b, 0x3bb8: 0x000b, 0x3bb9: 0x000b, 0x3bba: 0x000b, 0x3bbb: 0x000b,
+ 0x3bbc: 0x000b, 0x3bbd: 0x000b, 0x3bbe: 0x000b, 0x3bbf: 0x000b,
+ // Block 0xef, offset 0x3bc0
+ 0x3bc0: 0x000c, 0x3bc1: 0x000c, 0x3bc2: 0x000c, 0x3bc3: 0x000c, 0x3bc4: 0x000c, 0x3bc5: 0x000c,
+ 0x3bc6: 0x000c, 0x3bc7: 0x000c, 0x3bc8: 0x000c, 0x3bc9: 0x000c, 0x3bca: 0x000c, 0x3bcb: 0x000c,
+ 0x3bcc: 0x000c, 0x3bcd: 0x000c, 0x3bce: 0x000c, 0x3bcf: 0x000c, 0x3bd0: 0x000c, 0x3bd1: 0x000c,
+ 0x3bd2: 0x000c, 0x3bd3: 0x000c, 0x3bd4: 0x000c, 0x3bd5: 0x000c, 0x3bd6: 0x000c, 0x3bd7: 0x000c,
+ 0x3bd8: 0x000c, 0x3bd9: 0x000c, 0x3bda: 0x000c, 0x3bdb: 0x000c, 0x3bdc: 0x000c, 0x3bdd: 0x000c,
+ 0x3bde: 0x000c, 0x3bdf: 0x000c, 0x3be0: 0x000c, 0x3be1: 0x000c, 0x3be2: 0x000c, 0x3be3: 0x000c,
+ 0x3be4: 0x000c, 0x3be5: 0x000c, 0x3be6: 0x000c, 0x3be7: 0x000c, 0x3be8: 0x000c, 0x3be9: 0x000c,
+ 0x3bea: 0x000c, 0x3beb: 0x000c, 0x3bec: 0x000c, 0x3bed: 0x000c, 0x3bee: 0x000c, 0x3bef: 0x000c,
+ 0x3bf0: 0x000b, 0x3bf1: 0x000b, 0x3bf2: 0x000b, 0x3bf3: 0x000b, 0x3bf4: 0x000b, 0x3bf5: 0x000b,
+ 0x3bf6: 0x000b, 0x3bf7: 0x000b, 0x3bf8: 0x000b, 0x3bf9: 0x000b, 0x3bfa: 0x000b, 0x3bfb: 0x000b,
+ 0x3bfc: 0x000b, 0x3bfd: 0x000b, 0x3bfe: 0x000b, 0x3bff: 0x000b,
+}
+
+// bidiIndex: 24 blocks, 1536 entries, 1536 bytes
+// Block 0 is the zero block.
+var bidiIndex = [1536]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x02,
+ 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08,
+ 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b,
+ 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06,
+ 0xea: 0x07, 0xef: 0x08,
+ 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15,
+ // Block 0x4, offset 0x100
+ 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b,
+ 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22,
+ 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28,
+ 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30,
+ // Block 0x5, offset 0x140
+ 0x140: 0x31, 0x141: 0x32, 0x142: 0x33,
+ 0x14d: 0x34, 0x14e: 0x35,
+ 0x150: 0x36,
+ 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b,
+ 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40,
+ 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47,
+ 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a,
+ 0x17e: 0x4b, 0x17f: 0x4c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54,
+ 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54,
+ 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54,
+ 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f,
+ 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61,
+ 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x54,
+ 0x1b3: 0x64, 0x1b5: 0x65, 0x1b7: 0x66,
+ 0x1b8: 0x67, 0x1b9: 0x68, 0x1ba: 0x69, 0x1bb: 0x6a, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6b,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x6c, 0x1c2: 0x6d, 0x1c3: 0x6e, 0x1c7: 0x6f,
+ 0x1c8: 0x70, 0x1c9: 0x71, 0x1ca: 0x72, 0x1cb: 0x73, 0x1cd: 0x74, 0x1cf: 0x75,
+ // Block 0x8, offset 0x200
+ 0x237: 0x54,
+ // Block 0x9, offset 0x240
+ 0x252: 0x76, 0x253: 0x77,
+ 0x258: 0x78, 0x259: 0x79, 0x25a: 0x7a, 0x25b: 0x7b, 0x25c: 0x7c, 0x25e: 0x7d,
+ 0x260: 0x7e, 0x261: 0x7f, 0x263: 0x80, 0x264: 0x81, 0x265: 0x82, 0x266: 0x83, 0x267: 0x84,
+ 0x268: 0x85, 0x269: 0x86, 0x26a: 0x87, 0x26b: 0x88, 0x26f: 0x89,
+ // Block 0xa, offset 0x280
+ 0x2ac: 0x8a, 0x2ad: 0x8b, 0x2ae: 0x0e, 0x2af: 0x0e,
+ 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8c, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8d,
+ 0x2b8: 0x8e, 0x2b9: 0x8f, 0x2ba: 0x0e, 0x2bb: 0x90, 0x2bc: 0x91, 0x2bd: 0x92, 0x2bf: 0x93,
+ // Block 0xb, offset 0x2c0
+ 0x2c4: 0x94, 0x2c5: 0x54, 0x2c6: 0x95, 0x2c7: 0x96,
+ 0x2cb: 0x97, 0x2cd: 0x98,
+ 0x2e0: 0x99, 0x2e1: 0x99, 0x2e2: 0x99, 0x2e3: 0x99, 0x2e4: 0x9a, 0x2e5: 0x99, 0x2e6: 0x99, 0x2e7: 0x99,
+ 0x2e8: 0x9b, 0x2e9: 0x99, 0x2ea: 0x99, 0x2eb: 0x9c, 0x2ec: 0x9d, 0x2ed: 0x99, 0x2ee: 0x99, 0x2ef: 0x99,
+ 0x2f0: 0x99, 0x2f1: 0x99, 0x2f2: 0x99, 0x2f3: 0x99, 0x2f4: 0x9e, 0x2f5: 0x99, 0x2f6: 0x99, 0x2f7: 0x99,
+ 0x2f8: 0x99, 0x2f9: 0x9f, 0x2fa: 0x99, 0x2fb: 0x99, 0x2fc: 0xa0, 0x2fd: 0xa1, 0x2fe: 0x99, 0x2ff: 0x99,
+ // Block 0xc, offset 0x300
+ 0x300: 0xa2, 0x301: 0xa3, 0x302: 0xa4, 0x304: 0xa5, 0x305: 0xa6, 0x306: 0xa7, 0x307: 0xa8,
+ 0x308: 0xa9, 0x30b: 0xaa, 0x30c: 0x26, 0x30d: 0xab,
+ 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1,
+ 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5,
+ 0x320: 0xb6, 0x327: 0xb7,
+ 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba,
+ 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf,
+ 0x33b: 0xc0, 0x33f: 0xc1,
+ // Block 0xd, offset 0x340
+ 0x36b: 0xc2, 0x36c: 0xc3,
+ 0x37d: 0xc4, 0x37e: 0xc5, 0x37f: 0xc6,
+ // Block 0xe, offset 0x380
+ 0x3b2: 0xc7,
+ // Block 0xf, offset 0x3c0
+ 0x3c5: 0xc8, 0x3c6: 0xc9,
+ 0x3c8: 0x54, 0x3c9: 0xca, 0x3cc: 0x54, 0x3cd: 0xcb,
+ 0x3db: 0xcc, 0x3dc: 0xcd, 0x3dd: 0xce, 0x3de: 0xcf, 0x3df: 0xd0,
+ 0x3e8: 0xd1, 0x3e9: 0xd2, 0x3ea: 0xd3,
+ // Block 0x10, offset 0x400
+ 0x400: 0xd4, 0x404: 0xc3,
+ 0x40b: 0xd5,
+ 0x420: 0x99, 0x421: 0x99, 0x422: 0x99, 0x423: 0xd6, 0x424: 0x99, 0x425: 0xd7, 0x426: 0x99, 0x427: 0x99,
+ 0x428: 0x99, 0x429: 0x99, 0x42a: 0x99, 0x42b: 0x99, 0x42c: 0x99, 0x42d: 0x99, 0x42e: 0x99, 0x42f: 0x99,
+ 0x430: 0x99, 0x431: 0xa0, 0x432: 0x0e, 0x433: 0x99, 0x434: 0x0e, 0x435: 0xd8, 0x436: 0x99, 0x437: 0x99,
+ 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd9, 0x43c: 0x99, 0x43d: 0x99, 0x43e: 0x99, 0x43f: 0x99,
+ // Block 0x11, offset 0x440
+ 0x440: 0xda, 0x441: 0x54, 0x442: 0xdb, 0x443: 0xdc, 0x444: 0xdd, 0x445: 0xde,
+ 0x449: 0xdf, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54,
+ 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54,
+ 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xe0, 0x45c: 0x54, 0x45d: 0x6a, 0x45e: 0x54, 0x45f: 0xe1,
+ 0x460: 0xe2, 0x461: 0xe3, 0x462: 0xe4, 0x464: 0xe5, 0x465: 0xe6, 0x466: 0xe7, 0x467: 0xe8,
+ 0x468: 0x54, 0x469: 0xe9, 0x46a: 0xea,
+ 0x47f: 0xeb,
+ // Block 0x12, offset 0x480
+ 0x4bf: 0xeb,
+ // Block 0x13, offset 0x4c0
+ 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b,
+ 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f,
+ 0x4ef: 0x10,
+ 0x4ff: 0x10,
+ // Block 0x14, offset 0x500
+ 0x50f: 0x10,
+ 0x51f: 0x10,
+ 0x52f: 0x10,
+ 0x53f: 0x10,
+ // Block 0x15, offset 0x540
+ 0x540: 0xec, 0x541: 0xec, 0x542: 0xec, 0x543: 0xec, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xed,
+ 0x548: 0xec, 0x549: 0xec, 0x54a: 0xec, 0x54b: 0xec, 0x54c: 0xec, 0x54d: 0xec, 0x54e: 0xec, 0x54f: 0xec,
+ 0x550: 0xec, 0x551: 0xec, 0x552: 0xec, 0x553: 0xec, 0x554: 0xec, 0x555: 0xec, 0x556: 0xec, 0x557: 0xec,
+ 0x558: 0xec, 0x559: 0xec, 0x55a: 0xec, 0x55b: 0xec, 0x55c: 0xec, 0x55d: 0xec, 0x55e: 0xec, 0x55f: 0xec,
+ 0x560: 0xec, 0x561: 0xec, 0x562: 0xec, 0x563: 0xec, 0x564: 0xec, 0x565: 0xec, 0x566: 0xec, 0x567: 0xec,
+ 0x568: 0xec, 0x569: 0xec, 0x56a: 0xec, 0x56b: 0xec, 0x56c: 0xec, 0x56d: 0xec, 0x56e: 0xec, 0x56f: 0xec,
+ 0x570: 0xec, 0x571: 0xec, 0x572: 0xec, 0x573: 0xec, 0x574: 0xec, 0x575: 0xec, 0x576: 0xec, 0x577: 0xec,
+ 0x578: 0xec, 0x579: 0xec, 0x57a: 0xec, 0x57b: 0xec, 0x57c: 0xec, 0x57d: 0xec, 0x57e: 0xec, 0x57f: 0xec,
+ // Block 0x16, offset 0x580
+ 0x58f: 0x10,
+ 0x59f: 0x10,
+ 0x5a0: 0x13,
+ 0x5af: 0x10,
+ 0x5bf: 0x10,
+ // Block 0x17, offset 0x5c0
+ 0x5cf: 0x10,
+}
+
+// Total table size 16952 bytes (16KiB); checksum: F50EF68C
diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
index 7297cce3..2c58f09b 100644
--- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
+++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go
@@ -1,6 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
-// +build go1.13
+// +build go1.13,!go1.14
package norm
diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
new file mode 100644
index 00000000..10f5202c
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go
@@ -0,0 +1,7710 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.14
+
+package norm
+
+import "sync"
+
+const (
+ // Version is the Unicode edition from which the tables are derived.
+ Version = "12.0.0"
+
+ // MaxTransformChunkSize indicates the maximum number of bytes that Transform
+ // may need to write atomically for any Form. Making a destination buffer at
+ // least this size ensures that Transform can always make progress and that
+ // the user does not need to grow the buffer on an ErrShortDst.
+ MaxTransformChunkSize = 35 + maxNonStarters*4
+)
+
+var ccc = [55]uint8{
+ 0, 1, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 84, 91, 103, 107, 118, 122, 129, 130,
+ 132, 202, 214, 216, 218, 220, 222, 224,
+ 226, 228, 230, 232, 233, 234, 240,
+}
+
+const (
+ firstMulti = 0x186D
+ firstCCC = 0x2CA1
+ endMulti = 0x2F63
+ firstLeadingCCC = 0x49B1
+ firstCCCZeroExcept = 0x4A7B
+ firstStarterWithNLead = 0x4AA2
+ lastDecomp = 0x4AA4
+ maxDecomp = 0x8000
+)
+
+// decomps: 19108 bytes
+var decomps = [...]byte{
+ // Bytes 0 - 3f
+ 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41,
+ 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41,
+ 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41,
+ 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41,
+ 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41,
+ 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41,
+ 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41,
+ 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41,
+ // Bytes 40 - 7f
+ 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41,
+ 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41,
+ 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41,
+ 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41,
+ 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41,
+ 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41,
+ 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41,
+ 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41,
+ // Bytes 80 - bf
+ 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41,
+ 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41,
+ 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41,
+ 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41,
+ 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41,
+ 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41,
+ 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41,
+ 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42,
+ // Bytes c0 - ff
+ 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5,
+ 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2,
+ 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42,
+ 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1,
+ 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6,
+ 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42,
+ 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90,
+ 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9,
+ // Bytes 100 - 13f
+ 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42,
+ 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F,
+ 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9,
+ 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42,
+ 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB,
+ 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9,
+ 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42,
+ 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5,
+ // Bytes 140 - 17f
+ 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9,
+ 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42,
+ 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A,
+ 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA,
+ 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42,
+ 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F,
+ 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE,
+ 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42,
+ // Bytes 180 - 1bf
+ 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97,
+ 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE,
+ 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42,
+ 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F,
+ 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE,
+ 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42,
+ 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8,
+ 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE,
+ // Bytes 1c0 - 1ff
+ 0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42,
+ 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7,
+ 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE,
+ 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42,
+ 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF,
+ 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF,
+ 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42,
+ 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87,
+ // Bytes 200 - 23f
+ 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF,
+ 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42,
+ 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90,
+ 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7,
+ 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42,
+ 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2,
+ 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8,
+ 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42,
+ // Bytes 240 - 27f
+ 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB,
+ 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8,
+ 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42,
+ 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3,
+ 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8,
+ 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42,
+ 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81,
+ 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9,
+ // Bytes 280 - 2bf
+ 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42,
+ 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89,
+ 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9,
+ 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42,
+ 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE,
+ 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA,
+ 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42,
+ 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C,
+ // Bytes 2c0 - 2ff
+ 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA,
+ 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42,
+ 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9,
+ 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA,
+ 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42,
+ 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81,
+ 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB,
+ 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42,
+ // Bytes 300 - 33f
+ 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90,
+ 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43,
+ 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43,
+ 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43,
+ 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43,
+ 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43,
+ 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43,
+ 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43,
+ // Bytes 340 - 37f
+ 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43,
+ 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43,
+ 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43,
+ 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43,
+ 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43,
+ 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43,
+ 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43,
+ 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43,
+ // Bytes 380 - 3bf
+ 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43,
+ 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43,
+ 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43,
+ 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43,
+ 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43,
+ 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43,
+ 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43,
+ 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43,
+ // Bytes 3c0 - 3ff
+ 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43,
+ 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43,
+ 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43,
+ 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 0x94, 0x43,
+ 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43,
+ 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43,
+ 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43,
+ 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43,
+ // Bytes 400 - 43f
+ 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43,
+ 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43,
+ 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43,
+ 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43,
+ 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43,
+ 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43,
+ 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43,
+ 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43,
+ // Bytes 440 - 47f
+ 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43,
+ 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43,
+ 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43,
+ 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43,
+ 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43,
+ 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43,
+ 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43,
+ 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43,
+ // Bytes 480 - 4bf
+ 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43,
+ 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43,
+ 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43,
+ 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43,
+ 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43,
+ 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43,
+ 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43,
+ 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43,
+ // Bytes 4c0 - 4ff
+ 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43,
+ 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43,
+ 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43,
+ 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43,
+ 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43,
+ 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43,
+ 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43,
+ 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43,
+ // Bytes 500 - 53f
+ 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43,
+ 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43,
+ 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43,
+ 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43,
+ 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43,
+ 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43,
+ 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43,
+ 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43,
+ // Bytes 540 - 57f
+ 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43,
+ 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43,
+ 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43,
+ 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43,
+ 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43,
+ 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43,
+ 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43,
+ 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43,
+ // Bytes 580 - 5bf
+ 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43,
+ 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43,
+ 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43,
+ 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43,
+ 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43,
+ 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43,
+ 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43,
+ 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43,
+ // Bytes 5c0 - 5ff
+ 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43,
+ 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43,
+ 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43,
+ 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43,
+ 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43,
+ 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43,
+ 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43,
+ 0xE3, 0xAB, 0xA4, 0x43, 0xE3, 0xAC, 0x88, 0x43,
+ // Bytes 600 - 63f
+ 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43,
+ 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43,
+ 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43,
+ 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43,
+ 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43,
+ 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43,
+ 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43,
+ 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43,
+ // Bytes 640 - 67f
+ 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43,
+ 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43,
+ 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43,
+ 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43,
+ 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43,
+ 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43,
+ 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43,
+ 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43,
+ // Bytes 680 - 6bf
+ 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43,
+ 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43,
+ 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43,
+ 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43,
+ 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43,
+ 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43,
+ 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43,
+ 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43,
+ // Bytes 6c0 - 6ff
+ 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43,
+ 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43,
+ 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43,
+ 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43,
+ 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43,
+ 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43,
+ 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43,
+ 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43,
+ // Bytes 700 - 73f
+ 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43,
+ 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43,
+ 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43,
+ 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43,
+ 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43,
+ 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43,
+ 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43,
+ 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43,
+ // Bytes 740 - 77f
+ 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43,
+ 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43,
+ 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43,
+ 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43,
+ 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43,
+ 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43,
+ 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43,
+ 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43,
+ // Bytes 780 - 7bf
+ 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43,
+ 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43,
+ 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43,
+ 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43,
+ 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43,
+ 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43,
+ 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43,
+ 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43,
+ // Bytes 7c0 - 7ff
+ 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43,
+ 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43,
+ 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43,
+ 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43,
+ 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43,
+ 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43,
+ 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43,
+ 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43,
+ // Bytes 800 - 83f
+ 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43,
+ 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43,
+ 0xE5, 0x88, 0x97, 0x43, 0xE5, 0x88, 0x9D, 0x43,
+ 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43,
+ 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43,
+ 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43,
+ 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43,
+ 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43,
+ // Bytes 840 - 87f
+ 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43,
+ 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43,
+ 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43,
+ 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43,
+ 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43,
+ 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43,
+ 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43,
+ 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43,
+ // Bytes 880 - 8bf
+ 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43,
+ 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43,
+ 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43,
+ 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43,
+ 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43,
+ 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43,
+ 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43,
+ 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43,
+ // Bytes 8c0 - 8ff
+ 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43,
+ 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43,
+ 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43,
+ 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43,
+ 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43,
+ 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43,
+ 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43,
+ 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43,
+ // Bytes 900 - 93f
+ 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43,
+ 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43,
+ 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43,
+ 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43,
+ 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43,
+ 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43,
+ 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43,
+ 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43,
+ // Bytes 940 - 97f
+ 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43,
+ 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43,
+ 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43,
+ 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43,
+ 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43,
+ 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43,
+ 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43,
+ 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43,
+ // Bytes 980 - 9bf
+ 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43,
+ 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43,
+ 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43,
+ 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43,
+ 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43,
+ 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43,
+ 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43,
+ 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43,
+ // Bytes 9c0 - 9ff
+ 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43,
+ 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43,
+ 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43,
+ 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43,
+ 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43,
+ 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43,
+ 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43,
+ 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43,
+ // Bytes a00 - a3f
+ 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43,
+ 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43,
+ 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43,
+ 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43,
+ 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43,
+ 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43,
+ 0xE5, 0xA9, 0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43,
+ 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43,
+ // Bytes a40 - a7f
+ 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43,
+ 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43,
+ 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43,
+ 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43,
+ 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43,
+ 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43,
+ 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43,
+ 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43,
+ // Bytes a80 - abf
+ 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43,
+ 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43,
+ 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43,
+ 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43,
+ 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43,
+ 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43,
+ 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43,
+ 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43,
+ // Bytes ac0 - aff
+ 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43,
+ 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43,
+ 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43,
+ 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43,
+ 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43,
+ 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43,
+ 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43,
+ 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43,
+ // Bytes b00 - b3f
+ 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43,
+ 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43,
+ 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43,
+ 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43,
+ 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43,
+ 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43,
+ 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43,
+ 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43,
+ // Bytes b40 - b7f
+ 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43,
+ 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43,
+ 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43,
+ 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43,
+ 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43,
+ 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43,
+ 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43,
+ 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43,
+ // Bytes b80 - bbf
+ 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43,
+ 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43,
+ 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43,
+ 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43,
+ 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43,
+ 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43,
+ 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43,
+ 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43,
+ // Bytes bc0 - bff
+ 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43,
+ 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43,
+ 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43,
+ 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43,
+ 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43,
+ 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43,
+ 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43,
+ 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43,
+ // Bytes c00 - c3f
+ 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43,
+ 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43,
+ 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43,
+ 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43,
+ 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43,
+ 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43,
+ 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43,
+ 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43,
+ // Bytes c40 - c7f
+ 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43,
+ 0xE6, 0x8D, 0x95, 0x43, 0xE6, 0x8D, 0xA8, 0x43,
+ 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43,
+ 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43,
+ 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43,
+ 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43,
+ 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43,
+ 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43,
+ // Bytes c80 - cbf
+ 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43,
+ 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43,
+ 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43,
+ 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43,
+ 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43,
+ 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43,
+ 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43,
+ 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43,
+ // Bytes cc0 - cff
+ 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43,
+ 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43,
+ 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43,
+ 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43,
+ 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43,
+ 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43,
+ 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43,
+ 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43,
+ // Bytes d00 - d3f
+ 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43,
+ 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43,
+ 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43,
+ 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43,
+ 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43,
+ 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43,
+ 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43,
+ 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43,
+ // Bytes d40 - d7f
+ 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43,
+ 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43,
+ 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43,
+ 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43,
+ 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43,
+ 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43,
+ 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43,
+ 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43,
+ // Bytes d80 - dbf
+ 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43,
+ 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43,
+ 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43,
+ 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43,
+ 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43,
+ 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43,
+ 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43,
+ 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43,
+ // Bytes dc0 - dff
+ 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43,
+ 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43,
+ 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43,
+ 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43,
+ 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43,
+ 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43,
+ 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43,
+ 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43,
+ // Bytes e00 - e3f
+ 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43,
+ 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43,
+ 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43,
+ 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43,
+ 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43,
+ 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43,
+ 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43,
+ 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43,
+ // Bytes e40 - e7f
+ 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43,
+ 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43,
+ 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43,
+ 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43,
+ 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43,
+ 0xE6, 0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43,
+ 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43,
+ 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43,
+ // Bytes e80 - ebf
+ 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43,
+ 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43,
+ 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43,
+ 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43,
+ 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43,
+ 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43,
+ 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43,
+ 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43,
+ // Bytes ec0 - eff
+ 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43,
+ 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43,
+ 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43,
+ 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43,
+ 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43,
+ 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43,
+ 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43,
+ 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43,
+ // Bytes f00 - f3f
+ 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43,
+ 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43,
+ 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43,
+ 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43,
+ 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43,
+ 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43,
+ 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43,
+ 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43,
+ // Bytes f40 - f7f
+ 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43,
+ 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43,
+ 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43,
+ 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43,
+ 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43,
+ 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43,
+ 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43,
+ 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43,
+ // Bytes f80 - fbf
+ 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43,
+ 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43,
+ 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43,
+ 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43,
+ 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43,
+ 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43,
+ 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43,
+ 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43,
+ // Bytes fc0 - fff
+ 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43,
+ 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43,
+ 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43,
+ 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43,
+ 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43,
+ 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43,
+ 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43,
+ 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43,
+ // Bytes 1000 - 103f
+ 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43,
+ 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43,
+ 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43,
+ 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43,
+ 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43,
+ 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43,
+ 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43,
+ 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43,
+ // Bytes 1040 - 107f
+ 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43,
+ 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43,
+ 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43,
+ 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43,
+ 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43,
+ 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43,
+ 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43,
+ 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43,
+ // Bytes 1080 - 10bf
+ 0xE7, 0xA6, 0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43,
+ 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43,
+ 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43,
+ 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43,
+ 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43,
+ 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43,
+ 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43,
+ 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43,
+ // Bytes 10c0 - 10ff
+ 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43,
+ 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43,
+ 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43,
+ 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43,
+ 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43,
+ 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43,
+ 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43,
+ 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43,
+ // Bytes 1100 - 113f
+ 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43,
+ 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43,
+ 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43,
+ 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43,
+ 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43,
+ 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43,
+ 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43,
+ 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43,
+ // Bytes 1140 - 117f
+ 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43,
+ 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43,
+ 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43,
+ 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43,
+ 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43,
+ 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43,
+ 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43,
+ 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43,
+ // Bytes 1180 - 11bf
+ 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43,
+ 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43,
+ 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43,
+ 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43,
+ 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43,
+ 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43,
+ 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43,
+ 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43,
+ // Bytes 11c0 - 11ff
+ 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43,
+ 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43,
+ 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43,
+ 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43,
+ 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43,
+ 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43,
+ 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43,
+ 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43,
+ // Bytes 1200 - 123f
+ 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43,
+ 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43,
+ 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43,
+ 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43,
+ 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43,
+ 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43,
+ 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43,
+ 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43,
+ // Bytes 1240 - 127f
+ 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43,
+ 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43,
+ 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43,
+ 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43,
+ 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43,
+ 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43,
+ 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43,
+ 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43,
+ // Bytes 1280 - 12bf
+ 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43,
+ 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43,
+ 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43,
+ 0xE8, 0x98, 0xBF, 0x43, 0xE8, 0x99, 0x8D, 0x43,
+ 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43,
+ 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43,
+ 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43,
+ 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43,
+ // Bytes 12c0 - 12ff
+ 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43,
+ 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43,
+ 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43,
+ 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43,
+ 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43,
+ 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43,
+ 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43,
+ 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43,
+ // Bytes 1300 - 133f
+ 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43,
+ 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43,
+ 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43,
+ 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43,
+ 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43,
+ 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43,
+ 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43,
+ 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43,
+ // Bytes 1340 - 137f
+ 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43,
+ 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43,
+ 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43,
+ 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43,
+ 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43,
+ 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43,
+ 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43,
+ 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43,
+ // Bytes 1380 - 13bf
+ 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43,
+ 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43,
+ 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43,
+ 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43,
+ 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43,
+ 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43,
+ 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43,
+ 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43,
+ // Bytes 13c0 - 13ff
+ 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43,
+ 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43,
+ 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43,
+ 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43,
+ 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43,
+ 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43,
+ 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43,
+ 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43,
+ // Bytes 1400 - 143f
+ 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43,
+ 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43,
+ 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43,
+ 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43,
+ 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43,
+ 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43,
+ 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43,
+ 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43,
+ // Bytes 1440 - 147f
+ 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43,
+ 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43,
+ 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43,
+ 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43,
+ 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43,
+ 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43,
+ 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43,
+ 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43,
+ // Bytes 1480 - 14bf
+ 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43,
+ 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43,
+ 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43,
+ 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43,
+ 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43,
+ 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43,
+ 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43,
+ 0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43,
+ // Bytes 14c0 - 14ff
+ 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43,
+ 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43,
+ 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43,
+ 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43,
+ 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43,
+ 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43,
+ 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43,
+ 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43,
+ // Bytes 1500 - 153f
+ 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43,
+ 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43,
+ 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43,
+ 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43,
+ 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43,
+ 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43,
+ 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43,
+ 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43,
+ // Bytes 1540 - 157f
+ 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43,
+ 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43,
+ 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43,
+ 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43,
+ 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43,
+ 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43,
+ 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43,
+ 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43,
+ // Bytes 1580 - 15bf
+ 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43,
+ 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43,
+ 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43,
+ 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43,
+ 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43,
+ 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43,
+ 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43,
+ 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43,
+ // Bytes 15c0 - 15ff
+ 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43,
+ 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43,
+ 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43,
+ 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43,
+ 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43,
+ 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43,
+ 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43,
+ 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43,
+ // Bytes 1600 - 163f
+ 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43,
+ 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43,
+ 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43,
+ 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43,
+ 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43,
+ 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43,
+ 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43,
+ 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43,
+ // Bytes 1640 - 167f
+ 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44,
+ 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94,
+ 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0,
+ 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA,
+ 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0,
+ 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44,
+ 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93,
+ 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0,
+ // Bytes 1680 - 16bf
+ 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88,
+ 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1,
+ 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44,
+ 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86,
+ 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0,
+ 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94,
+ 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2,
+ 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44,
+ // Bytes 16c0 - 16ff
+ 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80,
+ 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0,
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93,
+ 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3,
+ 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44,
+ 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A,
+ 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0,
+ 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA,
+ // Bytes 1700 - 173f
+ 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3,
+ 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44,
+ 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE,
+ 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0,
+ 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB,
+ 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4,
+ 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44,
+ 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2,
+ // Bytes 1740 - 177f
+ 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0,
+ 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84,
+ 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5,
+ 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44,
+ 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89,
+ 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0,
+ 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A,
+ 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5,
+ // Bytes 1780 - 17bf
+ 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44,
+ 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2,
+ 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0,
+ 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A,
+ 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6,
+ 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44,
+ 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93,
+ 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0,
+ // Bytes 17c0 - 17ff
+ 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7,
+ 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6,
+ 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44,
+ 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5,
+ 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0,
+ 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92,
+ 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7,
+ 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44,
+ // Bytes 1800 - 183f
+ 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2,
+ 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0,
+ 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92,
+ 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8,
+ 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44,
+ 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85,
+ 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0,
+ 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A,
+ // Bytes 1840 - 187f
+ 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9,
+ 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44,
+ 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84,
+ 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0,
+ 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92,
+ 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21,
+ 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30,
+ 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42,
+ // Bytes 1880 - 18bf
+ 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31,
+ 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31,
+ 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42,
+ 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39,
+ 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32,
+ 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42,
+ 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35,
+ 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32,
+ // Bytes 18c0 - 18ff
+ 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42,
+ 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31,
+ 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33,
+ 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42,
+ 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39,
+ 0x42, 0x34, 0x2C, 0x42, 0x34, 0x2E, 0x42, 0x34,
+ 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42,
+ 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35,
+ // Bytes 1900 - 193f
+ 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34,
+ 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42,
+ 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C,
+ 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37,
+ 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42,
+ 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D,
+ 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41,
+ 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42,
+ // Bytes 1940 - 197f
+ 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A,
+ 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48,
+ 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42,
+ 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A,
+ 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49,
+ 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42,
+ 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A,
+ 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D,
+ // Bytes 1980 - 19bf
+ 0x44, 0x42, 0x4D, 0x52, 0x42, 0x4D, 0x56, 0x42,
+ 0x4D, 0x57, 0x42, 0x4E, 0x4A, 0x42, 0x4E, 0x6A,
+ 0x42, 0x4E, 0x6F, 0x42, 0x50, 0x48, 0x42, 0x50,
+ 0x52, 0x42, 0x50, 0x61, 0x42, 0x52, 0x73, 0x42,
+ 0x53, 0x44, 0x42, 0x53, 0x4D, 0x42, 0x53, 0x53,
+ 0x42, 0x53, 0x76, 0x42, 0x54, 0x4D, 0x42, 0x56,
+ 0x49, 0x42, 0x57, 0x43, 0x42, 0x57, 0x5A, 0x42,
+ 0x57, 0x62, 0x42, 0x58, 0x49, 0x42, 0x63, 0x63,
+ // Bytes 19c0 - 19ff
+ 0x42, 0x63, 0x64, 0x42, 0x63, 0x6D, 0x42, 0x64,
+ 0x42, 0x42, 0x64, 0x61, 0x42, 0x64, 0x6C, 0x42,
+ 0x64, 0x6D, 0x42, 0x64, 0x7A, 0x42, 0x65, 0x56,
+ 0x42, 0x66, 0x66, 0x42, 0x66, 0x69, 0x42, 0x66,
+ 0x6C, 0x42, 0x66, 0x6D, 0x42, 0x68, 0x61, 0x42,
+ 0x69, 0x69, 0x42, 0x69, 0x6A, 0x42, 0x69, 0x6E,
+ 0x42, 0x69, 0x76, 0x42, 0x69, 0x78, 0x42, 0x6B,
+ 0x41, 0x42, 0x6B, 0x56, 0x42, 0x6B, 0x57, 0x42,
+ // Bytes 1a00 - 1a3f
+ 0x6B, 0x67, 0x42, 0x6B, 0x6C, 0x42, 0x6B, 0x6D,
+ 0x42, 0x6B, 0x74, 0x42, 0x6C, 0x6A, 0x42, 0x6C,
+ 0x6D, 0x42, 0x6C, 0x6E, 0x42, 0x6C, 0x78, 0x42,
+ 0x6D, 0x32, 0x42, 0x6D, 0x33, 0x42, 0x6D, 0x41,
+ 0x42, 0x6D, 0x56, 0x42, 0x6D, 0x57, 0x42, 0x6D,
+ 0x62, 0x42, 0x6D, 0x67, 0x42, 0x6D, 0x6C, 0x42,
+ 0x6D, 0x6D, 0x42, 0x6D, 0x73, 0x42, 0x6E, 0x41,
+ 0x42, 0x6E, 0x46, 0x42, 0x6E, 0x56, 0x42, 0x6E,
+ // Bytes 1a40 - 1a7f
+ 0x57, 0x42, 0x6E, 0x6A, 0x42, 0x6E, 0x6D, 0x42,
+ 0x6E, 0x73, 0x42, 0x6F, 0x56, 0x42, 0x70, 0x41,
+ 0x42, 0x70, 0x46, 0x42, 0x70, 0x56, 0x42, 0x70,
+ 0x57, 0x42, 0x70, 0x63, 0x42, 0x70, 0x73, 0x42,
+ 0x73, 0x72, 0x42, 0x73, 0x74, 0x42, 0x76, 0x69,
+ 0x42, 0x78, 0x69, 0x43, 0x28, 0x31, 0x29, 0x43,
+ 0x28, 0x32, 0x29, 0x43, 0x28, 0x33, 0x29, 0x43,
+ 0x28, 0x34, 0x29, 0x43, 0x28, 0x35, 0x29, 0x43,
+ // Bytes 1a80 - 1abf
+ 0x28, 0x36, 0x29, 0x43, 0x28, 0x37, 0x29, 0x43,
+ 0x28, 0x38, 0x29, 0x43, 0x28, 0x39, 0x29, 0x43,
+ 0x28, 0x41, 0x29, 0x43, 0x28, 0x42, 0x29, 0x43,
+ 0x28, 0x43, 0x29, 0x43, 0x28, 0x44, 0x29, 0x43,
+ 0x28, 0x45, 0x29, 0x43, 0x28, 0x46, 0x29, 0x43,
+ 0x28, 0x47, 0x29, 0x43, 0x28, 0x48, 0x29, 0x43,
+ 0x28, 0x49, 0x29, 0x43, 0x28, 0x4A, 0x29, 0x43,
+ 0x28, 0x4B, 0x29, 0x43, 0x28, 0x4C, 0x29, 0x43,
+ // Bytes 1ac0 - 1aff
+ 0x28, 0x4D, 0x29, 0x43, 0x28, 0x4E, 0x29, 0x43,
+ 0x28, 0x4F, 0x29, 0x43, 0x28, 0x50, 0x29, 0x43,
+ 0x28, 0x51, 0x29, 0x43, 0x28, 0x52, 0x29, 0x43,
+ 0x28, 0x53, 0x29, 0x43, 0x28, 0x54, 0x29, 0x43,
+ 0x28, 0x55, 0x29, 0x43, 0x28, 0x56, 0x29, 0x43,
+ 0x28, 0x57, 0x29, 0x43, 0x28, 0x58, 0x29, 0x43,
+ 0x28, 0x59, 0x29, 0x43, 0x28, 0x5A, 0x29, 0x43,
+ 0x28, 0x61, 0x29, 0x43, 0x28, 0x62, 0x29, 0x43,
+ // Bytes 1b00 - 1b3f
+ 0x28, 0x63, 0x29, 0x43, 0x28, 0x64, 0x29, 0x43,
+ 0x28, 0x65, 0x29, 0x43, 0x28, 0x66, 0x29, 0x43,
+ 0x28, 0x67, 0x29, 0x43, 0x28, 0x68, 0x29, 0x43,
+ 0x28, 0x69, 0x29, 0x43, 0x28, 0x6A, 0x29, 0x43,
+ 0x28, 0x6B, 0x29, 0x43, 0x28, 0x6C, 0x29, 0x43,
+ 0x28, 0x6D, 0x29, 0x43, 0x28, 0x6E, 0x29, 0x43,
+ 0x28, 0x6F, 0x29, 0x43, 0x28, 0x70, 0x29, 0x43,
+ 0x28, 0x71, 0x29, 0x43, 0x28, 0x72, 0x29, 0x43,
+ // Bytes 1b40 - 1b7f
+ 0x28, 0x73, 0x29, 0x43, 0x28, 0x74, 0x29, 0x43,
+ 0x28, 0x75, 0x29, 0x43, 0x28, 0x76, 0x29, 0x43,
+ 0x28, 0x77, 0x29, 0x43, 0x28, 0x78, 0x29, 0x43,
+ 0x28, 0x79, 0x29, 0x43, 0x28, 0x7A, 0x29, 0x43,
+ 0x2E, 0x2E, 0x2E, 0x43, 0x31, 0x30, 0x2E, 0x43,
+ 0x31, 0x31, 0x2E, 0x43, 0x31, 0x32, 0x2E, 0x43,
+ 0x31, 0x33, 0x2E, 0x43, 0x31, 0x34, 0x2E, 0x43,
+ 0x31, 0x35, 0x2E, 0x43, 0x31, 0x36, 0x2E, 0x43,
+ // Bytes 1b80 - 1bbf
+ 0x31, 0x37, 0x2E, 0x43, 0x31, 0x38, 0x2E, 0x43,
+ 0x31, 0x39, 0x2E, 0x43, 0x32, 0x30, 0x2E, 0x43,
+ 0x3A, 0x3A, 0x3D, 0x43, 0x3D, 0x3D, 0x3D, 0x43,
+ 0x43, 0x6F, 0x2E, 0x43, 0x46, 0x41, 0x58, 0x43,
+ 0x47, 0x48, 0x7A, 0x43, 0x47, 0x50, 0x61, 0x43,
+ 0x49, 0x49, 0x49, 0x43, 0x4C, 0x54, 0x44, 0x43,
+ 0x4C, 0xC2, 0xB7, 0x43, 0x4D, 0x48, 0x7A, 0x43,
+ 0x4D, 0x50, 0x61, 0x43, 0x4D, 0xCE, 0xA9, 0x43,
+ // Bytes 1bc0 - 1bff
+ 0x50, 0x50, 0x4D, 0x43, 0x50, 0x50, 0x56, 0x43,
+ 0x50, 0x54, 0x45, 0x43, 0x54, 0x45, 0x4C, 0x43,
+ 0x54, 0x48, 0x7A, 0x43, 0x56, 0x49, 0x49, 0x43,
+ 0x58, 0x49, 0x49, 0x43, 0x61, 0x2F, 0x63, 0x43,
+ 0x61, 0x2F, 0x73, 0x43, 0x61, 0xCA, 0xBE, 0x43,
+ 0x62, 0x61, 0x72, 0x43, 0x63, 0x2F, 0x6F, 0x43,
+ 0x63, 0x2F, 0x75, 0x43, 0x63, 0x61, 0x6C, 0x43,
+ 0x63, 0x6D, 0x32, 0x43, 0x63, 0x6D, 0x33, 0x43,
+ // Bytes 1c00 - 1c3f
+ 0x64, 0x6D, 0x32, 0x43, 0x64, 0x6D, 0x33, 0x43,
+ 0x65, 0x72, 0x67, 0x43, 0x66, 0x66, 0x69, 0x43,
+ 0x66, 0x66, 0x6C, 0x43, 0x67, 0x61, 0x6C, 0x43,
+ 0x68, 0x50, 0x61, 0x43, 0x69, 0x69, 0x69, 0x43,
+ 0x6B, 0x48, 0x7A, 0x43, 0x6B, 0x50, 0x61, 0x43,
+ 0x6B, 0x6D, 0x32, 0x43, 0x6B, 0x6D, 0x33, 0x43,
+ 0x6B, 0xCE, 0xA9, 0x43, 0x6C, 0x6F, 0x67, 0x43,
+ 0x6C, 0xC2, 0xB7, 0x43, 0x6D, 0x69, 0x6C, 0x43,
+ // Bytes 1c40 - 1c7f
+ 0x6D, 0x6D, 0x32, 0x43, 0x6D, 0x6D, 0x33, 0x43,
+ 0x6D, 0x6F, 0x6C, 0x43, 0x72, 0x61, 0x64, 0x43,
+ 0x76, 0x69, 0x69, 0x43, 0x78, 0x69, 0x69, 0x43,
+ 0xC2, 0xB0, 0x43, 0x43, 0xC2, 0xB0, 0x46, 0x43,
+ 0xCA, 0xBC, 0x6E, 0x43, 0xCE, 0xBC, 0x41, 0x43,
+ 0xCE, 0xBC, 0x46, 0x43, 0xCE, 0xBC, 0x56, 0x43,
+ 0xCE, 0xBC, 0x57, 0x43, 0xCE, 0xBC, 0x67, 0x43,
+ 0xCE, 0xBC, 0x6C, 0x43, 0xCE, 0xBC, 0x6D, 0x43,
+ // Bytes 1c80 - 1cbf
+ 0xCE, 0xBC, 0x73, 0x44, 0x28, 0x31, 0x30, 0x29,
+ 0x44, 0x28, 0x31, 0x31, 0x29, 0x44, 0x28, 0x31,
+ 0x32, 0x29, 0x44, 0x28, 0x31, 0x33, 0x29, 0x44,
+ 0x28, 0x31, 0x34, 0x29, 0x44, 0x28, 0x31, 0x35,
+ 0x29, 0x44, 0x28, 0x31, 0x36, 0x29, 0x44, 0x28,
+ 0x31, 0x37, 0x29, 0x44, 0x28, 0x31, 0x38, 0x29,
+ 0x44, 0x28, 0x31, 0x39, 0x29, 0x44, 0x28, 0x32,
+ 0x30, 0x29, 0x44, 0x30, 0xE7, 0x82, 0xB9, 0x44,
+ // Bytes 1cc0 - 1cff
+ 0x31, 0xE2, 0x81, 0x84, 0x44, 0x31, 0xE6, 0x97,
+ 0xA5, 0x44, 0x31, 0xE6, 0x9C, 0x88, 0x44, 0x31,
+ 0xE7, 0x82, 0xB9, 0x44, 0x32, 0xE6, 0x97, 0xA5,
+ 0x44, 0x32, 0xE6, 0x9C, 0x88, 0x44, 0x32, 0xE7,
+ 0x82, 0xB9, 0x44, 0x33, 0xE6, 0x97, 0xA5, 0x44,
+ 0x33, 0xE6, 0x9C, 0x88, 0x44, 0x33, 0xE7, 0x82,
+ 0xB9, 0x44, 0x34, 0xE6, 0x97, 0xA5, 0x44, 0x34,
+ 0xE6, 0x9C, 0x88, 0x44, 0x34, 0xE7, 0x82, 0xB9,
+ // Bytes 1d00 - 1d3f
+ 0x44, 0x35, 0xE6, 0x97, 0xA5, 0x44, 0x35, 0xE6,
+ 0x9C, 0x88, 0x44, 0x35, 0xE7, 0x82, 0xB9, 0x44,
+ 0x36, 0xE6, 0x97, 0xA5, 0x44, 0x36, 0xE6, 0x9C,
+ 0x88, 0x44, 0x36, 0xE7, 0x82, 0xB9, 0x44, 0x37,
+ 0xE6, 0x97, 0xA5, 0x44, 0x37, 0xE6, 0x9C, 0x88,
+ 0x44, 0x37, 0xE7, 0x82, 0xB9, 0x44, 0x38, 0xE6,
+ 0x97, 0xA5, 0x44, 0x38, 0xE6, 0x9C, 0x88, 0x44,
+ 0x38, 0xE7, 0x82, 0xB9, 0x44, 0x39, 0xE6, 0x97,
+ // Bytes 1d40 - 1d7f
+ 0xA5, 0x44, 0x39, 0xE6, 0x9C, 0x88, 0x44, 0x39,
+ 0xE7, 0x82, 0xB9, 0x44, 0x56, 0x49, 0x49, 0x49,
+ 0x44, 0x61, 0x2E, 0x6D, 0x2E, 0x44, 0x6B, 0x63,
+ 0x61, 0x6C, 0x44, 0x70, 0x2E, 0x6D, 0x2E, 0x44,
+ 0x76, 0x69, 0x69, 0x69, 0x44, 0xD5, 0xA5, 0xD6,
+ 0x82, 0x44, 0xD5, 0xB4, 0xD5, 0xA5, 0x44, 0xD5,
+ 0xB4, 0xD5, 0xAB, 0x44, 0xD5, 0xB4, 0xD5, 0xAD,
+ 0x44, 0xD5, 0xB4, 0xD5, 0xB6, 0x44, 0xD5, 0xBE,
+ // Bytes 1d80 - 1dbf
+ 0xD5, 0xB6, 0x44, 0xD7, 0x90, 0xD7, 0x9C, 0x44,
+ 0xD8, 0xA7, 0xD9, 0xB4, 0x44, 0xD8, 0xA8, 0xD8,
+ 0xAC, 0x44, 0xD8, 0xA8, 0xD8, 0xAD, 0x44, 0xD8,
+ 0xA8, 0xD8, 0xAE, 0x44, 0xD8, 0xA8, 0xD8, 0xB1,
+ 0x44, 0xD8, 0xA8, 0xD8, 0xB2, 0x44, 0xD8, 0xA8,
+ 0xD9, 0x85, 0x44, 0xD8, 0xA8, 0xD9, 0x86, 0x44,
+ 0xD8, 0xA8, 0xD9, 0x87, 0x44, 0xD8, 0xA8, 0xD9,
+ 0x89, 0x44, 0xD8, 0xA8, 0xD9, 0x8A, 0x44, 0xD8,
+ // Bytes 1dc0 - 1dff
+ 0xAA, 0xD8, 0xAC, 0x44, 0xD8, 0xAA, 0xD8, 0xAD,
+ 0x44, 0xD8, 0xAA, 0xD8, 0xAE, 0x44, 0xD8, 0xAA,
+ 0xD8, 0xB1, 0x44, 0xD8, 0xAA, 0xD8, 0xB2, 0x44,
+ 0xD8, 0xAA, 0xD9, 0x85, 0x44, 0xD8, 0xAA, 0xD9,
+ 0x86, 0x44, 0xD8, 0xAA, 0xD9, 0x87, 0x44, 0xD8,
+ 0xAA, 0xD9, 0x89, 0x44, 0xD8, 0xAA, 0xD9, 0x8A,
+ 0x44, 0xD8, 0xAB, 0xD8, 0xAC, 0x44, 0xD8, 0xAB,
+ 0xD8, 0xB1, 0x44, 0xD8, 0xAB, 0xD8, 0xB2, 0x44,
+ // Bytes 1e00 - 1e3f
+ 0xD8, 0xAB, 0xD9, 0x85, 0x44, 0xD8, 0xAB, 0xD9,
+ 0x86, 0x44, 0xD8, 0xAB, 0xD9, 0x87, 0x44, 0xD8,
+ 0xAB, 0xD9, 0x89, 0x44, 0xD8, 0xAB, 0xD9, 0x8A,
+ 0x44, 0xD8, 0xAC, 0xD8, 0xAD, 0x44, 0xD8, 0xAC,
+ 0xD9, 0x85, 0x44, 0xD8, 0xAC, 0xD9, 0x89, 0x44,
+ 0xD8, 0xAC, 0xD9, 0x8A, 0x44, 0xD8, 0xAD, 0xD8,
+ 0xAC, 0x44, 0xD8, 0xAD, 0xD9, 0x85, 0x44, 0xD8,
+ 0xAD, 0xD9, 0x89, 0x44, 0xD8, 0xAD, 0xD9, 0x8A,
+ // Bytes 1e40 - 1e7f
+ 0x44, 0xD8, 0xAE, 0xD8, 0xAC, 0x44, 0xD8, 0xAE,
+ 0xD8, 0xAD, 0x44, 0xD8, 0xAE, 0xD9, 0x85, 0x44,
+ 0xD8, 0xAE, 0xD9, 0x89, 0x44, 0xD8, 0xAE, 0xD9,
+ 0x8A, 0x44, 0xD8, 0xB3, 0xD8, 0xAC, 0x44, 0xD8,
+ 0xB3, 0xD8, 0xAD, 0x44, 0xD8, 0xB3, 0xD8, 0xAE,
+ 0x44, 0xD8, 0xB3, 0xD8, 0xB1, 0x44, 0xD8, 0xB3,
+ 0xD9, 0x85, 0x44, 0xD8, 0xB3, 0xD9, 0x87, 0x44,
+ 0xD8, 0xB3, 0xD9, 0x89, 0x44, 0xD8, 0xB3, 0xD9,
+ // Bytes 1e80 - 1ebf
+ 0x8A, 0x44, 0xD8, 0xB4, 0xD8, 0xAC, 0x44, 0xD8,
+ 0xB4, 0xD8, 0xAD, 0x44, 0xD8, 0xB4, 0xD8, 0xAE,
+ 0x44, 0xD8, 0xB4, 0xD8, 0xB1, 0x44, 0xD8, 0xB4,
+ 0xD9, 0x85, 0x44, 0xD8, 0xB4, 0xD9, 0x87, 0x44,
+ 0xD8, 0xB4, 0xD9, 0x89, 0x44, 0xD8, 0xB4, 0xD9,
+ 0x8A, 0x44, 0xD8, 0xB5, 0xD8, 0xAD, 0x44, 0xD8,
+ 0xB5, 0xD8, 0xAE, 0x44, 0xD8, 0xB5, 0xD8, 0xB1,
+ 0x44, 0xD8, 0xB5, 0xD9, 0x85, 0x44, 0xD8, 0xB5,
+ // Bytes 1ec0 - 1eff
+ 0xD9, 0x89, 0x44, 0xD8, 0xB5, 0xD9, 0x8A, 0x44,
+ 0xD8, 0xB6, 0xD8, 0xAC, 0x44, 0xD8, 0xB6, 0xD8,
+ 0xAD, 0x44, 0xD8, 0xB6, 0xD8, 0xAE, 0x44, 0xD8,
+ 0xB6, 0xD8, 0xB1, 0x44, 0xD8, 0xB6, 0xD9, 0x85,
+ 0x44, 0xD8, 0xB6, 0xD9, 0x89, 0x44, 0xD8, 0xB6,
+ 0xD9, 0x8A, 0x44, 0xD8, 0xB7, 0xD8, 0xAD, 0x44,
+ 0xD8, 0xB7, 0xD9, 0x85, 0x44, 0xD8, 0xB7, 0xD9,
+ 0x89, 0x44, 0xD8, 0xB7, 0xD9, 0x8A, 0x44, 0xD8,
+ // Bytes 1f00 - 1f3f
+ 0xB8, 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD8, 0xAC,
+ 0x44, 0xD8, 0xB9, 0xD9, 0x85, 0x44, 0xD8, 0xB9,
+ 0xD9, 0x89, 0x44, 0xD8, 0xB9, 0xD9, 0x8A, 0x44,
+ 0xD8, 0xBA, 0xD8, 0xAC, 0x44, 0xD8, 0xBA, 0xD9,
+ 0x85, 0x44, 0xD8, 0xBA, 0xD9, 0x89, 0x44, 0xD8,
+ 0xBA, 0xD9, 0x8A, 0x44, 0xD9, 0x81, 0xD8, 0xAC,
+ 0x44, 0xD9, 0x81, 0xD8, 0xAD, 0x44, 0xD9, 0x81,
+ 0xD8, 0xAE, 0x44, 0xD9, 0x81, 0xD9, 0x85, 0x44,
+ // Bytes 1f40 - 1f7f
+ 0xD9, 0x81, 0xD9, 0x89, 0x44, 0xD9, 0x81, 0xD9,
+ 0x8A, 0x44, 0xD9, 0x82, 0xD8, 0xAD, 0x44, 0xD9,
+ 0x82, 0xD9, 0x85, 0x44, 0xD9, 0x82, 0xD9, 0x89,
+ 0x44, 0xD9, 0x82, 0xD9, 0x8A, 0x44, 0xD9, 0x83,
+ 0xD8, 0xA7, 0x44, 0xD9, 0x83, 0xD8, 0xAC, 0x44,
+ 0xD9, 0x83, 0xD8, 0xAD, 0x44, 0xD9, 0x83, 0xD8,
+ 0xAE, 0x44, 0xD9, 0x83, 0xD9, 0x84, 0x44, 0xD9,
+ 0x83, 0xD9, 0x85, 0x44, 0xD9, 0x83, 0xD9, 0x89,
+ // Bytes 1f80 - 1fbf
+ 0x44, 0xD9, 0x83, 0xD9, 0x8A, 0x44, 0xD9, 0x84,
+ 0xD8, 0xA7, 0x44, 0xD9, 0x84, 0xD8, 0xAC, 0x44,
+ 0xD9, 0x84, 0xD8, 0xAD, 0x44, 0xD9, 0x84, 0xD8,
+ 0xAE, 0x44, 0xD9, 0x84, 0xD9, 0x85, 0x44, 0xD9,
+ 0x84, 0xD9, 0x87, 0x44, 0xD9, 0x84, 0xD9, 0x89,
+ 0x44, 0xD9, 0x84, 0xD9, 0x8A, 0x44, 0xD9, 0x85,
+ 0xD8, 0xA7, 0x44, 0xD9, 0x85, 0xD8, 0xAC, 0x44,
+ 0xD9, 0x85, 0xD8, 0xAD, 0x44, 0xD9, 0x85, 0xD8,
+ // Bytes 1fc0 - 1fff
+ 0xAE, 0x44, 0xD9, 0x85, 0xD9, 0x85, 0x44, 0xD9,
+ 0x85, 0xD9, 0x89, 0x44, 0xD9, 0x85, 0xD9, 0x8A,
+ 0x44, 0xD9, 0x86, 0xD8, 0xAC, 0x44, 0xD9, 0x86,
+ 0xD8, 0xAD, 0x44, 0xD9, 0x86, 0xD8, 0xAE, 0x44,
+ 0xD9, 0x86, 0xD8, 0xB1, 0x44, 0xD9, 0x86, 0xD8,
+ 0xB2, 0x44, 0xD9, 0x86, 0xD9, 0x85, 0x44, 0xD9,
+ 0x86, 0xD9, 0x86, 0x44, 0xD9, 0x86, 0xD9, 0x87,
+ 0x44, 0xD9, 0x86, 0xD9, 0x89, 0x44, 0xD9, 0x86,
+ // Bytes 2000 - 203f
+ 0xD9, 0x8A, 0x44, 0xD9, 0x87, 0xD8, 0xAC, 0x44,
+ 0xD9, 0x87, 0xD9, 0x85, 0x44, 0xD9, 0x87, 0xD9,
+ 0x89, 0x44, 0xD9, 0x87, 0xD9, 0x8A, 0x44, 0xD9,
+ 0x88, 0xD9, 0xB4, 0x44, 0xD9, 0x8A, 0xD8, 0xAC,
+ 0x44, 0xD9, 0x8A, 0xD8, 0xAD, 0x44, 0xD9, 0x8A,
+ 0xD8, 0xAE, 0x44, 0xD9, 0x8A, 0xD8, 0xB1, 0x44,
+ 0xD9, 0x8A, 0xD8, 0xB2, 0x44, 0xD9, 0x8A, 0xD9,
+ 0x85, 0x44, 0xD9, 0x8A, 0xD9, 0x86, 0x44, 0xD9,
+ // Bytes 2040 - 207f
+ 0x8A, 0xD9, 0x87, 0x44, 0xD9, 0x8A, 0xD9, 0x89,
+ 0x44, 0xD9, 0x8A, 0xD9, 0x8A, 0x44, 0xD9, 0x8A,
+ 0xD9, 0xB4, 0x44, 0xDB, 0x87, 0xD9, 0xB4, 0x45,
+ 0x28, 0xE1, 0x84, 0x80, 0x29, 0x45, 0x28, 0xE1,
+ 0x84, 0x82, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x83,
+ 0x29, 0x45, 0x28, 0xE1, 0x84, 0x85, 0x29, 0x45,
+ 0x28, 0xE1, 0x84, 0x86, 0x29, 0x45, 0x28, 0xE1,
+ 0x84, 0x87, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x89,
+ // Bytes 2080 - 20bf
+ 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8B, 0x29, 0x45,
+ 0x28, 0xE1, 0x84, 0x8C, 0x29, 0x45, 0x28, 0xE1,
+ 0x84, 0x8E, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8F,
+ 0x29, 0x45, 0x28, 0xE1, 0x84, 0x90, 0x29, 0x45,
+ 0x28, 0xE1, 0x84, 0x91, 0x29, 0x45, 0x28, 0xE1,
+ 0x84, 0x92, 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x80,
+ 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x83, 0x29, 0x45,
+ 0x28, 0xE4, 0xB8, 0x89, 0x29, 0x45, 0x28, 0xE4,
+ // Bytes 20c0 - 20ff
+ 0xB9, 0x9D, 0x29, 0x45, 0x28, 0xE4, 0xBA, 0x8C,
+ 0x29, 0x45, 0x28, 0xE4, 0xBA, 0x94, 0x29, 0x45,
+ 0x28, 0xE4, 0xBB, 0xA3, 0x29, 0x45, 0x28, 0xE4,
+ 0xBC, 0x81, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x91,
+ 0x29, 0x45, 0x28, 0xE5, 0x85, 0xAB, 0x29, 0x45,
+ 0x28, 0xE5, 0x85, 0xAD, 0x29, 0x45, 0x28, 0xE5,
+ 0x8A, 0xB4, 0x29, 0x45, 0x28, 0xE5, 0x8D, 0x81,
+ 0x29, 0x45, 0x28, 0xE5, 0x8D, 0x94, 0x29, 0x45,
+ // Bytes 2100 - 213f
+ 0x28, 0xE5, 0x90, 0x8D, 0x29, 0x45, 0x28, 0xE5,
+ 0x91, 0xBC, 0x29, 0x45, 0x28, 0xE5, 0x9B, 0x9B,
+ 0x29, 0x45, 0x28, 0xE5, 0x9C, 0x9F, 0x29, 0x45,
+ 0x28, 0xE5, 0xAD, 0xA6, 0x29, 0x45, 0x28, 0xE6,
+ 0x97, 0xA5, 0x29, 0x45, 0x28, 0xE6, 0x9C, 0x88,
+ 0x29, 0x45, 0x28, 0xE6, 0x9C, 0x89, 0x29, 0x45,
+ 0x28, 0xE6, 0x9C, 0xA8, 0x29, 0x45, 0x28, 0xE6,
+ 0xA0, 0xAA, 0x29, 0x45, 0x28, 0xE6, 0xB0, 0xB4,
+ // Bytes 2140 - 217f
+ 0x29, 0x45, 0x28, 0xE7, 0x81, 0xAB, 0x29, 0x45,
+ 0x28, 0xE7, 0x89, 0xB9, 0x29, 0x45, 0x28, 0xE7,
+ 0x9B, 0xA3, 0x29, 0x45, 0x28, 0xE7, 0xA4, 0xBE,
+ 0x29, 0x45, 0x28, 0xE7, 0xA5, 0x9D, 0x29, 0x45,
+ 0x28, 0xE7, 0xA5, 0xAD, 0x29, 0x45, 0x28, 0xE8,
+ 0x87, 0xAA, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xB3,
+ 0x29, 0x45, 0x28, 0xE8, 0xB2, 0xA1, 0x29, 0x45,
+ 0x28, 0xE8, 0xB3, 0x87, 0x29, 0x45, 0x28, 0xE9,
+ // Bytes 2180 - 21bf
+ 0x87, 0x91, 0x29, 0x45, 0x30, 0xE2, 0x81, 0x84,
+ 0x33, 0x45, 0x31, 0x30, 0xE6, 0x97, 0xA5, 0x45,
+ 0x31, 0x30, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x30,
+ 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x31, 0xE6, 0x97,
+ 0xA5, 0x45, 0x31, 0x31, 0xE6, 0x9C, 0x88, 0x45,
+ 0x31, 0x31, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x32,
+ 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x32, 0xE6, 0x9C,
+ 0x88, 0x45, 0x31, 0x32, 0xE7, 0x82, 0xB9, 0x45,
+ // Bytes 21c0 - 21ff
+ 0x31, 0x33, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x33,
+ 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x34, 0xE6, 0x97,
+ 0xA5, 0x45, 0x31, 0x34, 0xE7, 0x82, 0xB9, 0x45,
+ 0x31, 0x35, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x35,
+ 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x36, 0xE6, 0x97,
+ 0xA5, 0x45, 0x31, 0x36, 0xE7, 0x82, 0xB9, 0x45,
+ 0x31, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x37,
+ 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x38, 0xE6, 0x97,
+ // Bytes 2200 - 223f
+ 0xA5, 0x45, 0x31, 0x38, 0xE7, 0x82, 0xB9, 0x45,
+ 0x31, 0x39, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x39,
+ 0xE7, 0x82, 0xB9, 0x45, 0x31, 0xE2, 0x81, 0x84,
+ 0x32, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x33, 0x45,
+ 0x31, 0xE2, 0x81, 0x84, 0x34, 0x45, 0x31, 0xE2,
+ 0x81, 0x84, 0x35, 0x45, 0x31, 0xE2, 0x81, 0x84,
+ 0x36, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x37, 0x45,
+ 0x31, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x31, 0xE2,
+ // Bytes 2240 - 227f
+ 0x81, 0x84, 0x39, 0x45, 0x32, 0x30, 0xE6, 0x97,
+ 0xA5, 0x45, 0x32, 0x30, 0xE7, 0x82, 0xB9, 0x45,
+ 0x32, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x31,
+ 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x32, 0xE6, 0x97,
+ 0xA5, 0x45, 0x32, 0x32, 0xE7, 0x82, 0xB9, 0x45,
+ 0x32, 0x33, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x33,
+ 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x34, 0xE6, 0x97,
+ 0xA5, 0x45, 0x32, 0x34, 0xE7, 0x82, 0xB9, 0x45,
+ // Bytes 2280 - 22bf
+ 0x32, 0x35, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x36,
+ 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x37, 0xE6, 0x97,
+ 0xA5, 0x45, 0x32, 0x38, 0xE6, 0x97, 0xA5, 0x45,
+ 0x32, 0x39, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0xE2,
+ 0x81, 0x84, 0x33, 0x45, 0x32, 0xE2, 0x81, 0x84,
+ 0x35, 0x45, 0x33, 0x30, 0xE6, 0x97, 0xA5, 0x45,
+ 0x33, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0xE2,
+ 0x81, 0x84, 0x34, 0x45, 0x33, 0xE2, 0x81, 0x84,
+ // Bytes 22c0 - 22ff
+ 0x35, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x38, 0x45,
+ 0x34, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x35, 0xE2,
+ 0x81, 0x84, 0x36, 0x45, 0x35, 0xE2, 0x81, 0x84,
+ 0x38, 0x45, 0x37, 0xE2, 0x81, 0x84, 0x38, 0x45,
+ 0x41, 0xE2, 0x88, 0x95, 0x6D, 0x45, 0x56, 0xE2,
+ 0x88, 0x95, 0x6D, 0x45, 0x6D, 0xE2, 0x88, 0x95,
+ 0x73, 0x46, 0x31, 0xE2, 0x81, 0x84, 0x31, 0x30,
+ 0x46, 0x43, 0xE2, 0x88, 0x95, 0x6B, 0x67, 0x46,
+ // Bytes 2300 - 233f
+ 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x46, 0xD8,
+ 0xA8, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xA8,
+ 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD8,
+ 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAC,
+ 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9,
+ 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAD, 0xD8, 0xAC,
+ 0x46, 0xD8, 0xAA, 0xD8, 0xAD, 0xD9, 0x85, 0x46,
+ 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD8,
+ // Bytes 2340 - 237f
+ 0xAA, 0xD8, 0xAE, 0xD9, 0x89, 0x46, 0xD8, 0xAA,
+ 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD9,
+ 0x85, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, 0xD9, 0x85,
+ 0xD8, 0xAD, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8,
+ 0xAE, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD9, 0x89,
+ 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD9, 0x8A, 0x46,
+ 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD8,
+ 0xAC, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xAC,
+ // Bytes 2380 - 23bf
+ 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xAC, 0xD9,
+ 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD9, 0x85,
+ 0xD9, 0x8A, 0x46, 0xD8, 0xAD, 0xD8, 0xAC, 0xD9,
+ 0x8A, 0x46, 0xD8, 0xAD, 0xD9, 0x85, 0xD9, 0x89,
+ 0x46, 0xD8, 0xAD, 0xD9, 0x85, 0xD9, 0x8A, 0x46,
+ 0xD8, 0xB3, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD8,
+ 0xB3, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD8, 0xB3,
+ 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xB3, 0xD8,
+ // Bytes 23c0 - 23ff
+ 0xAE, 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAE,
+ 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD8,
+ 0xAC, 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAD,
+ 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD9, 0x85, 0x46,
+ 0xD8, 0xB4, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8,
+ 0xB4, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xB4,
+ 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD9,
+ 0x85, 0xD8, 0xAE, 0x46, 0xD8, 0xB4, 0xD9, 0x85,
+ // Bytes 2400 - 243f
+ 0xD9, 0x85, 0x46, 0xD8, 0xB5, 0xD8, 0xAD, 0xD8,
+ 0xAD, 0x46, 0xD8, 0xB5, 0xD8, 0xAD, 0xD9, 0x8A,
+ 0x46, 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, 0x46,
+ 0xD8, 0xB5, 0xD9, 0x84, 0xDB, 0x92, 0x46, 0xD8,
+ 0xB5, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB6,
+ 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xB6, 0xD8,
+ 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB6, 0xD8, 0xAE,
+ 0xD9, 0x85, 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD8,
+ // Bytes 2440 - 247f
+ 0xAD, 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD9, 0x85,
+ 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD9, 0x8A, 0x46,
+ 0xD8, 0xB9, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD8,
+ 0xB9, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB9,
+ 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xB9, 0xD9,
+ 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xBA, 0xD9, 0x85,
+ 0xD9, 0x85, 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9,
+ 0x89, 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x8A,
+ // Bytes 2480 - 24bf
+ 0x46, 0xD9, 0x81, 0xD8, 0xAE, 0xD9, 0x85, 0x46,
+ 0xD9, 0x81, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9,
+ 0x82, 0xD9, 0x84, 0xDB, 0x92, 0x46, 0xD9, 0x82,
+ 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x82, 0xD9,
+ 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x82, 0xD9, 0x85,
+ 0xD9, 0x8A, 0x46, 0xD9, 0x83, 0xD9, 0x85, 0xD9,
+ 0x85, 0x46, 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x8A,
+ 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD8, 0xAC, 0x46,
+ // Bytes 24c0 - 24ff
+ 0xD9, 0x84, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9,
+ 0x84, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x84,
+ 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8,
+ 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x84, 0xD8, 0xAD,
+ 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAE, 0xD9,
+ 0x85, 0x46, 0xD9, 0x84, 0xD9, 0x85, 0xD8, 0xAD,
+ 0x46, 0xD9, 0x84, 0xD9, 0x85, 0xD9, 0x8A, 0x46,
+ 0xD9, 0x85, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD9,
+ // Bytes 2500 - 253f
+ 0x85, 0xD8, 0xAC, 0xD8, 0xAE, 0x46, 0xD9, 0x85,
+ 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8,
+ 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAD,
+ 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD9,
+ 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x8A,
+ 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD8, 0xAC, 0x46,
+ 0xD9, 0x85, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9,
+ 0x85, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD9, 0x85,
+ // Bytes 2540 - 257f
+ 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD8,
+ 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x86, 0xD8, 0xAC,
+ 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9,
+ 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x8A,
+ 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x85, 0x46,
+ 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD9,
+ 0x86, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x86,
+ 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD9,
+ // Bytes 2580 - 25bf
+ 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x87, 0xD9, 0x85,
+ 0xD8, 0xAC, 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD9,
+ 0x85, 0x46, 0xD9, 0x8A, 0xD8, 0xAC, 0xD9, 0x8A,
+ 0x46, 0xD9, 0x8A, 0xD8, 0xAD, 0xD9, 0x8A, 0x46,
+ 0xD9, 0x8A, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9,
+ 0x8A, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x8A,
+ 0xD9, 0x94, 0xD8, 0xA7, 0x46, 0xD9, 0x8A, 0xD9,
+ 0x94, 0xD8, 0xAC, 0x46, 0xD9, 0x8A, 0xD9, 0x94,
+ // Bytes 25c0 - 25ff
+ 0xD8, 0xAD, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8,
+ 0xAE, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xB1,
+ 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xB2, 0x46,
+ 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x85, 0x46, 0xD9,
+ 0x8A, 0xD9, 0x94, 0xD9, 0x86, 0x46, 0xD9, 0x8A,
+ 0xD9, 0x94, 0xD9, 0x87, 0x46, 0xD9, 0x8A, 0xD9,
+ 0x94, 0xD9, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94,
+ 0xD9, 0x89, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9,
+ // Bytes 2600 - 263f
+ 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x86,
+ 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x87, 0x46,
+ 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x88, 0x46, 0xD9,
+ 0x8A, 0xD9, 0x94, 0xDB, 0x90, 0x46, 0xD9, 0x8A,
+ 0xD9, 0x94, 0xDB, 0x95, 0x46, 0xE0, 0xB9, 0x8D,
+ 0xE0, 0xB8, 0xB2, 0x46, 0xE0, 0xBA, 0xAB, 0xE0,
+ 0xBA, 0x99, 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA,
+ 0xA1, 0x46, 0xE0, 0xBB, 0x8D, 0xE0, 0xBA, 0xB2,
+ // Bytes 2640 - 267f
+ 0x46, 0xE0, 0xBD, 0x80, 0xE0, 0xBE, 0xB5, 0x46,
+ 0xE0, 0xBD, 0x82, 0xE0, 0xBE, 0xB7, 0x46, 0xE0,
+ 0xBD, 0x8C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD,
+ 0x91, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x96,
+ 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x9B, 0xE0,
+ 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0x90, 0xE0, 0xBE,
+ 0xB5, 0x46, 0xE0, 0xBE, 0x92, 0xE0, 0xBE, 0xB7,
+ 0x46, 0xE0, 0xBE, 0x9C, 0xE0, 0xBE, 0xB7, 0x46,
+ // Bytes 2680 - 26bf
+ 0xE0, 0xBE, 0xA1, 0xE0, 0xBE, 0xB7, 0x46, 0xE0,
+ 0xBE, 0xA6, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE,
+ 0xAB, 0xE0, 0xBE, 0xB7, 0x46, 0xE2, 0x80, 0xB2,
+ 0xE2, 0x80, 0xB2, 0x46, 0xE2, 0x80, 0xB5, 0xE2,
+ 0x80, 0xB5, 0x46, 0xE2, 0x88, 0xAB, 0xE2, 0x88,
+ 0xAB, 0x46, 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE,
+ 0x46, 0xE3, 0x81, 0xBB, 0xE3, 0x81, 0x8B, 0x46,
+ 0xE3, 0x82, 0x88, 0xE3, 0x82, 0x8A, 0x46, 0xE3,
+ // Bytes 26c0 - 26ff
+ 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0x46, 0xE3, 0x82,
+ 0xB3, 0xE3, 0x82, 0xB3, 0x46, 0xE3, 0x82, 0xB3,
+ 0xE3, 0x83, 0x88, 0x46, 0xE3, 0x83, 0x88, 0xE3,
+ 0x83, 0xB3, 0x46, 0xE3, 0x83, 0x8A, 0xE3, 0x83,
+ 0x8E, 0x46, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xB3,
+ 0x46, 0xE3, 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0x46,
+ 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xA9, 0x46, 0xE3,
+ 0x83, 0xAC, 0xE3, 0x83, 0xA0, 0x46, 0xE5, 0xA4,
+ // Bytes 2700 - 273f
+ 0xA7, 0xE6, 0xAD, 0xA3, 0x46, 0xE5, 0xB9, 0xB3,
+ 0xE6, 0x88, 0x90, 0x46, 0xE6, 0x98, 0x8E, 0xE6,
+ 0xB2, 0xBB, 0x46, 0xE6, 0x98, 0xAD, 0xE5, 0x92,
+ 0x8C, 0x47, 0x72, 0x61, 0x64, 0xE2, 0x88, 0x95,
+ 0x73, 0x47, 0xE3, 0x80, 0x94, 0x53, 0xE3, 0x80,
+ 0x95, 0x48, 0x28, 0xE1, 0x84, 0x80, 0xE1, 0x85,
+ 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x82, 0xE1,
+ 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x83,
+ // Bytes 2740 - 277f
+ 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84,
+ 0x85, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1,
+ 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28,
+ 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x29, 0x48,
+ 0x28, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x29,
+ 0x48, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1,
+ 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85,
+ 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, 0xE1,
+ // Bytes 2780 - 27bf
+ 0x85, 0xAE, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8E,
+ 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84,
+ 0x8F, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1,
+ 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28,
+ 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x29, 0x48,
+ 0x28, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x29,
+ 0x48, 0x72, 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73,
+ 0x32, 0x48, 0xD8, 0xA7, 0xD9, 0x83, 0xD8, 0xA8,
+ // Bytes 27c0 - 27ff
+ 0xD8, 0xB1, 0x48, 0xD8, 0xA7, 0xD9, 0x84, 0xD9,
+ 0x84, 0xD9, 0x87, 0x48, 0xD8, 0xB1, 0xD8, 0xB3,
+ 0xD9, 0x88, 0xD9, 0x84, 0x48, 0xD8, 0xB1, 0xDB,
+ 0x8C, 0xD8, 0xA7, 0xD9, 0x84, 0x48, 0xD8, 0xB5,
+ 0xD9, 0x84, 0xD8, 0xB9, 0xD9, 0x85, 0x48, 0xD8,
+ 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x48,
+ 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0xD8, 0xAF,
+ 0x48, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, 0x84, 0xD9,
+ // Bytes 2800 - 283f
+ 0x85, 0x49, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2,
+ 0xE2, 0x80, 0xB2, 0x49, 0xE2, 0x80, 0xB5, 0xE2,
+ 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x49, 0xE2, 0x88,
+ 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x49,
+ 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0xE2, 0x88,
+ 0xAE, 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xB8, 0x89,
+ 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE4,
+ 0xBA, 0x8C, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80,
+ // Bytes 2840 - 287f
+ 0x94, 0xE5, 0x8B, 0x9D, 0xE3, 0x80, 0x95, 0x49,
+ 0xE3, 0x80, 0x94, 0xE5, 0xAE, 0x89, 0xE3, 0x80,
+ 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x89, 0x93,
+ 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6,
+ 0x95, 0x97, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80,
+ 0x94, 0xE6, 0x9C, 0xAC, 0xE3, 0x80, 0x95, 0x49,
+ 0xE3, 0x80, 0x94, 0xE7, 0x82, 0xB9, 0xE3, 0x80,
+ 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, 0x9B, 0x97,
+ // Bytes 2880 - 28bf
+ 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x82, 0xA2, 0xE3,
+ 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49,
+ 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0xA9, 0xE3, 0x83,
+ 0xB3, 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xB3,
+ 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, 0xAA, 0xE3,
+ 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x49, 0xE3, 0x82,
+ 0xAB, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAA, 0x49,
+ // Bytes 28c0 - 28ff
+ 0xE3, 0x82, 0xB1, 0xE3, 0x83, 0xBC, 0xE3, 0x82,
+ 0xB9, 0x49, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xAB,
+ 0xE3, 0x83, 0x8A, 0x49, 0xE3, 0x82, 0xBB, 0xE3,
+ 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82,
+ 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x49,
+ 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0xE3, 0x82,
+ 0xB7, 0x49, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99,
+ 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x8E, 0xE3,
+ // Bytes 2900 - 293f
+ 0x83, 0x83, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83,
+ 0x8F, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0x84, 0x49,
+ 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0xAB, 0x49, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A,
+ 0xE3, 0x82, 0xB3, 0x49, 0xE3, 0x83, 0x95, 0xE3,
+ 0x83, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83,
+ 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xBD, 0x49,
+ 0xE3, 0x83, 0x98, 0xE3, 0x83, 0xAB, 0xE3, 0x83,
+ // Bytes 2940 - 297f
+ 0x84, 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC,
+ 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9B, 0xE3,
+ 0x83, 0xBC, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83,
+ 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAB, 0x49,
+ 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0x83, 0xE3, 0x83,
+ 0x8F, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xAB,
+ 0xE3, 0x82, 0xAF, 0x49, 0xE3, 0x83, 0xA4, 0xE3,
+ 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83,
+ // Bytes 2980 - 29bf
+ 0xA6, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, 0x49,
+ 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83,
+ 0x88, 0x4C, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2,
+ 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x4C, 0xE2,
+ 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB,
+ 0xE2, 0x88, 0xAB, 0x4C, 0xE3, 0x82, 0xA2, 0xE3,
+ 0x83, 0xAB, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA1,
+ 0x4C, 0xE3, 0x82, 0xA8, 0xE3, 0x83, 0xBC, 0xE3,
+ // Bytes 29c0 - 29ff
+ 0x82, 0xAB, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82,
+ 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAD, 0xE3,
+ 0x83, 0xB3, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x9E, 0x4C,
+ 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xA9, 0xE3, 0x83,
+ 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x82, 0xAB,
+ 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAA, 0xE3, 0x83,
+ 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99,
+ // Bytes 2a00 - 2a3f
+ 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xBC, 0x4C, 0xE3,
+ 0x82, 0xAD, 0xE3, 0x83, 0xA5, 0xE3, 0x83, 0xAA,
+ 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAF, 0xE3,
+ 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0,
+ 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3,
+ 0x83, 0xBC, 0xE3, 0x83, 0x8D, 0x4C, 0xE3, 0x82,
+ 0xB5, 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, 0xE3,
+ 0x83, 0xAB, 0x4C, 0xE3, 0x82, 0xBF, 0xE3, 0x82,
+ // Bytes 2a40 - 2a7f
+ 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x4C,
+ 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0x84, 0x4C, 0xE3, 0x83, 0x92,
+ 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xAF, 0xE3, 0x83,
+ 0xAB, 0x4C, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA3,
+ 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0x4C, 0xE3,
+ 0x83, 0x98, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC,
+ 0xE3, 0x82, 0xBF, 0x4C, 0xE3, 0x83, 0x98, 0xE3,
+ // Bytes 2a80 - 2abf
+ 0x82, 0x9A, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0x92,
+ 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3,
+ 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83,
+ 0x9B, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3,
+ 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x9E, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0x4C,
+ 0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xAF, 0xE3, 0x83,
+ 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 0x83, 0xA1,
+ // Bytes 2ac0 - 2aff
+ 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83,
+ 0xAB, 0x4C, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x83,
+ 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3,
+ 0x83, 0xAB, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A,
+ 0xE3, 0x83, 0xBC, 0x4C, 0xE6, 0xA0, 0xAA, 0xE5,
+ 0xBC, 0x8F, 0xE4, 0xBC, 0x9A, 0xE7, 0xA4, 0xBE,
+ 0x4E, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA9,
+ 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xAE, 0x29, 0x4F,
+ // Bytes 2b00 - 2b3f
+ 0xD8, 0xAC, 0xD9, 0x84, 0x20, 0xD8, 0xAC, 0xD9,
+ 0x84, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x87, 0x4F,
+ 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0x8F, 0xE3, 0x82,
+ 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0x4F,
+ 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, 0xE3, 0x83,
+ 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0x4F,
+ 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x83,
+ 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4F,
+ // Bytes 2b40 - 2b7f
+ 0xE3, 0x82, 0xB5, 0xE3, 0x83, 0xB3, 0xE3, 0x83,
+ 0x81, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x4F,
+ 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xAB, 0x4F,
+ 0xE3, 0x83, 0x98, 0xE3, 0x82, 0xAF, 0xE3, 0x82,
+ 0xBF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x4F,
+ 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x82,
+ 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x4F,
+ // Bytes 2b80 - 2bbf
+ 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xB3, 0xE3, 0x82,
+ 0xB7, 0xE3, 0x83, 0xA7, 0xE3, 0x83, 0xB3, 0x4F,
+ 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x4F,
+ 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xBC, 0xE3, 0x83,
+ 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x51,
+ 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1,
+ 0x84, 0x8C, 0xE1, 0x85, 0xA5, 0xE1, 0x86, 0xAB,
+ // Bytes 2bc0 - 2bff
+ 0x29, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99,
+ 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBF, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xBC, 0x52, 0xE3, 0x82, 0xAD,
+ 0xE3, 0x83, 0xAD, 0xE3, 0x82, 0xAF, 0xE3, 0x82,
+ 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x52,
+ 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x83,
+ 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3,
+ 0x83, 0xAB, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x82,
+ // Bytes 2c00 - 2c3f
+ 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0xE3,
+ 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x52, 0xE3, 0x82,
+ 0xAF, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBB, 0xE3,
+ 0x82, 0x99, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAD,
+ 0x52, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3,
+ 0x83, 0xBC, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3,
+ 0xE3, 0x83, 0x88, 0x52, 0xE3, 0x83, 0x92, 0xE3,
+ 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0xE3, 0x82, 0xB9,
+ // Bytes 2c40 - 2c7f
+ 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3,
+ 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x83,
+ 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0xA7, 0xE3, 0x83,
+ 0xAB, 0x52, 0xE3, 0x83, 0x9F, 0xE3, 0x83, 0xAA,
+ 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0xE3, 0x83,
+ 0xBC, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0xAC,
+ 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82,
+ 0xB1, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, 0x61,
+ // Bytes 2c80 - 2cbf
+ 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, 0x20, 0xD8,
+ 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x20,
+ 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, 0x87,
+ 0x20, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, 0x84, 0xD9,
+ 0x85, 0x06, 0xE0, 0xA7, 0x87, 0xE0, 0xA6, 0xBE,
+ 0x01, 0x06, 0xE0, 0xA7, 0x87, 0xE0, 0xA7, 0x97,
+ 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAC, 0xBE,
+ 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAD, 0x96,
+ // Bytes 2cc0 - 2cff
+ 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAD, 0x97,
+ 0x01, 0x06, 0xE0, 0xAE, 0x92, 0xE0, 0xAF, 0x97,
+ 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, 0xAE, 0xBE,
+ 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, 0xAF, 0x97,
+ 0x01, 0x06, 0xE0, 0xAF, 0x87, 0xE0, 0xAE, 0xBE,
+ 0x01, 0x06, 0xE0, 0xB2, 0xBF, 0xE0, 0xB3, 0x95,
+ 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x95,
+ 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x96,
+ // Bytes 2d00 - 2d3f
+ 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, 0xB4, 0xBE,
+ 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, 0xB5, 0x97,
+ 0x01, 0x06, 0xE0, 0xB5, 0x87, 0xE0, 0xB4, 0xBE,
+ 0x01, 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x9F,
+ 0x01, 0x06, 0xE1, 0x80, 0xA5, 0xE1, 0x80, 0xAE,
+ 0x01, 0x06, 0xE1, 0xAC, 0x85, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0x87, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0x89, 0xE1, 0xAC, 0xB5,
+ // Bytes 2d40 - 2d7f
+ 0x01, 0x06, 0xE1, 0xAC, 0x8B, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0x8D, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0x91, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0xBA, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0xBC, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0xBE, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAC, 0xBF, 0xE1, 0xAC, 0xB5,
+ 0x01, 0x06, 0xE1, 0xAD, 0x82, 0xE1, 0xAC, 0xB5,
+ // Bytes 2d80 - 2dbf
+ 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB1, 0xF0, 0x91,
+ 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB2,
+ 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91,
+ 0x8D, 0x87, 0xF0, 0x91, 0x8C, 0xBE, 0x01, 0x08,
+ 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, 0x8D, 0x97,
+ 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91,
+ 0x92, 0xB0, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9,
+ 0xF0, 0x91, 0x92, 0xBA, 0x01, 0x08, 0xF0, 0x91,
+ // Bytes 2dc0 - 2dff
+ 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBD, 0x01, 0x08,
+ 0xF0, 0x91, 0x96, 0xB8, 0xF0, 0x91, 0x96, 0xAF,
+ 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB9, 0xF0, 0x91,
+ 0x96, 0xAF, 0x01, 0x09, 0xE0, 0xB3, 0x86, 0xE0,
+ 0xB3, 0x82, 0xE0, 0xB3, 0x95, 0x02, 0x09, 0xE0,
+ 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0xE0, 0xB7, 0x8A,
+ 0x12, 0x44, 0x44, 0x5A, 0xCC, 0x8C, 0xC9, 0x44,
+ 0x44, 0x7A, 0xCC, 0x8C, 0xC9, 0x44, 0x64, 0x7A,
+ // Bytes 2e00 - 2e3f
+ 0xCC, 0x8C, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7,
+ 0xD9, 0x93, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7,
+ 0xD9, 0x94, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7,
+ 0xD9, 0x95, 0xB5, 0x46, 0xE1, 0x84, 0x80, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x82, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x83, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x85, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x86, 0xE1,
+ // Bytes 2e40 - 2e7f
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x87, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x89, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1,
+ 0x85, 0xAE, 0x01, 0x46, 0xE1, 0x84, 0x8C, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8E, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8F, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x90, 0xE1,
+ // Bytes 2e80 - 2ebf
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x91, 0xE1,
+ 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x92, 0xE1,
+ 0x85, 0xA1, 0x01, 0x49, 0xE3, 0x83, 0xA1, 0xE3,
+ 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, 0xE1,
+ 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0xE1, 0x84, 0x8B,
+ 0xE1, 0x85, 0xB4, 0x01, 0x4C, 0xE3, 0x82, 0xAD,
+ 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xAB, 0xE3, 0x82,
+ 0x99, 0x0D, 0x4C, 0xE3, 0x82, 0xB3, 0xE3, 0x83,
+ // Bytes 2ec0 - 2eff
+ 0xBC, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D,
+ 0x4C, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3,
+ 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE1,
+ 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0xE1, 0x86, 0xB7,
+ 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA9, 0x01, 0x4F,
+ 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0x8B, 0xE3, 0x83,
+ 0xB3, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D,
+ 0x4F, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xAA, 0xE3,
+ // Bytes 2f00 - 2f3f
+ 0x83, 0xB3, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99,
+ 0x0D, 0x4F, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A,
+ 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB7, 0xE3, 0x82,
+ 0x99, 0x0D, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82,
+ 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3,
+ 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x82, 0xA8, 0xE3,
+ 0x82, 0xB9, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xBC,
+ 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 0x52,
+ // Bytes 2f40 - 2f7f
+ 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA1, 0xE3, 0x83,
+ 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3,
+ 0x82, 0x99, 0x0D, 0x86, 0xE0, 0xB3, 0x86, 0xE0,
+ 0xB3, 0x82, 0x01, 0x86, 0xE0, 0xB7, 0x99, 0xE0,
+ 0xB7, 0x8F, 0x01, 0x03, 0x3C, 0xCC, 0xB8, 0x05,
+ 0x03, 0x3D, 0xCC, 0xB8, 0x05, 0x03, 0x3E, 0xCC,
+ 0xB8, 0x05, 0x03, 0x41, 0xCC, 0x80, 0xC9, 0x03,
+ 0x41, 0xCC, 0x81, 0xC9, 0x03, 0x41, 0xCC, 0x83,
+ // Bytes 2f80 - 2fbf
+ 0xC9, 0x03, 0x41, 0xCC, 0x84, 0xC9, 0x03, 0x41,
+ 0xCC, 0x89, 0xC9, 0x03, 0x41, 0xCC, 0x8C, 0xC9,
+ 0x03, 0x41, 0xCC, 0x8F, 0xC9, 0x03, 0x41, 0xCC,
+ 0x91, 0xC9, 0x03, 0x41, 0xCC, 0xA5, 0xB5, 0x03,
+ 0x41, 0xCC, 0xA8, 0xA5, 0x03, 0x42, 0xCC, 0x87,
+ 0xC9, 0x03, 0x42, 0xCC, 0xA3, 0xB5, 0x03, 0x42,
+ 0xCC, 0xB1, 0xB5, 0x03, 0x43, 0xCC, 0x81, 0xC9,
+ 0x03, 0x43, 0xCC, 0x82, 0xC9, 0x03, 0x43, 0xCC,
+ // Bytes 2fc0 - 2fff
+ 0x87, 0xC9, 0x03, 0x43, 0xCC, 0x8C, 0xC9, 0x03,
+ 0x44, 0xCC, 0x87, 0xC9, 0x03, 0x44, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x44, 0xCC, 0xA3, 0xB5, 0x03, 0x44,
+ 0xCC, 0xA7, 0xA5, 0x03, 0x44, 0xCC, 0xAD, 0xB5,
+ 0x03, 0x44, 0xCC, 0xB1, 0xB5, 0x03, 0x45, 0xCC,
+ 0x80, 0xC9, 0x03, 0x45, 0xCC, 0x81, 0xC9, 0x03,
+ 0x45, 0xCC, 0x83, 0xC9, 0x03, 0x45, 0xCC, 0x86,
+ 0xC9, 0x03, 0x45, 0xCC, 0x87, 0xC9, 0x03, 0x45,
+ // Bytes 3000 - 303f
+ 0xCC, 0x88, 0xC9, 0x03, 0x45, 0xCC, 0x89, 0xC9,
+ 0x03, 0x45, 0xCC, 0x8C, 0xC9, 0x03, 0x45, 0xCC,
+ 0x8F, 0xC9, 0x03, 0x45, 0xCC, 0x91, 0xC9, 0x03,
+ 0x45, 0xCC, 0xA8, 0xA5, 0x03, 0x45, 0xCC, 0xAD,
+ 0xB5, 0x03, 0x45, 0xCC, 0xB0, 0xB5, 0x03, 0x46,
+ 0xCC, 0x87, 0xC9, 0x03, 0x47, 0xCC, 0x81, 0xC9,
+ 0x03, 0x47, 0xCC, 0x82, 0xC9, 0x03, 0x47, 0xCC,
+ 0x84, 0xC9, 0x03, 0x47, 0xCC, 0x86, 0xC9, 0x03,
+ // Bytes 3040 - 307f
+ 0x47, 0xCC, 0x87, 0xC9, 0x03, 0x47, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x47, 0xCC, 0xA7, 0xA5, 0x03, 0x48,
+ 0xCC, 0x82, 0xC9, 0x03, 0x48, 0xCC, 0x87, 0xC9,
+ 0x03, 0x48, 0xCC, 0x88, 0xC9, 0x03, 0x48, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x48, 0xCC, 0xA3, 0xB5, 0x03,
+ 0x48, 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0xAE,
+ 0xB5, 0x03, 0x49, 0xCC, 0x80, 0xC9, 0x03, 0x49,
+ 0xCC, 0x81, 0xC9, 0x03, 0x49, 0xCC, 0x82, 0xC9,
+ // Bytes 3080 - 30bf
+ 0x03, 0x49, 0xCC, 0x83, 0xC9, 0x03, 0x49, 0xCC,
+ 0x84, 0xC9, 0x03, 0x49, 0xCC, 0x86, 0xC9, 0x03,
+ 0x49, 0xCC, 0x87, 0xC9, 0x03, 0x49, 0xCC, 0x89,
+ 0xC9, 0x03, 0x49, 0xCC, 0x8C, 0xC9, 0x03, 0x49,
+ 0xCC, 0x8F, 0xC9, 0x03, 0x49, 0xCC, 0x91, 0xC9,
+ 0x03, 0x49, 0xCC, 0xA3, 0xB5, 0x03, 0x49, 0xCC,
+ 0xA8, 0xA5, 0x03, 0x49, 0xCC, 0xB0, 0xB5, 0x03,
+ 0x4A, 0xCC, 0x82, 0xC9, 0x03, 0x4B, 0xCC, 0x81,
+ // Bytes 30c0 - 30ff
+ 0xC9, 0x03, 0x4B, 0xCC, 0x8C, 0xC9, 0x03, 0x4B,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x4B, 0xCC, 0xA7, 0xA5,
+ 0x03, 0x4B, 0xCC, 0xB1, 0xB5, 0x03, 0x4C, 0xCC,
+ 0x81, 0xC9, 0x03, 0x4C, 0xCC, 0x8C, 0xC9, 0x03,
+ 0x4C, 0xCC, 0xA7, 0xA5, 0x03, 0x4C, 0xCC, 0xAD,
+ 0xB5, 0x03, 0x4C, 0xCC, 0xB1, 0xB5, 0x03, 0x4D,
+ 0xCC, 0x81, 0xC9, 0x03, 0x4D, 0xCC, 0x87, 0xC9,
+ 0x03, 0x4D, 0xCC, 0xA3, 0xB5, 0x03, 0x4E, 0xCC,
+ // Bytes 3100 - 313f
+ 0x80, 0xC9, 0x03, 0x4E, 0xCC, 0x81, 0xC9, 0x03,
+ 0x4E, 0xCC, 0x83, 0xC9, 0x03, 0x4E, 0xCC, 0x87,
+ 0xC9, 0x03, 0x4E, 0xCC, 0x8C, 0xC9, 0x03, 0x4E,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0xA7, 0xA5,
+ 0x03, 0x4E, 0xCC, 0xAD, 0xB5, 0x03, 0x4E, 0xCC,
+ 0xB1, 0xB5, 0x03, 0x4F, 0xCC, 0x80, 0xC9, 0x03,
+ 0x4F, 0xCC, 0x81, 0xC9, 0x03, 0x4F, 0xCC, 0x86,
+ 0xC9, 0x03, 0x4F, 0xCC, 0x89, 0xC9, 0x03, 0x4F,
+ // Bytes 3140 - 317f
+ 0xCC, 0x8B, 0xC9, 0x03, 0x4F, 0xCC, 0x8C, 0xC9,
+ 0x03, 0x4F, 0xCC, 0x8F, 0xC9, 0x03, 0x4F, 0xCC,
+ 0x91, 0xC9, 0x03, 0x50, 0xCC, 0x81, 0xC9, 0x03,
+ 0x50, 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x81,
+ 0xC9, 0x03, 0x52, 0xCC, 0x87, 0xC9, 0x03, 0x52,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x52, 0xCC, 0x8F, 0xC9,
+ 0x03, 0x52, 0xCC, 0x91, 0xC9, 0x03, 0x52, 0xCC,
+ 0xA7, 0xA5, 0x03, 0x52, 0xCC, 0xB1, 0xB5, 0x03,
+ // Bytes 3180 - 31bf
+ 0x53, 0xCC, 0x82, 0xC9, 0x03, 0x53, 0xCC, 0x87,
+ 0xC9, 0x03, 0x53, 0xCC, 0xA6, 0xB5, 0x03, 0x53,
+ 0xCC, 0xA7, 0xA5, 0x03, 0x54, 0xCC, 0x87, 0xC9,
+ 0x03, 0x54, 0xCC, 0x8C, 0xC9, 0x03, 0x54, 0xCC,
+ 0xA3, 0xB5, 0x03, 0x54, 0xCC, 0xA6, 0xB5, 0x03,
+ 0x54, 0xCC, 0xA7, 0xA5, 0x03, 0x54, 0xCC, 0xAD,
+ 0xB5, 0x03, 0x54, 0xCC, 0xB1, 0xB5, 0x03, 0x55,
+ 0xCC, 0x80, 0xC9, 0x03, 0x55, 0xCC, 0x81, 0xC9,
+ // Bytes 31c0 - 31ff
+ 0x03, 0x55, 0xCC, 0x82, 0xC9, 0x03, 0x55, 0xCC,
+ 0x86, 0xC9, 0x03, 0x55, 0xCC, 0x89, 0xC9, 0x03,
+ 0x55, 0xCC, 0x8A, 0xC9, 0x03, 0x55, 0xCC, 0x8B,
+ 0xC9, 0x03, 0x55, 0xCC, 0x8C, 0xC9, 0x03, 0x55,
+ 0xCC, 0x8F, 0xC9, 0x03, 0x55, 0xCC, 0x91, 0xC9,
+ 0x03, 0x55, 0xCC, 0xA3, 0xB5, 0x03, 0x55, 0xCC,
+ 0xA4, 0xB5, 0x03, 0x55, 0xCC, 0xA8, 0xA5, 0x03,
+ 0x55, 0xCC, 0xAD, 0xB5, 0x03, 0x55, 0xCC, 0xB0,
+ // Bytes 3200 - 323f
+ 0xB5, 0x03, 0x56, 0xCC, 0x83, 0xC9, 0x03, 0x56,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x57, 0xCC, 0x80, 0xC9,
+ 0x03, 0x57, 0xCC, 0x81, 0xC9, 0x03, 0x57, 0xCC,
+ 0x82, 0xC9, 0x03, 0x57, 0xCC, 0x87, 0xC9, 0x03,
+ 0x57, 0xCC, 0x88, 0xC9, 0x03, 0x57, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x58, 0xCC, 0x87, 0xC9, 0x03, 0x58,
+ 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x80, 0xC9,
+ 0x03, 0x59, 0xCC, 0x81, 0xC9, 0x03, 0x59, 0xCC,
+ // Bytes 3240 - 327f
+ 0x82, 0xC9, 0x03, 0x59, 0xCC, 0x83, 0xC9, 0x03,
+ 0x59, 0xCC, 0x84, 0xC9, 0x03, 0x59, 0xCC, 0x87,
+ 0xC9, 0x03, 0x59, 0xCC, 0x88, 0xC9, 0x03, 0x59,
+ 0xCC, 0x89, 0xC9, 0x03, 0x59, 0xCC, 0xA3, 0xB5,
+ 0x03, 0x5A, 0xCC, 0x81, 0xC9, 0x03, 0x5A, 0xCC,
+ 0x82, 0xC9, 0x03, 0x5A, 0xCC, 0x87, 0xC9, 0x03,
+ 0x5A, 0xCC, 0x8C, 0xC9, 0x03, 0x5A, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x5A, 0xCC, 0xB1, 0xB5, 0x03, 0x61,
+ // Bytes 3280 - 32bf
+ 0xCC, 0x80, 0xC9, 0x03, 0x61, 0xCC, 0x81, 0xC9,
+ 0x03, 0x61, 0xCC, 0x83, 0xC9, 0x03, 0x61, 0xCC,
+ 0x84, 0xC9, 0x03, 0x61, 0xCC, 0x89, 0xC9, 0x03,
+ 0x61, 0xCC, 0x8C, 0xC9, 0x03, 0x61, 0xCC, 0x8F,
+ 0xC9, 0x03, 0x61, 0xCC, 0x91, 0xC9, 0x03, 0x61,
+ 0xCC, 0xA5, 0xB5, 0x03, 0x61, 0xCC, 0xA8, 0xA5,
+ 0x03, 0x62, 0xCC, 0x87, 0xC9, 0x03, 0x62, 0xCC,
+ 0xA3, 0xB5, 0x03, 0x62, 0xCC, 0xB1, 0xB5, 0x03,
+ // Bytes 32c0 - 32ff
+ 0x63, 0xCC, 0x81, 0xC9, 0x03, 0x63, 0xCC, 0x82,
+ 0xC9, 0x03, 0x63, 0xCC, 0x87, 0xC9, 0x03, 0x63,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0x87, 0xC9,
+ 0x03, 0x64, 0xCC, 0x8C, 0xC9, 0x03, 0x64, 0xCC,
+ 0xA3, 0xB5, 0x03, 0x64, 0xCC, 0xA7, 0xA5, 0x03,
+ 0x64, 0xCC, 0xAD, 0xB5, 0x03, 0x64, 0xCC, 0xB1,
+ 0xB5, 0x03, 0x65, 0xCC, 0x80, 0xC9, 0x03, 0x65,
+ 0xCC, 0x81, 0xC9, 0x03, 0x65, 0xCC, 0x83, 0xC9,
+ // Bytes 3300 - 333f
+ 0x03, 0x65, 0xCC, 0x86, 0xC9, 0x03, 0x65, 0xCC,
+ 0x87, 0xC9, 0x03, 0x65, 0xCC, 0x88, 0xC9, 0x03,
+ 0x65, 0xCC, 0x89, 0xC9, 0x03, 0x65, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x65, 0xCC, 0x8F, 0xC9, 0x03, 0x65,
+ 0xCC, 0x91, 0xC9, 0x03, 0x65, 0xCC, 0xA8, 0xA5,
+ 0x03, 0x65, 0xCC, 0xAD, 0xB5, 0x03, 0x65, 0xCC,
+ 0xB0, 0xB5, 0x03, 0x66, 0xCC, 0x87, 0xC9, 0x03,
+ 0x67, 0xCC, 0x81, 0xC9, 0x03, 0x67, 0xCC, 0x82,
+ // Bytes 3340 - 337f
+ 0xC9, 0x03, 0x67, 0xCC, 0x84, 0xC9, 0x03, 0x67,
+ 0xCC, 0x86, 0xC9, 0x03, 0x67, 0xCC, 0x87, 0xC9,
+ 0x03, 0x67, 0xCC, 0x8C, 0xC9, 0x03, 0x67, 0xCC,
+ 0xA7, 0xA5, 0x03, 0x68, 0xCC, 0x82, 0xC9, 0x03,
+ 0x68, 0xCC, 0x87, 0xC9, 0x03, 0x68, 0xCC, 0x88,
+ 0xC9, 0x03, 0x68, 0xCC, 0x8C, 0xC9, 0x03, 0x68,
+ 0xCC, 0xA3, 0xB5, 0x03, 0x68, 0xCC, 0xA7, 0xA5,
+ 0x03, 0x68, 0xCC, 0xAE, 0xB5, 0x03, 0x68, 0xCC,
+ // Bytes 3380 - 33bf
+ 0xB1, 0xB5, 0x03, 0x69, 0xCC, 0x80, 0xC9, 0x03,
+ 0x69, 0xCC, 0x81, 0xC9, 0x03, 0x69, 0xCC, 0x82,
+ 0xC9, 0x03, 0x69, 0xCC, 0x83, 0xC9, 0x03, 0x69,
+ 0xCC, 0x84, 0xC9, 0x03, 0x69, 0xCC, 0x86, 0xC9,
+ 0x03, 0x69, 0xCC, 0x89, 0xC9, 0x03, 0x69, 0xCC,
+ 0x8C, 0xC9, 0x03, 0x69, 0xCC, 0x8F, 0xC9, 0x03,
+ 0x69, 0xCC, 0x91, 0xC9, 0x03, 0x69, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x69, 0xCC, 0xA8, 0xA5, 0x03, 0x69,
+ // Bytes 33c0 - 33ff
+ 0xCC, 0xB0, 0xB5, 0x03, 0x6A, 0xCC, 0x82, 0xC9,
+ 0x03, 0x6A, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC,
+ 0x81, 0xC9, 0x03, 0x6B, 0xCC, 0x8C, 0xC9, 0x03,
+ 0x6B, 0xCC, 0xA3, 0xB5, 0x03, 0x6B, 0xCC, 0xA7,
+ 0xA5, 0x03, 0x6B, 0xCC, 0xB1, 0xB5, 0x03, 0x6C,
+ 0xCC, 0x81, 0xC9, 0x03, 0x6C, 0xCC, 0x8C, 0xC9,
+ 0x03, 0x6C, 0xCC, 0xA7, 0xA5, 0x03, 0x6C, 0xCC,
+ 0xAD, 0xB5, 0x03, 0x6C, 0xCC, 0xB1, 0xB5, 0x03,
+ // Bytes 3400 - 343f
+ 0x6D, 0xCC, 0x81, 0xC9, 0x03, 0x6D, 0xCC, 0x87,
+ 0xC9, 0x03, 0x6D, 0xCC, 0xA3, 0xB5, 0x03, 0x6E,
+ 0xCC, 0x80, 0xC9, 0x03, 0x6E, 0xCC, 0x81, 0xC9,
+ 0x03, 0x6E, 0xCC, 0x83, 0xC9, 0x03, 0x6E, 0xCC,
+ 0x87, 0xC9, 0x03, 0x6E, 0xCC, 0x8C, 0xC9, 0x03,
+ 0x6E, 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0xA7,
+ 0xA5, 0x03, 0x6E, 0xCC, 0xAD, 0xB5, 0x03, 0x6E,
+ 0xCC, 0xB1, 0xB5, 0x03, 0x6F, 0xCC, 0x80, 0xC9,
+ // Bytes 3440 - 347f
+ 0x03, 0x6F, 0xCC, 0x81, 0xC9, 0x03, 0x6F, 0xCC,
+ 0x86, 0xC9, 0x03, 0x6F, 0xCC, 0x89, 0xC9, 0x03,
+ 0x6F, 0xCC, 0x8B, 0xC9, 0x03, 0x6F, 0xCC, 0x8C,
+ 0xC9, 0x03, 0x6F, 0xCC, 0x8F, 0xC9, 0x03, 0x6F,
+ 0xCC, 0x91, 0xC9, 0x03, 0x70, 0xCC, 0x81, 0xC9,
+ 0x03, 0x70, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC,
+ 0x81, 0xC9, 0x03, 0x72, 0xCC, 0x87, 0xC9, 0x03,
+ 0x72, 0xCC, 0x8C, 0xC9, 0x03, 0x72, 0xCC, 0x8F,
+ // Bytes 3480 - 34bf
+ 0xC9, 0x03, 0x72, 0xCC, 0x91, 0xC9, 0x03, 0x72,
+ 0xCC, 0xA7, 0xA5, 0x03, 0x72, 0xCC, 0xB1, 0xB5,
+ 0x03, 0x73, 0xCC, 0x82, 0xC9, 0x03, 0x73, 0xCC,
+ 0x87, 0xC9, 0x03, 0x73, 0xCC, 0xA6, 0xB5, 0x03,
+ 0x73, 0xCC, 0xA7, 0xA5, 0x03, 0x74, 0xCC, 0x87,
+ 0xC9, 0x03, 0x74, 0xCC, 0x88, 0xC9, 0x03, 0x74,
+ 0xCC, 0x8C, 0xC9, 0x03, 0x74, 0xCC, 0xA3, 0xB5,
+ 0x03, 0x74, 0xCC, 0xA6, 0xB5, 0x03, 0x74, 0xCC,
+ // Bytes 34c0 - 34ff
+ 0xA7, 0xA5, 0x03, 0x74, 0xCC, 0xAD, 0xB5, 0x03,
+ 0x74, 0xCC, 0xB1, 0xB5, 0x03, 0x75, 0xCC, 0x80,
+ 0xC9, 0x03, 0x75, 0xCC, 0x81, 0xC9, 0x03, 0x75,
+ 0xCC, 0x82, 0xC9, 0x03, 0x75, 0xCC, 0x86, 0xC9,
+ 0x03, 0x75, 0xCC, 0x89, 0xC9, 0x03, 0x75, 0xCC,
+ 0x8A, 0xC9, 0x03, 0x75, 0xCC, 0x8B, 0xC9, 0x03,
+ 0x75, 0xCC, 0x8C, 0xC9, 0x03, 0x75, 0xCC, 0x8F,
+ 0xC9, 0x03, 0x75, 0xCC, 0x91, 0xC9, 0x03, 0x75,
+ // Bytes 3500 - 353f
+ 0xCC, 0xA3, 0xB5, 0x03, 0x75, 0xCC, 0xA4, 0xB5,
+ 0x03, 0x75, 0xCC, 0xA8, 0xA5, 0x03, 0x75, 0xCC,
+ 0xAD, 0xB5, 0x03, 0x75, 0xCC, 0xB0, 0xB5, 0x03,
+ 0x76, 0xCC, 0x83, 0xC9, 0x03, 0x76, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x77, 0xCC, 0x80, 0xC9, 0x03, 0x77,
+ 0xCC, 0x81, 0xC9, 0x03, 0x77, 0xCC, 0x82, 0xC9,
+ 0x03, 0x77, 0xCC, 0x87, 0xC9, 0x03, 0x77, 0xCC,
+ 0x88, 0xC9, 0x03, 0x77, 0xCC, 0x8A, 0xC9, 0x03,
+ // Bytes 3540 - 357f
+ 0x77, 0xCC, 0xA3, 0xB5, 0x03, 0x78, 0xCC, 0x87,
+ 0xC9, 0x03, 0x78, 0xCC, 0x88, 0xC9, 0x03, 0x79,
+ 0xCC, 0x80, 0xC9, 0x03, 0x79, 0xCC, 0x81, 0xC9,
+ 0x03, 0x79, 0xCC, 0x82, 0xC9, 0x03, 0x79, 0xCC,
+ 0x83, 0xC9, 0x03, 0x79, 0xCC, 0x84, 0xC9, 0x03,
+ 0x79, 0xCC, 0x87, 0xC9, 0x03, 0x79, 0xCC, 0x88,
+ 0xC9, 0x03, 0x79, 0xCC, 0x89, 0xC9, 0x03, 0x79,
+ 0xCC, 0x8A, 0xC9, 0x03, 0x79, 0xCC, 0xA3, 0xB5,
+ // Bytes 3580 - 35bf
+ 0x03, 0x7A, 0xCC, 0x81, 0xC9, 0x03, 0x7A, 0xCC,
+ 0x82, 0xC9, 0x03, 0x7A, 0xCC, 0x87, 0xC9, 0x03,
+ 0x7A, 0xCC, 0x8C, 0xC9, 0x03, 0x7A, 0xCC, 0xA3,
+ 0xB5, 0x03, 0x7A, 0xCC, 0xB1, 0xB5, 0x04, 0xC2,
+ 0xA8, 0xCC, 0x80, 0xCA, 0x04, 0xC2, 0xA8, 0xCC,
+ 0x81, 0xCA, 0x04, 0xC2, 0xA8, 0xCD, 0x82, 0xCA,
+ 0x04, 0xC3, 0x86, 0xCC, 0x81, 0xC9, 0x04, 0xC3,
+ 0x86, 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0x98, 0xCC,
+ // Bytes 35c0 - 35ff
+ 0x81, 0xC9, 0x04, 0xC3, 0xA6, 0xCC, 0x81, 0xC9,
+ 0x04, 0xC3, 0xA6, 0xCC, 0x84, 0xC9, 0x04, 0xC3,
+ 0xB8, 0xCC, 0x81, 0xC9, 0x04, 0xC5, 0xBF, 0xCC,
+ 0x87, 0xC9, 0x04, 0xC6, 0xB7, 0xCC, 0x8C, 0xC9,
+ 0x04, 0xCA, 0x92, 0xCC, 0x8C, 0xC9, 0x04, 0xCE,
+ 0x91, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x91, 0xCC,
+ 0x81, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x84, 0xC9,
+ 0x04, 0xCE, 0x91, 0xCC, 0x86, 0xC9, 0x04, 0xCE,
+ // Bytes 3600 - 363f
+ 0x91, 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0x95, 0xCC,
+ 0x80, 0xC9, 0x04, 0xCE, 0x95, 0xCC, 0x81, 0xC9,
+ 0x04, 0xCE, 0x97, 0xCC, 0x80, 0xC9, 0x04, 0xCE,
+ 0x97, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, 0xCD,
+ 0x85, 0xD9, 0x04, 0xCE, 0x99, 0xCC, 0x80, 0xC9,
+ 0x04, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x04, 0xCE,
+ 0x99, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x99, 0xCC,
+ 0x86, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x88, 0xC9,
+ // Bytes 3640 - 367f
+ 0x04, 0xCE, 0x9F, 0xCC, 0x80, 0xC9, 0x04, 0xCE,
+ 0x9F, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA1, 0xCC,
+ 0x94, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x80, 0xC9,
+ 0x04, 0xCE, 0xA5, 0xCC, 0x81, 0xC9, 0x04, 0xCE,
+ 0xA5, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xA5, 0xCC,
+ 0x86, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x88, 0xC9,
+ 0x04, 0xCE, 0xA9, 0xCC, 0x80, 0xC9, 0x04, 0xCE,
+ 0xA9, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA9, 0xCD,
+ // Bytes 3680 - 36bf
+ 0x85, 0xD9, 0x04, 0xCE, 0xB1, 0xCC, 0x84, 0xC9,
+ 0x04, 0xCE, 0xB1, 0xCC, 0x86, 0xC9, 0x04, 0xCE,
+ 0xB1, 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB5, 0xCC,
+ 0x80, 0xC9, 0x04, 0xCE, 0xB5, 0xCC, 0x81, 0xC9,
+ 0x04, 0xCE, 0xB7, 0xCD, 0x85, 0xD9, 0x04, 0xCE,
+ 0xB9, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xB9, 0xCC,
+ 0x81, 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x84, 0xC9,
+ 0x04, 0xCE, 0xB9, 0xCC, 0x86, 0xC9, 0x04, 0xCE,
+ // Bytes 36c0 - 36ff
+ 0xB9, 0xCD, 0x82, 0xC9, 0x04, 0xCE, 0xBF, 0xCC,
+ 0x80, 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x81, 0xC9,
+ 0x04, 0xCF, 0x81, 0xCC, 0x93, 0xC9, 0x04, 0xCF,
+ 0x81, 0xCC, 0x94, 0xC9, 0x04, 0xCF, 0x85, 0xCC,
+ 0x80, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x81, 0xC9,
+ 0x04, 0xCF, 0x85, 0xCC, 0x84, 0xC9, 0x04, 0xCF,
+ 0x85, 0xCC, 0x86, 0xC9, 0x04, 0xCF, 0x85, 0xCD,
+ 0x82, 0xC9, 0x04, 0xCF, 0x89, 0xCD, 0x85, 0xD9,
+ // Bytes 3700 - 373f
+ 0x04, 0xCF, 0x92, 0xCC, 0x81, 0xC9, 0x04, 0xCF,
+ 0x92, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x86, 0xCC,
+ 0x88, 0xC9, 0x04, 0xD0, 0x90, 0xCC, 0x86, 0xC9,
+ 0x04, 0xD0, 0x90, 0xCC, 0x88, 0xC9, 0x04, 0xD0,
+ 0x93, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x95, 0xCC,
+ 0x80, 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x86, 0xC9,
+ 0x04, 0xD0, 0x95, 0xCC, 0x88, 0xC9, 0x04, 0xD0,
+ 0x96, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x96, 0xCC,
+ // Bytes 3740 - 377f
+ 0x88, 0xC9, 0x04, 0xD0, 0x97, 0xCC, 0x88, 0xC9,
+ 0x04, 0xD0, 0x98, 0xCC, 0x80, 0xC9, 0x04, 0xD0,
+ 0x98, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0x98, 0xCC,
+ 0x86, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x88, 0xC9,
+ 0x04, 0xD0, 0x9A, 0xCC, 0x81, 0xC9, 0x04, 0xD0,
+ 0x9E, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC,
+ 0x84, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x86, 0xC9,
+ 0x04, 0xD0, 0xA3, 0xCC, 0x88, 0xC9, 0x04, 0xD0,
+ // Bytes 3780 - 37bf
+ 0xA3, 0xCC, 0x8B, 0xC9, 0x04, 0xD0, 0xA7, 0xCC,
+ 0x88, 0xC9, 0x04, 0xD0, 0xAB, 0xCC, 0x88, 0xC9,
+ 0x04, 0xD0, 0xAD, 0xCC, 0x88, 0xC9, 0x04, 0xD0,
+ 0xB0, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB0, 0xCC,
+ 0x88, 0xC9, 0x04, 0xD0, 0xB3, 0xCC, 0x81, 0xC9,
+ 0x04, 0xD0, 0xB5, 0xCC, 0x80, 0xC9, 0x04, 0xD0,
+ 0xB5, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB5, 0xCC,
+ 0x88, 0xC9, 0x04, 0xD0, 0xB6, 0xCC, 0x86, 0xC9,
+ // Bytes 37c0 - 37ff
+ 0x04, 0xD0, 0xB6, 0xCC, 0x88, 0xC9, 0x04, 0xD0,
+ 0xB7, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB8, 0xCC,
+ 0x80, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x84, 0xC9,
+ 0x04, 0xD0, 0xB8, 0xCC, 0x86, 0xC9, 0x04, 0xD0,
+ 0xB8, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xBA, 0xCC,
+ 0x81, 0xC9, 0x04, 0xD0, 0xBE, 0xCC, 0x88, 0xC9,
+ 0x04, 0xD1, 0x83, 0xCC, 0x84, 0xC9, 0x04, 0xD1,
+ 0x83, 0xCC, 0x86, 0xC9, 0x04, 0xD1, 0x83, 0xCC,
+ // Bytes 3800 - 383f
+ 0x88, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x8B, 0xC9,
+ 0x04, 0xD1, 0x87, 0xCC, 0x88, 0xC9, 0x04, 0xD1,
+ 0x8B, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8D, 0xCC,
+ 0x88, 0xC9, 0x04, 0xD1, 0x96, 0xCC, 0x88, 0xC9,
+ 0x04, 0xD1, 0xB4, 0xCC, 0x8F, 0xC9, 0x04, 0xD1,
+ 0xB5, 0xCC, 0x8F, 0xC9, 0x04, 0xD3, 0x98, 0xCC,
+ 0x88, 0xC9, 0x04, 0xD3, 0x99, 0xCC, 0x88, 0xC9,
+ 0x04, 0xD3, 0xA8, 0xCC, 0x88, 0xC9, 0x04, 0xD3,
+ // Bytes 3840 - 387f
+ 0xA9, 0xCC, 0x88, 0xC9, 0x04, 0xD8, 0xA7, 0xD9,
+ 0x93, 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x94, 0xC9,
+ 0x04, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, 0x04, 0xD9,
+ 0x88, 0xD9, 0x94, 0xC9, 0x04, 0xD9, 0x8A, 0xD9,
+ 0x94, 0xC9, 0x04, 0xDB, 0x81, 0xD9, 0x94, 0xC9,
+ 0x04, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x04, 0xDB,
+ 0x95, 0xD9, 0x94, 0xC9, 0x05, 0x41, 0xCC, 0x82,
+ 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC,
+ // Bytes 3880 - 38bf
+ 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x83,
+ 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x89, 0xCA,
+ 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x80, 0xCA, 0x05,
+ 0x41, 0xCC, 0x86, 0xCC, 0x81, 0xCA, 0x05, 0x41,
+ 0xCC, 0x86, 0xCC, 0x83, 0xCA, 0x05, 0x41, 0xCC,
+ 0x86, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, 0x87,
+ 0xCC, 0x84, 0xCA, 0x05, 0x41, 0xCC, 0x88, 0xCC,
+ 0x84, 0xCA, 0x05, 0x41, 0xCC, 0x8A, 0xCC, 0x81,
+ // Bytes 38c0 - 38ff
+ 0xCA, 0x05, 0x41, 0xCC, 0xA3, 0xCC, 0x82, 0xCA,
+ 0x05, 0x41, 0xCC, 0xA3, 0xCC, 0x86, 0xCA, 0x05,
+ 0x43, 0xCC, 0xA7, 0xCC, 0x81, 0xCA, 0x05, 0x45,
+ 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, 0x45, 0xCC,
+ 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82,
+ 0xCC, 0x83, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC,
+ 0x89, 0xCA, 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x80,
+ 0xCA, 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x81, 0xCA,
+ // Bytes 3900 - 393f
+ 0x05, 0x45, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05,
+ 0x45, 0xCC, 0xA7, 0xCC, 0x86, 0xCA, 0x05, 0x49,
+ 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x4C, 0xCC,
+ 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x82,
+ 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC,
+ 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x83,
+ 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x89, 0xCA,
+ 0x05, 0x4F, 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05,
+ // Bytes 3940 - 397f
+ 0x4F, 0xCC, 0x83, 0xCC, 0x84, 0xCA, 0x05, 0x4F,
+ 0xCC, 0x83, 0xCC, 0x88, 0xCA, 0x05, 0x4F, 0xCC,
+ 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, 0x84,
+ 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x87, 0xCC,
+ 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x88, 0xCC, 0x84,
+ 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x80, 0xCA,
+ 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05,
+ 0x4F, 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x4F,
+ // Bytes 3980 - 39bf
+ 0xCC, 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC,
+ 0x9B, 0xCC, 0xA3, 0xB6, 0x05, 0x4F, 0xCC, 0xA3,
+ 0xCC, 0x82, 0xCA, 0x05, 0x4F, 0xCC, 0xA8, 0xCC,
+ 0x84, 0xCA, 0x05, 0x52, 0xCC, 0xA3, 0xCC, 0x84,
+ 0xCA, 0x05, 0x53, 0xCC, 0x81, 0xCC, 0x87, 0xCA,
+ 0x05, 0x53, 0xCC, 0x8C, 0xCC, 0x87, 0xCA, 0x05,
+ 0x53, 0xCC, 0xA3, 0xCC, 0x87, 0xCA, 0x05, 0x55,
+ 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC,
+ // Bytes 39c0 - 39ff
+ 0x84, 0xCC, 0x88, 0xCA, 0x05, 0x55, 0xCC, 0x88,
+ 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC,
+ 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x84,
+ 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x8C, 0xCA,
+ 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05,
+ 0x55, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x55,
+ 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x55, 0xCC,
+ 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x55, 0xCC, 0x9B,
+ // Bytes 3a00 - 3a3f
+ 0xCC, 0xA3, 0xB6, 0x05, 0x61, 0xCC, 0x82, 0xCC,
+ 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x81,
+ 0xCA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x83, 0xCA,
+ 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05,
+ 0x61, 0xCC, 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x61,
+ 0xCC, 0x86, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC,
+ 0x86, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, 0x86,
+ 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x87, 0xCC,
+ // Bytes 3a40 - 3a7f
+ 0x84, 0xCA, 0x05, 0x61, 0xCC, 0x88, 0xCC, 0x84,
+ 0xCA, 0x05, 0x61, 0xCC, 0x8A, 0xCC, 0x81, 0xCA,
+ 0x05, 0x61, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05,
+ 0x61, 0xCC, 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x63,
+ 0xCC, 0xA7, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC,
+ 0x82, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, 0x82,
+ 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC,
+ 0x83, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x89,
+ // Bytes 3a80 - 3abf
+ 0xCA, 0x05, 0x65, 0xCC, 0x84, 0xCC, 0x80, 0xCA,
+ 0x05, 0x65, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05,
+ 0x65, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x65,
+ 0xCC, 0xA7, 0xCC, 0x86, 0xCA, 0x05, 0x69, 0xCC,
+ 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x6C, 0xCC, 0xA3,
+ 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC,
+ 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x81,
+ 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x83, 0xCA,
+ // Bytes 3ac0 - 3aff
+ 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05,
+ 0x6F, 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x6F,
+ 0xCC, 0x83, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC,
+ 0x83, 0xCC, 0x88, 0xCA, 0x05, 0x6F, 0xCC, 0x84,
+ 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC,
+ 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x87, 0xCC, 0x84,
+ 0xCA, 0x05, 0x6F, 0xCC, 0x88, 0xCC, 0x84, 0xCA,
+ 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05,
+ // Bytes 3b00 - 3b3f
+ 0x6F, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x6F,
+ 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC,
+ 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x9B,
+ 0xCC, 0xA3, 0xB6, 0x05, 0x6F, 0xCC, 0xA3, 0xCC,
+ 0x82, 0xCA, 0x05, 0x6F, 0xCC, 0xA8, 0xCC, 0x84,
+ 0xCA, 0x05, 0x72, 0xCC, 0xA3, 0xCC, 0x84, 0xCA,
+ 0x05, 0x73, 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05,
+ 0x73, 0xCC, 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x73,
+ // Bytes 3b40 - 3b7f
+ 0xCC, 0xA3, 0xCC, 0x87, 0xCA, 0x05, 0x75, 0xCC,
+ 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x84,
+ 0xCC, 0x88, 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC,
+ 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x81,
+ 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x84, 0xCA,
+ 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05,
+ 0x75, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x75,
+ 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x75, 0xCC,
+ // Bytes 3b80 - 3bbf
+ 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x75, 0xCC, 0x9B,
+ 0xCC, 0x89, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC,
+ 0xA3, 0xB6, 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x80,
+ 0xCA, 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x81, 0xCA,
+ 0x05, 0xE1, 0xBE, 0xBF, 0xCD, 0x82, 0xCA, 0x05,
+ 0xE1, 0xBF, 0xBE, 0xCC, 0x80, 0xCA, 0x05, 0xE1,
+ 0xBF, 0xBE, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBF,
+ 0xBE, 0xCD, 0x82, 0xCA, 0x05, 0xE2, 0x86, 0x90,
+ // Bytes 3bc0 - 3bff
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x86, 0x92, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x86, 0x94, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x87, 0x90, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x87, 0x92, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x87, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x88, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88,
+ 0x88, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x8B,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0xA3, 0xCC,
+ // Bytes 3c00 - 3c3f
+ 0xB8, 0x05, 0x05, 0xE2, 0x88, 0xA5, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x88, 0xBC, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x89, 0x83, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x89, 0x85, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x89, 0x88, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89,
+ 0x8D, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA1,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA4, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA5, 0xCC, 0xB8,
+ // Bytes 3c40 - 3c7f
+ 0x05, 0x05, 0xE2, 0x89, 0xB2, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x89, 0xB3, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x89, 0xB6, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x89, 0xB7, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89,
+ 0xBA, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBB,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBC, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBD, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x8A, 0x82, 0xCC, 0xB8, 0x05,
+ // Bytes 3c80 - 3cbf
+ 0x05, 0xE2, 0x8A, 0x83, 0xCC, 0xB8, 0x05, 0x05,
+ 0xE2, 0x8A, 0x86, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x8A, 0x87, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A,
+ 0x91, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x92,
+ 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xA2, 0xCC,
+ 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xA8, 0xCC, 0xB8,
+ 0x05, 0x05, 0xE2, 0x8A, 0xA9, 0xCC, 0xB8, 0x05,
+ 0x05, 0xE2, 0x8A, 0xAB, 0xCC, 0xB8, 0x05, 0x05,
+ // Bytes 3cc0 - 3cff
+ 0xE2, 0x8A, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2,
+ 0x8A, 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A,
+ 0xB4, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB5,
+ 0xCC, 0xB8, 0x05, 0x06, 0xCE, 0x91, 0xCC, 0x93,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x91, 0xCC, 0x94,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x95, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x94,
+ // Bytes 3d00 - 3d3f
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x97, 0xCC, 0x93,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x97, 0xCC, 0x94,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x99, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x93,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94,
+ // Bytes 3d40 - 3d7f
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94,
+ // Bytes 3d80 - 3dbf
+ 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xA9, 0xCC, 0x93,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xA9, 0xCC, 0x94,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x80,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x81,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x93,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x94,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCD, 0x82,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB5, 0xCC, 0x93,
+ // Bytes 3dc0 - 3dff
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB7, 0xCC, 0x80,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x81,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x93,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x94,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCD, 0x82,
+ // Bytes 3e00 - 3e3f
+ 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB9, 0xCC, 0x88,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x88,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x88,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94,
+ // Bytes 3e40 - 3e7f
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88,
+ // Bytes 3e80 - 3ebf
+ 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94,
+ 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94,
+ 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x89, 0xCC, 0x80,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x81,
+ // Bytes 3ec0 - 3eff
+ 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x93,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x94,
+ 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCD, 0x82,
+ 0xCD, 0x85, 0xDA, 0x06, 0xE0, 0xA4, 0xA8, 0xE0,
+ 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xA4, 0xB0, 0xE0,
+ 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xA4, 0xB3, 0xE0,
+ 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xB1, 0x86, 0xE0,
+ 0xB1, 0x96, 0x85, 0x06, 0xE0, 0xB7, 0x99, 0xE0,
+ // Bytes 3f00 - 3f3f
+ 0xB7, 0x8A, 0x11, 0x06, 0xE3, 0x81, 0x86, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8B, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8D, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8F, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x91, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x93, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x95, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x97, 0xE3,
+ // Bytes 3f40 - 3f7f
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x99, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9B, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9D, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9F, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA1, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA4, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA6, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA8, 0xE3,
+ // Bytes 3f80 - 3fbf
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xAF, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xAF, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB2, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB2, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB5, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB5, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB8, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB8, 0xE3,
+ // Bytes 3fc0 - 3fff
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xBB, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xBB, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x82, 0x9D, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xA6, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAB, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAD, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAF, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB1, 0xE3,
+ // Bytes 4000 - 403f
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB3, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB5, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB7, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB9, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBB, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBD, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBF, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x81, 0xE3,
+ // Bytes 4040 - 407f
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x84, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x86, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x88, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x8F, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x8F, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x92, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x92, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x95, 0xE3,
+ // Bytes 4080 - 40bf
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x95, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x98, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x98, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x9B, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x9B, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0xAF, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB0, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB1, 0xE3,
+ // Bytes 40c0 - 40ff
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB2, 0xE3,
+ 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xBD, 0xE3,
+ 0x82, 0x99, 0x0D, 0x08, 0xCE, 0x91, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91,
+ 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94,
+ // Bytes 4100 - 413f
+ 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91,
+ 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x93,
+ 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97,
+ 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85,
+ // Bytes 4140 - 417f
+ 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9,
+ 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9,
+ // Bytes 4180 - 41bf
+ 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x93,
+ 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1,
+ 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82,
+ // Bytes 41c0 - 41ff
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7,
+ 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94,
+ 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7,
+ 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08,
+ // Bytes 4200 - 423f
+ 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x93,
+ 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89,
+ 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08,
+ 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85,
+ 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82,
+ 0xCD, 0x85, 0xDB, 0x08, 0xF0, 0x91, 0x82, 0x99,
+ // Bytes 4240 - 427f
+ 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91,
+ 0x82, 0x9B, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x08,
+ 0xF0, 0x91, 0x82, 0xA5, 0xF0, 0x91, 0x82, 0xBA,
+ 0x09, 0x42, 0xC2, 0xB4, 0x01, 0x43, 0x20, 0xCC,
+ 0x81, 0xC9, 0x43, 0x20, 0xCC, 0x83, 0xC9, 0x43,
+ 0x20, 0xCC, 0x84, 0xC9, 0x43, 0x20, 0xCC, 0x85,
+ 0xC9, 0x43, 0x20, 0xCC, 0x86, 0xC9, 0x43, 0x20,
+ 0xCC, 0x87, 0xC9, 0x43, 0x20, 0xCC, 0x88, 0xC9,
+ // Bytes 4280 - 42bf
+ 0x43, 0x20, 0xCC, 0x8A, 0xC9, 0x43, 0x20, 0xCC,
+ 0x8B, 0xC9, 0x43, 0x20, 0xCC, 0x93, 0xC9, 0x43,
+ 0x20, 0xCC, 0x94, 0xC9, 0x43, 0x20, 0xCC, 0xA7,
+ 0xA5, 0x43, 0x20, 0xCC, 0xA8, 0xA5, 0x43, 0x20,
+ 0xCC, 0xB3, 0xB5, 0x43, 0x20, 0xCD, 0x82, 0xC9,
+ 0x43, 0x20, 0xCD, 0x85, 0xD9, 0x43, 0x20, 0xD9,
+ 0x8B, 0x59, 0x43, 0x20, 0xD9, 0x8C, 0x5D, 0x43,
+ 0x20, 0xD9, 0x8D, 0x61, 0x43, 0x20, 0xD9, 0x8E,
+ // Bytes 42c0 - 42ff
+ 0x65, 0x43, 0x20, 0xD9, 0x8F, 0x69, 0x43, 0x20,
+ 0xD9, 0x90, 0x6D, 0x43, 0x20, 0xD9, 0x91, 0x71,
+ 0x43, 0x20, 0xD9, 0x92, 0x75, 0x43, 0x41, 0xCC,
+ 0x8A, 0xC9, 0x43, 0x73, 0xCC, 0x87, 0xC9, 0x44,
+ 0x20, 0xE3, 0x82, 0x99, 0x0D, 0x44, 0x20, 0xE3,
+ 0x82, 0x9A, 0x0D, 0x44, 0xC2, 0xA8, 0xCC, 0x81,
+ 0xCA, 0x44, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x44,
+ 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x97,
+ // Bytes 4300 - 433f
+ 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x99, 0xCC, 0x81,
+ 0xC9, 0x44, 0xCE, 0x9F, 0xCC, 0x81, 0xC9, 0x44,
+ 0xCE, 0xA5, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5,
+ 0xCC, 0x88, 0xC9, 0x44, 0xCE, 0xA9, 0xCC, 0x81,
+ 0xC9, 0x44, 0xCE, 0xB1, 0xCC, 0x81, 0xC9, 0x44,
+ 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB7,
+ 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB9, 0xCC, 0x81,
+ 0xC9, 0x44, 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x44,
+ // Bytes 4340 - 437f
+ 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x89,
+ 0xCC, 0x81, 0xC9, 0x44, 0xD7, 0x90, 0xD6, 0xB7,
+ 0x31, 0x44, 0xD7, 0x90, 0xD6, 0xB8, 0x35, 0x44,
+ 0xD7, 0x90, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x91,
+ 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBF,
+ 0x49, 0x44, 0xD7, 0x92, 0xD6, 0xBC, 0x41, 0x44,
+ 0xD7, 0x93, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x94,
+ 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x95, 0xD6, 0xB9,
+ // Bytes 4380 - 43bf
+ 0x39, 0x44, 0xD7, 0x95, 0xD6, 0xBC, 0x41, 0x44,
+ 0xD7, 0x96, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x98,
+ 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x99, 0xD6, 0xB4,
+ 0x25, 0x44, 0xD7, 0x99, 0xD6, 0xBC, 0x41, 0x44,
+ 0xD7, 0x9A, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9B,
+ 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBF,
+ 0x49, 0x44, 0xD7, 0x9C, 0xD6, 0xBC, 0x41, 0x44,
+ 0xD7, 0x9E, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA0,
+ // Bytes 43c0 - 43ff
+ 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA1, 0xD6, 0xBC,
+ 0x41, 0x44, 0xD7, 0xA3, 0xD6, 0xBC, 0x41, 0x44,
+ 0xD7, 0xA4, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4,
+ 0xD6, 0xBF, 0x49, 0x44, 0xD7, 0xA6, 0xD6, 0xBC,
+ 0x41, 0x44, 0xD7, 0xA7, 0xD6, 0xBC, 0x41, 0x44,
+ 0xD7, 0xA8, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA9,
+ 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD7, 0x81,
+ 0x4D, 0x44, 0xD7, 0xA9, 0xD7, 0x82, 0x51, 0x44,
+ // Bytes 4400 - 443f
+ 0xD7, 0xAA, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xB2,
+ 0xD6, 0xB7, 0x31, 0x44, 0xD8, 0xA7, 0xD9, 0x8B,
+ 0x59, 0x44, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x44,
+ 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x44, 0xD8, 0xA7,
+ 0xD9, 0x95, 0xB5, 0x44, 0xD8, 0xB0, 0xD9, 0xB0,
+ 0x79, 0x44, 0xD8, 0xB1, 0xD9, 0xB0, 0x79, 0x44,
+ 0xD9, 0x80, 0xD9, 0x8B, 0x59, 0x44, 0xD9, 0x80,
+ 0xD9, 0x8E, 0x65, 0x44, 0xD9, 0x80, 0xD9, 0x8F,
+ // Bytes 4440 - 447f
+ 0x69, 0x44, 0xD9, 0x80, 0xD9, 0x90, 0x6D, 0x44,
+ 0xD9, 0x80, 0xD9, 0x91, 0x71, 0x44, 0xD9, 0x80,
+ 0xD9, 0x92, 0x75, 0x44, 0xD9, 0x87, 0xD9, 0xB0,
+ 0x79, 0x44, 0xD9, 0x88, 0xD9, 0x94, 0xC9, 0x44,
+ 0xD9, 0x89, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x8A,
+ 0xD9, 0x94, 0xC9, 0x44, 0xDB, 0x92, 0xD9, 0x94,
+ 0xC9, 0x44, 0xDB, 0x95, 0xD9, 0x94, 0xC9, 0x45,
+ 0x20, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x45, 0x20,
+ // Bytes 4480 - 44bf
+ 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC,
+ 0x88, 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xCC, 0x93,
+ 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC,
+ 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x45, 0x20, 0xCC, 0x94, 0xCC, 0x80, 0xCA,
+ 0x45, 0x20, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x45,
+ 0x20, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x45, 0x20,
+ 0xD9, 0x8C, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9,
+ // Bytes 44c0 - 44ff
+ 0x8D, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8E,
+ 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8F, 0xD9,
+ 0x91, 0x72, 0x45, 0x20, 0xD9, 0x90, 0xD9, 0x91,
+ 0x72, 0x45, 0x20, 0xD9, 0x91, 0xD9, 0xB0, 0x7A,
+ 0x45, 0xE2, 0xAB, 0x9D, 0xCC, 0xB8, 0x05, 0x46,
+ 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x46,
+ 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x46,
+ 0xD7, 0xA9, 0xD6, 0xBC, 0xD7, 0x81, 0x4E, 0x46,
+ // Bytes 4500 - 453f
+ 0xD7, 0xA9, 0xD6, 0xBC, 0xD7, 0x82, 0x52, 0x46,
+ 0xD9, 0x80, 0xD9, 0x8E, 0xD9, 0x91, 0x72, 0x46,
+ 0xD9, 0x80, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x46,
+ 0xD9, 0x80, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x46,
+ 0xE0, 0xA4, 0x95, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA4, 0x96, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA4, 0x97, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA4, 0x9C, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ // Bytes 4540 - 457f
+ 0xE0, 0xA4, 0xA1, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA4, 0xA2, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA4, 0xAB, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA4, 0xAF, 0xE0, 0xA4, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA6, 0xA1, 0xE0, 0xA6, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA6, 0xA2, 0xE0, 0xA6, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA6, 0xAF, 0xE0, 0xA6, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA8, 0x96, 0xE0, 0xA8, 0xBC, 0x09, 0x46,
+ // Bytes 4580 - 45bf
+ 0xE0, 0xA8, 0x97, 0xE0, 0xA8, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA8, 0x9C, 0xE0, 0xA8, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA8, 0xAB, 0xE0, 0xA8, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA8, 0xB2, 0xE0, 0xA8, 0xBC, 0x09, 0x46,
+ 0xE0, 0xA8, 0xB8, 0xE0, 0xA8, 0xBC, 0x09, 0x46,
+ 0xE0, 0xAC, 0xA1, 0xE0, 0xAC, 0xBC, 0x09, 0x46,
+ 0xE0, 0xAC, 0xA2, 0xE0, 0xAC, 0xBC, 0x09, 0x46,
+ 0xE0, 0xBE, 0xB2, 0xE0, 0xBE, 0x80, 0x9D, 0x46,
+ // Bytes 45c0 - 45ff
+ 0xE0, 0xBE, 0xB3, 0xE0, 0xBE, 0x80, 0x9D, 0x46,
+ 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, 0x48,
+ 0xF0, 0x9D, 0x85, 0x97, 0xF0, 0x9D, 0x85, 0xA5,
+ 0xAD, 0x48, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D,
+ 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xB9,
+ 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D,
+ 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x49,
+ 0xE0, 0xBE, 0xB2, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE,
+ // Bytes 4600 - 463f
+ 0x80, 0x9E, 0x49, 0xE0, 0xBE, 0xB3, 0xE0, 0xBD,
+ 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x4C, 0xF0, 0x9D,
+ 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D,
+ 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98,
+ 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF,
+ 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D,
+ 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB0, 0xAE, 0x4C,
+ 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5,
+ // Bytes 4640 - 467f
+ 0xF0, 0x9D, 0x85, 0xB1, 0xAE, 0x4C, 0xF0, 0x9D,
+ 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D,
+ 0x85, 0xB2, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xB9,
+ 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE,
+ 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D,
+ 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C,
+ 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5,
+ 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D,
+ // Bytes 4680 - 46bf
+ 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D,
+ 0x85, 0xAF, 0xAE, 0x83, 0x41, 0xCC, 0x82, 0xC9,
+ 0x83, 0x41, 0xCC, 0x86, 0xC9, 0x83, 0x41, 0xCC,
+ 0x87, 0xC9, 0x83, 0x41, 0xCC, 0x88, 0xC9, 0x83,
+ 0x41, 0xCC, 0x8A, 0xC9, 0x83, 0x41, 0xCC, 0xA3,
+ 0xB5, 0x83, 0x43, 0xCC, 0xA7, 0xA5, 0x83, 0x45,
+ 0xCC, 0x82, 0xC9, 0x83, 0x45, 0xCC, 0x84, 0xC9,
+ 0x83, 0x45, 0xCC, 0xA3, 0xB5, 0x83, 0x45, 0xCC,
+ // Bytes 46c0 - 46ff
+ 0xA7, 0xA5, 0x83, 0x49, 0xCC, 0x88, 0xC9, 0x83,
+ 0x4C, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0x82,
+ 0xC9, 0x83, 0x4F, 0xCC, 0x83, 0xC9, 0x83, 0x4F,
+ 0xCC, 0x84, 0xC9, 0x83, 0x4F, 0xCC, 0x87, 0xC9,
+ 0x83, 0x4F, 0xCC, 0x88, 0xC9, 0x83, 0x4F, 0xCC,
+ 0x9B, 0xAD, 0x83, 0x4F, 0xCC, 0xA3, 0xB5, 0x83,
+ 0x4F, 0xCC, 0xA8, 0xA5, 0x83, 0x52, 0xCC, 0xA3,
+ 0xB5, 0x83, 0x53, 0xCC, 0x81, 0xC9, 0x83, 0x53,
+ // Bytes 4700 - 473f
+ 0xCC, 0x8C, 0xC9, 0x83, 0x53, 0xCC, 0xA3, 0xB5,
+ 0x83, 0x55, 0xCC, 0x83, 0xC9, 0x83, 0x55, 0xCC,
+ 0x84, 0xC9, 0x83, 0x55, 0xCC, 0x88, 0xC9, 0x83,
+ 0x55, 0xCC, 0x9B, 0xAD, 0x83, 0x61, 0xCC, 0x82,
+ 0xC9, 0x83, 0x61, 0xCC, 0x86, 0xC9, 0x83, 0x61,
+ 0xCC, 0x87, 0xC9, 0x83, 0x61, 0xCC, 0x88, 0xC9,
+ 0x83, 0x61, 0xCC, 0x8A, 0xC9, 0x83, 0x61, 0xCC,
+ 0xA3, 0xB5, 0x83, 0x63, 0xCC, 0xA7, 0xA5, 0x83,
+ // Bytes 4740 - 477f
+ 0x65, 0xCC, 0x82, 0xC9, 0x83, 0x65, 0xCC, 0x84,
+ 0xC9, 0x83, 0x65, 0xCC, 0xA3, 0xB5, 0x83, 0x65,
+ 0xCC, 0xA7, 0xA5, 0x83, 0x69, 0xCC, 0x88, 0xC9,
+ 0x83, 0x6C, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC,
+ 0x82, 0xC9, 0x83, 0x6F, 0xCC, 0x83, 0xC9, 0x83,
+ 0x6F, 0xCC, 0x84, 0xC9, 0x83, 0x6F, 0xCC, 0x87,
+ 0xC9, 0x83, 0x6F, 0xCC, 0x88, 0xC9, 0x83, 0x6F,
+ 0xCC, 0x9B, 0xAD, 0x83, 0x6F, 0xCC, 0xA3, 0xB5,
+ // Bytes 4780 - 47bf
+ 0x83, 0x6F, 0xCC, 0xA8, 0xA5, 0x83, 0x72, 0xCC,
+ 0xA3, 0xB5, 0x83, 0x73, 0xCC, 0x81, 0xC9, 0x83,
+ 0x73, 0xCC, 0x8C, 0xC9, 0x83, 0x73, 0xCC, 0xA3,
+ 0xB5, 0x83, 0x75, 0xCC, 0x83, 0xC9, 0x83, 0x75,
+ 0xCC, 0x84, 0xC9, 0x83, 0x75, 0xCC, 0x88, 0xC9,
+ 0x83, 0x75, 0xCC, 0x9B, 0xAD, 0x84, 0xCE, 0x91,
+ 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x91, 0xCC, 0x94,
+ 0xC9, 0x84, 0xCE, 0x95, 0xCC, 0x93, 0xC9, 0x84,
+ // Bytes 47c0 - 47ff
+ 0xCE, 0x95, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x97,
+ 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x94,
+ 0xC9, 0x84, 0xCE, 0x99, 0xCC, 0x93, 0xC9, 0x84,
+ 0xCE, 0x99, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x9F,
+ 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x94,
+ 0xC9, 0x84, 0xCE, 0xA5, 0xCC, 0x94, 0xC9, 0x84,
+ 0xCE, 0xA9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xA9,
+ 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x80,
+ // Bytes 4800 - 483f
+ 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x81, 0xC9, 0x84,
+ 0xCE, 0xB1, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB1,
+ 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB1, 0xCD, 0x82,
+ 0xC9, 0x84, 0xCE, 0xB5, 0xCC, 0x93, 0xC9, 0x84,
+ 0xCE, 0xB5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB7,
+ 0xCC, 0x80, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x81,
+ 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x93, 0xC9, 0x84,
+ 0xCE, 0xB7, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB7,
+ // Bytes 4840 - 487f
+ 0xCD, 0x82, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x88,
+ 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x93, 0xC9, 0x84,
+ 0xCE, 0xB9, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xBF,
+ 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x94,
+ 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x88, 0xC9, 0x84,
+ 0xCF, 0x85, 0xCC, 0x93, 0xC9, 0x84, 0xCF, 0x85,
+ 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x80,
+ 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x81, 0xC9, 0x84,
+ // Bytes 4880 - 48bf
+ 0xCF, 0x89, 0xCC, 0x93, 0xC9, 0x84, 0xCF, 0x89,
+ 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x89, 0xCD, 0x82,
+ 0xC9, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82,
+ // Bytes 48c0 - 48ff
+ 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81,
+ // Bytes 4900 - 493f
+ 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80,
+ // Bytes 4940 - 497f
+ 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82,
+ // Bytes 4980 - 49bf
+ 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82,
+ 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80,
+ 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81,
+ 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82,
+ 0xCA, 0x42, 0xCC, 0x80, 0xC9, 0x32, 0x42, 0xCC,
+ 0x81, 0xC9, 0x32, 0x42, 0xCC, 0x93, 0xC9, 0x32,
+ // Bytes 49c0 - 49ff
+ 0x43, 0xE1, 0x85, 0xA1, 0x01, 0x00, 0x43, 0xE1,
+ 0x85, 0xA2, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA3,
+ 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA4, 0x01, 0x00,
+ 0x43, 0xE1, 0x85, 0xA5, 0x01, 0x00, 0x43, 0xE1,
+ 0x85, 0xA6, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA7,
+ 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA8, 0x01, 0x00,
+ 0x43, 0xE1, 0x85, 0xA9, 0x01, 0x00, 0x43, 0xE1,
+ 0x85, 0xAA, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAB,
+ // Bytes 4a00 - 4a3f
+ 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAC, 0x01, 0x00,
+ 0x43, 0xE1, 0x85, 0xAD, 0x01, 0x00, 0x43, 0xE1,
+ 0x85, 0xAE, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAF,
+ 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB0, 0x01, 0x00,
+ 0x43, 0xE1, 0x85, 0xB1, 0x01, 0x00, 0x43, 0xE1,
+ 0x85, 0xB2, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB3,
+ 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB4, 0x01, 0x00,
+ 0x43, 0xE1, 0x85, 0xB5, 0x01, 0x00, 0x43, 0xE1,
+ // Bytes 4a40 - 4a7f
+ 0x86, 0xAA, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAC,
+ 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAD, 0x01, 0x00,
+ 0x43, 0xE1, 0x86, 0xB0, 0x01, 0x00, 0x43, 0xE1,
+ 0x86, 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB2,
+ 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB3, 0x01, 0x00,
+ 0x43, 0xE1, 0x86, 0xB4, 0x01, 0x00, 0x43, 0xE1,
+ 0x86, 0xB5, 0x01, 0x00, 0x44, 0xCC, 0x88, 0xCC,
+ 0x81, 0xCA, 0x32, 0x43, 0xE3, 0x82, 0x99, 0x0D,
+ // Bytes 4a80 - 4abf
+ 0x03, 0x43, 0xE3, 0x82, 0x9A, 0x0D, 0x03, 0x46,
+ 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB2, 0x9E, 0x26,
+ 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA2,
+ 0x26, 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80,
+ 0x9E, 0x26, 0x00, 0x01,
+}
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfcTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfcValues[c0]
+ }
+ i := nfcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfcTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfcTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfcValues[c0]
+ }
+ i := nfcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// nfcTrie. Total size: 10610 bytes (10.36 KiB). Checksum: 95e8869a9f81e5e6.
+type nfcTrie struct{}
+
+func newNfcTrie(i int) *nfcTrie {
+ return &nfcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 46:
+ return uint16(nfcValues[n<<6+uint32(b)])
+ default:
+ n -= 46
+ return uint16(nfcSparse.lookup(n, b))
+ }
+}
+
+// nfcValues: 48 blocks, 3072 entries, 6144 bytes
+// The third block is the zero block.
+var nfcValues = [3072]uint16{
+ // Block 0x0, offset 0x0
+ 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000,
+ // Block 0x1, offset 0x40
+ 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000,
+ 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000,
+ 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000,
+ 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000,
+ 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000,
+ 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000,
+ 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000,
+ 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000,
+ 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000,
+ 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x2f72, 0xc1: 0x2f77, 0xc2: 0x468b, 0xc3: 0x2f7c, 0xc4: 0x469a, 0xc5: 0x469f,
+ 0xc6: 0xa000, 0xc7: 0x46a9, 0xc8: 0x2fe5, 0xc9: 0x2fea, 0xca: 0x46ae, 0xcb: 0x2ffe,
+ 0xcc: 0x3071, 0xcd: 0x3076, 0xce: 0x307b, 0xcf: 0x46c2, 0xd1: 0x3107,
+ 0xd2: 0x312a, 0xd3: 0x312f, 0xd4: 0x46cc, 0xd5: 0x46d1, 0xd6: 0x46e0,
+ 0xd8: 0xa000, 0xd9: 0x31b6, 0xda: 0x31bb, 0xdb: 0x31c0, 0xdc: 0x4712, 0xdd: 0x3238,
+ 0xe0: 0x327e, 0xe1: 0x3283, 0xe2: 0x471c, 0xe3: 0x3288,
+ 0xe4: 0x472b, 0xe5: 0x4730, 0xe6: 0xa000, 0xe7: 0x473a, 0xe8: 0x32f1, 0xe9: 0x32f6,
+ 0xea: 0x473f, 0xeb: 0x330a, 0xec: 0x3382, 0xed: 0x3387, 0xee: 0x338c, 0xef: 0x4753,
+ 0xf1: 0x3418, 0xf2: 0x343b, 0xf3: 0x3440, 0xf4: 0x475d, 0xf5: 0x4762,
+ 0xf6: 0x4771, 0xf8: 0xa000, 0xf9: 0x34cc, 0xfa: 0x34d1, 0xfb: 0x34d6,
+ 0xfc: 0x47a3, 0xfd: 0x3553, 0xff: 0x356c,
+ // Block 0x4, offset 0x100
+ 0x100: 0x2f81, 0x101: 0x328d, 0x102: 0x4690, 0x103: 0x4721, 0x104: 0x2f9f, 0x105: 0x32ab,
+ 0x106: 0x2fb3, 0x107: 0x32bf, 0x108: 0x2fb8, 0x109: 0x32c4, 0x10a: 0x2fbd, 0x10b: 0x32c9,
+ 0x10c: 0x2fc2, 0x10d: 0x32ce, 0x10e: 0x2fcc, 0x10f: 0x32d8,
+ 0x112: 0x46b3, 0x113: 0x4744, 0x114: 0x2ff4, 0x115: 0x3300, 0x116: 0x2ff9, 0x117: 0x3305,
+ 0x118: 0x3017, 0x119: 0x3323, 0x11a: 0x3008, 0x11b: 0x3314, 0x11c: 0x3030, 0x11d: 0x333c,
+ 0x11e: 0x303a, 0x11f: 0x3346, 0x120: 0x303f, 0x121: 0x334b, 0x122: 0x3049, 0x123: 0x3355,
+ 0x124: 0x304e, 0x125: 0x335a, 0x128: 0x3080, 0x129: 0x3391,
+ 0x12a: 0x3085, 0x12b: 0x3396, 0x12c: 0x308a, 0x12d: 0x339b, 0x12e: 0x30ad, 0x12f: 0x33b9,
+ 0x130: 0x308f, 0x134: 0x30b7, 0x135: 0x33c3,
+ 0x136: 0x30cb, 0x137: 0x33dc, 0x139: 0x30d5, 0x13a: 0x33e6, 0x13b: 0x30df,
+ 0x13c: 0x33f0, 0x13d: 0x30da, 0x13e: 0x33eb,
+ // Block 0x5, offset 0x140
+ 0x143: 0x3102, 0x144: 0x3413, 0x145: 0x311b,
+ 0x146: 0x342c, 0x147: 0x3111, 0x148: 0x3422,
+ 0x14c: 0x46d6, 0x14d: 0x4767, 0x14e: 0x3134, 0x14f: 0x3445, 0x150: 0x313e, 0x151: 0x344f,
+ 0x154: 0x315c, 0x155: 0x346d, 0x156: 0x3175, 0x157: 0x3486,
+ 0x158: 0x3166, 0x159: 0x3477, 0x15a: 0x46f9, 0x15b: 0x478a, 0x15c: 0x317f, 0x15d: 0x3490,
+ 0x15e: 0x318e, 0x15f: 0x349f, 0x160: 0x46fe, 0x161: 0x478f, 0x162: 0x31a7, 0x163: 0x34bd,
+ 0x164: 0x3198, 0x165: 0x34ae, 0x168: 0x4708, 0x169: 0x4799,
+ 0x16a: 0x470d, 0x16b: 0x479e, 0x16c: 0x31c5, 0x16d: 0x34db, 0x16e: 0x31cf, 0x16f: 0x34e5,
+ 0x170: 0x31d4, 0x171: 0x34ea, 0x172: 0x31f2, 0x173: 0x3508, 0x174: 0x3215, 0x175: 0x352b,
+ 0x176: 0x323d, 0x177: 0x3558, 0x178: 0x3251, 0x179: 0x3260, 0x17a: 0x3580, 0x17b: 0x326a,
+ 0x17c: 0x358a, 0x17d: 0x326f, 0x17e: 0x358f, 0x17f: 0xa000,
+ // Block 0x6, offset 0x180
+ 0x184: 0x8100, 0x185: 0x8100,
+ 0x186: 0x8100,
+ 0x18d: 0x2f8b, 0x18e: 0x3297, 0x18f: 0x3099, 0x190: 0x33a5, 0x191: 0x3143,
+ 0x192: 0x3454, 0x193: 0x31d9, 0x194: 0x34ef, 0x195: 0x39d2, 0x196: 0x3b61, 0x197: 0x39cb,
+ 0x198: 0x3b5a, 0x199: 0x39d9, 0x19a: 0x3b68, 0x19b: 0x39c4, 0x19c: 0x3b53,
+ 0x19e: 0x38b3, 0x19f: 0x3a42, 0x1a0: 0x38ac, 0x1a1: 0x3a3b, 0x1a2: 0x35b6, 0x1a3: 0x35c8,
+ 0x1a6: 0x3044, 0x1a7: 0x3350, 0x1a8: 0x30c1, 0x1a9: 0x33d2,
+ 0x1aa: 0x46ef, 0x1ab: 0x4780, 0x1ac: 0x3993, 0x1ad: 0x3b22, 0x1ae: 0x35da, 0x1af: 0x35e0,
+ 0x1b0: 0x33c8, 0x1b4: 0x302b, 0x1b5: 0x3337,
+ 0x1b8: 0x30fd, 0x1b9: 0x340e, 0x1ba: 0x38ba, 0x1bb: 0x3a49,
+ 0x1bc: 0x35b0, 0x1bd: 0x35c2, 0x1be: 0x35bc, 0x1bf: 0x35ce,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x2f90, 0x1c1: 0x329c, 0x1c2: 0x2f95, 0x1c3: 0x32a1, 0x1c4: 0x300d, 0x1c5: 0x3319,
+ 0x1c6: 0x3012, 0x1c7: 0x331e, 0x1c8: 0x309e, 0x1c9: 0x33aa, 0x1ca: 0x30a3, 0x1cb: 0x33af,
+ 0x1cc: 0x3148, 0x1cd: 0x3459, 0x1ce: 0x314d, 0x1cf: 0x345e, 0x1d0: 0x316b, 0x1d1: 0x347c,
+ 0x1d2: 0x3170, 0x1d3: 0x3481, 0x1d4: 0x31de, 0x1d5: 0x34f4, 0x1d6: 0x31e3, 0x1d7: 0x34f9,
+ 0x1d8: 0x3189, 0x1d9: 0x349a, 0x1da: 0x31a2, 0x1db: 0x34b8,
+ 0x1de: 0x305d, 0x1df: 0x3369,
+ 0x1e6: 0x4695, 0x1e7: 0x4726, 0x1e8: 0x46bd, 0x1e9: 0x474e,
+ 0x1ea: 0x3962, 0x1eb: 0x3af1, 0x1ec: 0x393f, 0x1ed: 0x3ace, 0x1ee: 0x46db, 0x1ef: 0x476c,
+ 0x1f0: 0x395b, 0x1f1: 0x3aea, 0x1f2: 0x3247, 0x1f3: 0x3562,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132,
+ 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932,
+ 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932,
+ 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d,
+ 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d,
+ 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d,
+ 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d,
+ 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d,
+ 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101,
+ 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d,
+ 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132,
+ // Block 0x9, offset 0x240
+ 0x240: 0x49b1, 0x241: 0x49b6, 0x242: 0x9932, 0x243: 0x49bb, 0x244: 0x4a74, 0x245: 0x9936,
+ 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132,
+ 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132,
+ 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132,
+ 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135,
+ 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132,
+ 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132,
+ 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132,
+ 0x274: 0x0170,
+ 0x27a: 0x8100,
+ 0x27e: 0x0037,
+ // Block 0xa, offset 0x280
+ 0x284: 0x8100, 0x285: 0x35a4,
+ 0x286: 0x35ec, 0x287: 0x00ce, 0x288: 0x360a, 0x289: 0x3616, 0x28a: 0x3628,
+ 0x28c: 0x3646, 0x28e: 0x3658, 0x28f: 0x3676, 0x290: 0x3e0b, 0x291: 0xa000,
+ 0x295: 0xa000, 0x297: 0xa000,
+ 0x299: 0xa000,
+ 0x29f: 0xa000, 0x2a1: 0xa000,
+ 0x2a5: 0xa000, 0x2a9: 0xa000,
+ 0x2aa: 0x363a, 0x2ab: 0x366a, 0x2ac: 0x4801, 0x2ad: 0x369a, 0x2ae: 0x482b, 0x2af: 0x36ac,
+ 0x2b0: 0x3e73, 0x2b1: 0xa000, 0x2b5: 0xa000,
+ 0x2b7: 0xa000, 0x2b9: 0xa000,
+ 0x2bf: 0xa000,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x3724, 0x2c1: 0x3730, 0x2c3: 0x371e,
+ 0x2c6: 0xa000, 0x2c7: 0x370c,
+ 0x2cc: 0x3760, 0x2cd: 0x3748, 0x2ce: 0x3772, 0x2d0: 0xa000,
+ 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000,
+ 0x2d8: 0xa000, 0x2d9: 0x3754, 0x2da: 0xa000,
+ 0x2de: 0xa000, 0x2e3: 0xa000,
+ 0x2e7: 0xa000,
+ 0x2eb: 0xa000, 0x2ed: 0xa000,
+ 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000,
+ 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d8, 0x2fa: 0xa000,
+ 0x2fe: 0xa000,
+ // Block 0xc, offset 0x300
+ 0x301: 0x3736, 0x302: 0x37ba,
+ 0x310: 0x3712, 0x311: 0x3796,
+ 0x312: 0x3718, 0x313: 0x379c, 0x316: 0x372a, 0x317: 0x37ae,
+ 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x382c, 0x31b: 0x3832, 0x31c: 0x373c, 0x31d: 0x37c0,
+ 0x31e: 0x3742, 0x31f: 0x37c6, 0x322: 0x374e, 0x323: 0x37d2,
+ 0x324: 0x375a, 0x325: 0x37de, 0x326: 0x3766, 0x327: 0x37ea, 0x328: 0xa000, 0x329: 0xa000,
+ 0x32a: 0x3838, 0x32b: 0x383e, 0x32c: 0x3790, 0x32d: 0x3814, 0x32e: 0x376c, 0x32f: 0x37f0,
+ 0x330: 0x3778, 0x331: 0x37fc, 0x332: 0x377e, 0x333: 0x3802, 0x334: 0x3784, 0x335: 0x3808,
+ 0x338: 0x378a, 0x339: 0x380e,
+ // Block 0xd, offset 0x340
+ 0x351: 0x812d,
+ 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132,
+ 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132,
+ 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d,
+ 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132,
+ 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132,
+ 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a,
+ 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f,
+ 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112,
+ // Block 0xe, offset 0x380
+ 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116,
+ 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c,
+ 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132,
+ 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132,
+ 0x39e: 0x8132, 0x39f: 0x812d,
+ 0x3b0: 0x811e,
+ // Block 0xf, offset 0x3c0
+ 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132,
+ 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132,
+ 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d,
+ 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d,
+ 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d,
+ 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132,
+ 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132,
+ 0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132,
+ // Block 0x10, offset 0x400
+ 0x405: 0xa000,
+ 0x406: 0x2d29, 0x407: 0xa000, 0x408: 0x2d31, 0x409: 0xa000, 0x40a: 0x2d39, 0x40b: 0xa000,
+ 0x40c: 0x2d41, 0x40d: 0xa000, 0x40e: 0x2d49, 0x411: 0xa000,
+ 0x412: 0x2d51,
+ 0x434: 0x8102, 0x435: 0x9900,
+ 0x43a: 0xa000, 0x43b: 0x2d59,
+ 0x43c: 0xa000, 0x43d: 0x2d61, 0x43e: 0xa000, 0x43f: 0xa000,
+ // Block 0x11, offset 0x440
+ 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132,
+ 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132,
+ 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132,
+ 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132,
+ 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132,
+ 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132,
+ 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132,
+ 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132,
+ 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132,
+ 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132,
+ 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d,
+ // Block 0x12, offset 0x480
+ 0x480: 0x2f9a, 0x481: 0x32a6, 0x482: 0x2fa4, 0x483: 0x32b0, 0x484: 0x2fa9, 0x485: 0x32b5,
+ 0x486: 0x2fae, 0x487: 0x32ba, 0x488: 0x38cf, 0x489: 0x3a5e, 0x48a: 0x2fc7, 0x48b: 0x32d3,
+ 0x48c: 0x2fd1, 0x48d: 0x32dd, 0x48e: 0x2fe0, 0x48f: 0x32ec, 0x490: 0x2fd6, 0x491: 0x32e2,
+ 0x492: 0x2fdb, 0x493: 0x32e7, 0x494: 0x38f2, 0x495: 0x3a81, 0x496: 0x38f9, 0x497: 0x3a88,
+ 0x498: 0x301c, 0x499: 0x3328, 0x49a: 0x3021, 0x49b: 0x332d, 0x49c: 0x3907, 0x49d: 0x3a96,
+ 0x49e: 0x3026, 0x49f: 0x3332, 0x4a0: 0x3035, 0x4a1: 0x3341, 0x4a2: 0x3053, 0x4a3: 0x335f,
+ 0x4a4: 0x3062, 0x4a5: 0x336e, 0x4a6: 0x3058, 0x4a7: 0x3364, 0x4a8: 0x3067, 0x4a9: 0x3373,
+ 0x4aa: 0x306c, 0x4ab: 0x3378, 0x4ac: 0x30b2, 0x4ad: 0x33be, 0x4ae: 0x390e, 0x4af: 0x3a9d,
+ 0x4b0: 0x30bc, 0x4b1: 0x33cd, 0x4b2: 0x30c6, 0x4b3: 0x33d7, 0x4b4: 0x30d0, 0x4b5: 0x33e1,
+ 0x4b6: 0x46c7, 0x4b7: 0x4758, 0x4b8: 0x3915, 0x4b9: 0x3aa4, 0x4ba: 0x30e9, 0x4bb: 0x33fa,
+ 0x4bc: 0x30e4, 0x4bd: 0x33f5, 0x4be: 0x30ee, 0x4bf: 0x33ff,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x30f3, 0x4c1: 0x3404, 0x4c2: 0x30f8, 0x4c3: 0x3409, 0x4c4: 0x310c, 0x4c5: 0x341d,
+ 0x4c6: 0x3116, 0x4c7: 0x3427, 0x4c8: 0x3125, 0x4c9: 0x3436, 0x4ca: 0x3120, 0x4cb: 0x3431,
+ 0x4cc: 0x3938, 0x4cd: 0x3ac7, 0x4ce: 0x3946, 0x4cf: 0x3ad5, 0x4d0: 0x394d, 0x4d1: 0x3adc,
+ 0x4d2: 0x3954, 0x4d3: 0x3ae3, 0x4d4: 0x3152, 0x4d5: 0x3463, 0x4d6: 0x3157, 0x4d7: 0x3468,
+ 0x4d8: 0x3161, 0x4d9: 0x3472, 0x4da: 0x46f4, 0x4db: 0x4785, 0x4dc: 0x399a, 0x4dd: 0x3b29,
+ 0x4de: 0x317a, 0x4df: 0x348b, 0x4e0: 0x3184, 0x4e1: 0x3495, 0x4e2: 0x4703, 0x4e3: 0x4794,
+ 0x4e4: 0x39a1, 0x4e5: 0x3b30, 0x4e6: 0x39a8, 0x4e7: 0x3b37, 0x4e8: 0x39af, 0x4e9: 0x3b3e,
+ 0x4ea: 0x3193, 0x4eb: 0x34a4, 0x4ec: 0x319d, 0x4ed: 0x34b3, 0x4ee: 0x31b1, 0x4ef: 0x34c7,
+ 0x4f0: 0x31ac, 0x4f1: 0x34c2, 0x4f2: 0x31ed, 0x4f3: 0x3503, 0x4f4: 0x31fc, 0x4f5: 0x3512,
+ 0x4f6: 0x31f7, 0x4f7: 0x350d, 0x4f8: 0x39b6, 0x4f9: 0x3b45, 0x4fa: 0x39bd, 0x4fb: 0x3b4c,
+ 0x4fc: 0x3201, 0x4fd: 0x3517, 0x4fe: 0x3206, 0x4ff: 0x351c,
+ // Block 0x14, offset 0x500
+ 0x500: 0x320b, 0x501: 0x3521, 0x502: 0x3210, 0x503: 0x3526, 0x504: 0x321f, 0x505: 0x3535,
+ 0x506: 0x321a, 0x507: 0x3530, 0x508: 0x3224, 0x509: 0x353f, 0x50a: 0x3229, 0x50b: 0x3544,
+ 0x50c: 0x322e, 0x50d: 0x3549, 0x50e: 0x324c, 0x50f: 0x3567, 0x510: 0x3265, 0x511: 0x3585,
+ 0x512: 0x3274, 0x513: 0x3594, 0x514: 0x3279, 0x515: 0x3599, 0x516: 0x337d, 0x517: 0x34a9,
+ 0x518: 0x353a, 0x519: 0x3576, 0x51b: 0x35d4,
+ 0x520: 0x46a4, 0x521: 0x4735, 0x522: 0x2f86, 0x523: 0x3292,
+ 0x524: 0x387b, 0x525: 0x3a0a, 0x526: 0x3874, 0x527: 0x3a03, 0x528: 0x3889, 0x529: 0x3a18,
+ 0x52a: 0x3882, 0x52b: 0x3a11, 0x52c: 0x38c1, 0x52d: 0x3a50, 0x52e: 0x3897, 0x52f: 0x3a26,
+ 0x530: 0x3890, 0x531: 0x3a1f, 0x532: 0x38a5, 0x533: 0x3a34, 0x534: 0x389e, 0x535: 0x3a2d,
+ 0x536: 0x38c8, 0x537: 0x3a57, 0x538: 0x46b8, 0x539: 0x4749, 0x53a: 0x3003, 0x53b: 0x330f,
+ 0x53c: 0x2fef, 0x53d: 0x32fb, 0x53e: 0x38dd, 0x53f: 0x3a6c,
+ // Block 0x15, offset 0x540
+ 0x540: 0x38d6, 0x541: 0x3a65, 0x542: 0x38eb, 0x543: 0x3a7a, 0x544: 0x38e4, 0x545: 0x3a73,
+ 0x546: 0x3900, 0x547: 0x3a8f, 0x548: 0x3094, 0x549: 0x33a0, 0x54a: 0x30a8, 0x54b: 0x33b4,
+ 0x54c: 0x46ea, 0x54d: 0x477b, 0x54e: 0x3139, 0x54f: 0x344a, 0x550: 0x3923, 0x551: 0x3ab2,
+ 0x552: 0x391c, 0x553: 0x3aab, 0x554: 0x3931, 0x555: 0x3ac0, 0x556: 0x392a, 0x557: 0x3ab9,
+ 0x558: 0x398c, 0x559: 0x3b1b, 0x55a: 0x3970, 0x55b: 0x3aff, 0x55c: 0x3969, 0x55d: 0x3af8,
+ 0x55e: 0x397e, 0x55f: 0x3b0d, 0x560: 0x3977, 0x561: 0x3b06, 0x562: 0x3985, 0x563: 0x3b14,
+ 0x564: 0x31e8, 0x565: 0x34fe, 0x566: 0x31ca, 0x567: 0x34e0, 0x568: 0x39e7, 0x569: 0x3b76,
+ 0x56a: 0x39e0, 0x56b: 0x3b6f, 0x56c: 0x39f5, 0x56d: 0x3b84, 0x56e: 0x39ee, 0x56f: 0x3b7d,
+ 0x570: 0x39fc, 0x571: 0x3b8b, 0x572: 0x3233, 0x573: 0x354e, 0x574: 0x325b, 0x575: 0x357b,
+ 0x576: 0x3256, 0x577: 0x3571, 0x578: 0x3242, 0x579: 0x355d,
+ // Block 0x16, offset 0x580
+ 0x580: 0x4807, 0x581: 0x480d, 0x582: 0x4921, 0x583: 0x4939, 0x584: 0x4929, 0x585: 0x4941,
+ 0x586: 0x4931, 0x587: 0x4949, 0x588: 0x47ad, 0x589: 0x47b3, 0x58a: 0x4891, 0x58b: 0x48a9,
+ 0x58c: 0x4899, 0x58d: 0x48b1, 0x58e: 0x48a1, 0x58f: 0x48b9, 0x590: 0x4819, 0x591: 0x481f,
+ 0x592: 0x3dbb, 0x593: 0x3dcb, 0x594: 0x3dc3, 0x595: 0x3dd3,
+ 0x598: 0x47b9, 0x599: 0x47bf, 0x59a: 0x3ceb, 0x59b: 0x3cfb, 0x59c: 0x3cf3, 0x59d: 0x3d03,
+ 0x5a0: 0x4831, 0x5a1: 0x4837, 0x5a2: 0x4951, 0x5a3: 0x4969,
+ 0x5a4: 0x4959, 0x5a5: 0x4971, 0x5a6: 0x4961, 0x5a7: 0x4979, 0x5a8: 0x47c5, 0x5a9: 0x47cb,
+ 0x5aa: 0x48c1, 0x5ab: 0x48d9, 0x5ac: 0x48c9, 0x5ad: 0x48e1, 0x5ae: 0x48d1, 0x5af: 0x48e9,
+ 0x5b0: 0x4849, 0x5b1: 0x484f, 0x5b2: 0x3e1b, 0x5b3: 0x3e33, 0x5b4: 0x3e23, 0x5b5: 0x3e3b,
+ 0x5b6: 0x3e2b, 0x5b7: 0x3e43, 0x5b8: 0x47d1, 0x5b9: 0x47d7, 0x5ba: 0x3d1b, 0x5bb: 0x3d33,
+ 0x5bc: 0x3d23, 0x5bd: 0x3d3b, 0x5be: 0x3d2b, 0x5bf: 0x3d43,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x4855, 0x5c1: 0x485b, 0x5c2: 0x3e4b, 0x5c3: 0x3e5b, 0x5c4: 0x3e53, 0x5c5: 0x3e63,
+ 0x5c8: 0x47dd, 0x5c9: 0x47e3, 0x5ca: 0x3d4b, 0x5cb: 0x3d5b,
+ 0x5cc: 0x3d53, 0x5cd: 0x3d63, 0x5d0: 0x4867, 0x5d1: 0x486d,
+ 0x5d2: 0x3e83, 0x5d3: 0x3e9b, 0x5d4: 0x3e8b, 0x5d5: 0x3ea3, 0x5d6: 0x3e93, 0x5d7: 0x3eab,
+ 0x5d9: 0x47e9, 0x5db: 0x3d6b, 0x5dd: 0x3d73,
+ 0x5df: 0x3d7b, 0x5e0: 0x487f, 0x5e1: 0x4885, 0x5e2: 0x4981, 0x5e3: 0x4999,
+ 0x5e4: 0x4989, 0x5e5: 0x49a1, 0x5e6: 0x4991, 0x5e7: 0x49a9, 0x5e8: 0x47ef, 0x5e9: 0x47f5,
+ 0x5ea: 0x48f1, 0x5eb: 0x4909, 0x5ec: 0x48f9, 0x5ed: 0x4911, 0x5ee: 0x4901, 0x5ef: 0x4919,
+ 0x5f0: 0x47fb, 0x5f1: 0x4321, 0x5f2: 0x3694, 0x5f3: 0x4327, 0x5f4: 0x4825, 0x5f5: 0x432d,
+ 0x5f6: 0x36a6, 0x5f7: 0x4333, 0x5f8: 0x36c4, 0x5f9: 0x4339, 0x5fa: 0x36dc, 0x5fb: 0x433f,
+ 0x5fc: 0x4873, 0x5fd: 0x4345,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3da3, 0x601: 0x3dab, 0x602: 0x4187, 0x603: 0x41a5, 0x604: 0x4191, 0x605: 0x41af,
+ 0x606: 0x419b, 0x607: 0x41b9, 0x608: 0x3cdb, 0x609: 0x3ce3, 0x60a: 0x40d3, 0x60b: 0x40f1,
+ 0x60c: 0x40dd, 0x60d: 0x40fb, 0x60e: 0x40e7, 0x60f: 0x4105, 0x610: 0x3deb, 0x611: 0x3df3,
+ 0x612: 0x41c3, 0x613: 0x41e1, 0x614: 0x41cd, 0x615: 0x41eb, 0x616: 0x41d7, 0x617: 0x41f5,
+ 0x618: 0x3d0b, 0x619: 0x3d13, 0x61a: 0x410f, 0x61b: 0x412d, 0x61c: 0x4119, 0x61d: 0x4137,
+ 0x61e: 0x4123, 0x61f: 0x4141, 0x620: 0x3ec3, 0x621: 0x3ecb, 0x622: 0x41ff, 0x623: 0x421d,
+ 0x624: 0x4209, 0x625: 0x4227, 0x626: 0x4213, 0x627: 0x4231, 0x628: 0x3d83, 0x629: 0x3d8b,
+ 0x62a: 0x414b, 0x62b: 0x4169, 0x62c: 0x4155, 0x62d: 0x4173, 0x62e: 0x415f, 0x62f: 0x417d,
+ 0x630: 0x3688, 0x631: 0x3682, 0x632: 0x3d93, 0x633: 0x368e, 0x634: 0x3d9b,
+ 0x636: 0x4813, 0x637: 0x3db3, 0x638: 0x35f8, 0x639: 0x35f2, 0x63a: 0x35e6, 0x63b: 0x42f1,
+ 0x63c: 0x35fe, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100,
+ // Block 0x19, offset 0x640
+ 0x640: 0x8100, 0x641: 0x35aa, 0x642: 0x3ddb, 0x643: 0x36a0, 0x644: 0x3de3,
+ 0x646: 0x483d, 0x647: 0x3dfb, 0x648: 0x3604, 0x649: 0x42f7, 0x64a: 0x3610, 0x64b: 0x42fd,
+ 0x64c: 0x361c, 0x64d: 0x3b92, 0x64e: 0x3b99, 0x64f: 0x3ba0, 0x650: 0x36b8, 0x651: 0x36b2,
+ 0x652: 0x3e03, 0x653: 0x44e7, 0x656: 0x36be, 0x657: 0x3e13,
+ 0x658: 0x3634, 0x659: 0x362e, 0x65a: 0x3622, 0x65b: 0x4303, 0x65d: 0x3ba7,
+ 0x65e: 0x3bae, 0x65f: 0x3bb5, 0x660: 0x36ee, 0x661: 0x36e8, 0x662: 0x3e6b, 0x663: 0x44ef,
+ 0x664: 0x36d0, 0x665: 0x36d6, 0x666: 0x36f4, 0x667: 0x3e7b, 0x668: 0x3664, 0x669: 0x365e,
+ 0x66a: 0x3652, 0x66b: 0x430f, 0x66c: 0x364c, 0x66d: 0x359e, 0x66e: 0x42eb, 0x66f: 0x0081,
+ 0x672: 0x3eb3, 0x673: 0x36fa, 0x674: 0x3ebb,
+ 0x676: 0x488b, 0x677: 0x3ed3, 0x678: 0x3640, 0x679: 0x4309, 0x67a: 0x3670, 0x67b: 0x431b,
+ 0x67c: 0x367c, 0x67d: 0x4259, 0x67e: 0xa100,
+ // Block 0x1a, offset 0x680
+ 0x681: 0x3c09, 0x683: 0xa000, 0x684: 0x3c10, 0x685: 0xa000,
+ 0x687: 0x3c17, 0x688: 0xa000, 0x689: 0x3c1e,
+ 0x68d: 0xa000,
+ 0x6a0: 0x2f68, 0x6a1: 0xa000, 0x6a2: 0x3c2c,
+ 0x6a4: 0xa000, 0x6a5: 0xa000,
+ 0x6ad: 0x3c25, 0x6ae: 0x2f63, 0x6af: 0x2f6d,
+ 0x6b0: 0x3c33, 0x6b1: 0x3c3a, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c41, 0x6b5: 0x3c48,
+ 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4f, 0x6b9: 0x3c56, 0x6ba: 0xa000, 0x6bb: 0xa000,
+ 0x6bc: 0xa000, 0x6bd: 0xa000,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3c5d, 0x6c1: 0x3c64, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c79, 0x6c5: 0x3c80,
+ 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c87, 0x6c9: 0x3c8e,
+ 0x6d1: 0xa000,
+ 0x6d2: 0xa000,
+ 0x6e2: 0xa000,
+ 0x6e8: 0xa000, 0x6e9: 0xa000,
+ 0x6eb: 0xa000, 0x6ec: 0x3ca3, 0x6ed: 0x3caa, 0x6ee: 0x3cb1, 0x6ef: 0x3cb8,
+ 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000,
+ // Block 0x1c, offset 0x700
+ 0x706: 0xa000, 0x70b: 0xa000,
+ 0x70c: 0x3f0b, 0x70d: 0xa000, 0x70e: 0x3f13, 0x70f: 0xa000, 0x710: 0x3f1b, 0x711: 0xa000,
+ 0x712: 0x3f23, 0x713: 0xa000, 0x714: 0x3f2b, 0x715: 0xa000, 0x716: 0x3f33, 0x717: 0xa000,
+ 0x718: 0x3f3b, 0x719: 0xa000, 0x71a: 0x3f43, 0x71b: 0xa000, 0x71c: 0x3f4b, 0x71d: 0xa000,
+ 0x71e: 0x3f53, 0x71f: 0xa000, 0x720: 0x3f5b, 0x721: 0xa000, 0x722: 0x3f63,
+ 0x724: 0xa000, 0x725: 0x3f6b, 0x726: 0xa000, 0x727: 0x3f73, 0x728: 0xa000, 0x729: 0x3f7b,
+ 0x72f: 0xa000,
+ 0x730: 0x3f83, 0x731: 0x3f8b, 0x732: 0xa000, 0x733: 0x3f93, 0x734: 0x3f9b, 0x735: 0xa000,
+ 0x736: 0x3fa3, 0x737: 0x3fab, 0x738: 0xa000, 0x739: 0x3fb3, 0x73a: 0x3fbb, 0x73b: 0xa000,
+ 0x73c: 0x3fc3, 0x73d: 0x3fcb,
+ // Block 0x1d, offset 0x740
+ 0x754: 0x3f03,
+ 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000,
+ 0x75e: 0x3fd3,
+ 0x766: 0xa000,
+ 0x76b: 0xa000, 0x76c: 0x3fe3, 0x76d: 0xa000, 0x76e: 0x3feb, 0x76f: 0xa000,
+ 0x770: 0x3ff3, 0x771: 0xa000, 0x772: 0x3ffb, 0x773: 0xa000, 0x774: 0x4003, 0x775: 0xa000,
+ 0x776: 0x400b, 0x777: 0xa000, 0x778: 0x4013, 0x779: 0xa000, 0x77a: 0x401b, 0x77b: 0xa000,
+ 0x77c: 0x4023, 0x77d: 0xa000, 0x77e: 0x402b, 0x77f: 0xa000,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x4033, 0x781: 0xa000, 0x782: 0x403b, 0x784: 0xa000, 0x785: 0x4043,
+ 0x786: 0xa000, 0x787: 0x404b, 0x788: 0xa000, 0x789: 0x4053,
+ 0x78f: 0xa000, 0x790: 0x405b, 0x791: 0x4063,
+ 0x792: 0xa000, 0x793: 0x406b, 0x794: 0x4073, 0x795: 0xa000, 0x796: 0x407b, 0x797: 0x4083,
+ 0x798: 0xa000, 0x799: 0x408b, 0x79a: 0x4093, 0x79b: 0xa000, 0x79c: 0x409b, 0x79d: 0x40a3,
+ 0x7af: 0xa000,
+ 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fdb,
+ 0x7b7: 0x40ab, 0x7b8: 0x40b3, 0x7b9: 0x40bb, 0x7ba: 0x40c3,
+ 0x7bd: 0xa000, 0x7be: 0x40cb,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb,
+ 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943,
+ 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3,
+ 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43,
+ 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87,
+ 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283,
+ 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f,
+ 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853,
+ 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b,
+ 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b,
+ 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b,
+ 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f,
+ 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7,
+ 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127,
+ 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357,
+ 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873,
+ 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3,
+ 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b,
+ 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57,
+ 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb,
+ 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f,
+ 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3,
+ 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83,
+ 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193,
+ 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b,
+ 0x85e: 0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b,
+ 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f,
+ 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b,
+ 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753,
+ 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777,
+ 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3,
+ 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47,
+ 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af,
+ 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df,
+ 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817,
+ 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3,
+ 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457,
+ 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b,
+ 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27,
+ 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f,
+ 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03,
+ 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27,
+ 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af,
+ 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3,
+ 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb,
+ 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353,
+ 0x8e5: 0x1407, 0x8e6: 0x1433,
+ 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7,
+ 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897,
+ 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93,
+ 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b,
+ 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f,
+ 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f,
+ 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f,
+ 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff,
+ 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f,
+ 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f,
+ 0x92a: 0x1537, 0x92b: 0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3,
+ 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7,
+ 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963,
+ 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b,
+ 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb,
+ 0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf,
+ 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f,
+ 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013,
+ 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f,
+ 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b,
+ 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b,
+ 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb,
+ 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343,
+ 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f,
+ // Block 0x26, offset 0x980
+ 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b,
+ 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b,
+ 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2,
+ 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809,
+ 0x998: 0x1617, 0x999: 0x1627,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757,
+ 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773,
+ 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3,
+ 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf,
+ 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff,
+ 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f,
+ 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867,
+ 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af,
+ 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93,
+ 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3,
+ 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f,
+ 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983,
+ 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf,
+ 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3,
+ 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef,
+ 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23,
+ 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37,
+ 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63,
+ 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f,
+ 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692,
+ 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb,
+ 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f,
+ 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 0x16a6,
+ 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9,
+ 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83,
+ 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3,
+ 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf,
+ 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7,
+ 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f,
+ 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b,
+ 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87,
+ 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb,
+ 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7,
+ 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663,
+ 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd,
+ 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7,
+ 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b,
+ 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f,
+ 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7,
+ 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700,
+ 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23,
+ 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53,
+ 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714,
+ 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b,
+ 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719,
+ 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728,
+ 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37,
+ 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57,
+ 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737,
+ 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741,
+ 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff,
+ 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637,
+ 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f,
+ 0xb12: 0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093,
+ 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782,
+ 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3,
+ 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7,
+ 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133,
+ 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa,
+ 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4,
+ 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7,
+ 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7,
+ 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b,
+ 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd,
+ 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f,
+ 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f,
+ 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273,
+ 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677,
+ 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7,
+ 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb,
+ 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5,
+ 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa,
+ 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b,
+ 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7,
+ 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665,
+ 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f,
+ 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477,
+ 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693,
+ 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb,
+ 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b,
+ 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567,
+ 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7,
+ 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7,
+ 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef,
+ 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868,
+}
+
+// nfcIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var nfcIndex = [1408]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04,
+ 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32,
+ 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35,
+ 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05,
+ 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a,
+ 0xf0: 0x13,
+ // Block 0x4, offset 0x100
+ 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40,
+ 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47,
+ 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d,
+ 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55,
+ // Block 0x5, offset 0x140
+ 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b,
+ 0x14d: 0x5c,
+ 0x15c: 0x5d, 0x15f: 0x5e,
+ 0x162: 0x5f, 0x164: 0x60,
+ 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66,
+ 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f,
+ 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17,
+ // Block 0x6, offset 0x180
+ 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d,
+ 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70,
+ 0x1ab: 0x71,
+ 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77,
+ 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a,
+ // Block 0x8, offset 0x200
+ 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d,
+ 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83,
+ 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86,
+ 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87,
+ 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88,
+ // Block 0x9, offset 0x240
+ 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89,
+ 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a,
+ 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b,
+ 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c,
+ 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d,
+ 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87,
+ 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88,
+ 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89,
+ // Block 0xa, offset 0x280
+ 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a,
+ 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b,
+ 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c,
+ 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d,
+ 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87,
+ 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88,
+ 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89,
+ 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b,
+ 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c,
+ 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d,
+ 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e,
+ // Block 0xc, offset 0x300
+ 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20,
+ 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91,
+ 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95,
+ 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b,
+ // Block 0xd, offset 0x340
+ 0x347: 0x9c,
+ 0x34b: 0x9d, 0x34d: 0x9e,
+ 0x368: 0x9f, 0x36b: 0xa0,
+ 0x374: 0xa1,
+ 0x37d: 0xa2,
+ // Block 0xe, offset 0x380
+ 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6,
+ 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa,
+ 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf,
+ 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1,
+ 0x3a0: 0xb2, 0x3a7: 0xb3,
+ 0x3a8: 0xb4, 0x3a9: 0xb5, 0x3aa: 0xb6,
+ 0x3b0: 0x73, 0x3b5: 0xb7, 0x3b6: 0xb8,
+ // Block 0xf, offset 0x3c0
+ 0x3eb: 0xb9, 0x3ec: 0xba,
+ // Block 0x10, offset 0x400
+ 0x432: 0xbb,
+ // Block 0x11, offset 0x440
+ 0x445: 0xbc, 0x446: 0xbd, 0x447: 0xbe,
+ 0x449: 0xbf,
+ // Block 0x12, offset 0x480
+ 0x480: 0xc0, 0x484: 0xba,
+ 0x48b: 0xc1,
+ 0x4a3: 0xc2, 0x4a5: 0xc3,
+ // Block 0x13, offset 0x4c0
+ 0x4c8: 0xc4,
+ // Block 0x14, offset 0x500
+ 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c,
+ 0x528: 0x2d,
+ // Block 0x15, offset 0x540
+ 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d,
+ 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11,
+ 0x56f: 0x12,
+}
+
+// nfcSparseOffset: 151 entries, 302 bytes
+var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xd0, 0xd2, 0xd7, 0xe8, 0xf4, 0xf6, 0xfc, 0xfe, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10b, 0x10e, 0x110, 0x113, 0x116, 0x11a, 0x11f, 0x128, 0x12a, 0x12d, 0x12f, 0x13a, 0x13e, 0x14c, 0x14f, 0x155, 0x15b, 0x166, 0x16a, 0x16c, 0x16e, 0x170, 0x172, 0x174, 0x17a, 0x17e, 0x180, 0x182, 0x18a, 0x18e, 0x191, 0x193, 0x195, 0x197, 0x19a, 0x19c, 0x19e, 0x1a0, 0x1a2, 0x1a8, 0x1ab, 0x1ad, 0x1b4, 0x1ba, 0x1c0, 0x1c8, 0x1ce, 0x1d4, 0x1da, 0x1de, 0x1ec, 0x1f5, 0x1f8, 0x1fb, 0x1fd, 0x200, 0x202, 0x206, 0x20b, 0x20d, 0x20f, 0x214, 0x21a, 0x21c, 0x21e, 0x220, 0x226, 0x229, 0x22b, 0x231, 0x234, 0x23c, 0x243, 0x246, 0x249, 0x24b, 0x24e, 0x256, 0x25a, 0x261, 0x264, 0x26a, 0x26c, 0x26f, 0x271, 0x274, 0x276, 0x278, 0x27a, 0x27c, 0x27f, 0x281, 0x283, 0x285, 0x287, 0x294, 0x29e, 0x2a0, 0x2a2, 0x2a8, 0x2aa, 0x2ac, 0x2af}
+
+// nfcSparseValues: 689 entries, 2756 bytes
+var nfcSparseValues = [689]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x04},
+ {value: 0xa100, lo: 0xa8, hi: 0xa8},
+ {value: 0x8100, lo: 0xaf, hi: 0xaf},
+ {value: 0x8100, lo: 0xb4, hi: 0xb4},
+ {value: 0x8100, lo: 0xb8, hi: 0xb8},
+ // Block 0x1, offset 0x5
+ {value: 0x0091, lo: 0x03},
+ {value: 0x46e5, lo: 0xa0, hi: 0xa1},
+ {value: 0x4717, lo: 0xaf, hi: 0xb0},
+ {value: 0xa000, lo: 0xb7, hi: 0xb7},
+ // Block 0x2, offset 0x9
+ {value: 0x0000, lo: 0x01},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ // Block 0x3, offset 0xb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x98, hi: 0x9d},
+ // Block 0x4, offset 0xd
+ {value: 0x0006, lo: 0x0a},
+ {value: 0xa000, lo: 0x81, hi: 0x81},
+ {value: 0xa000, lo: 0x85, hi: 0x85},
+ {value: 0xa000, lo: 0x89, hi: 0x89},
+ {value: 0x4843, lo: 0x8a, hi: 0x8a},
+ {value: 0x4861, lo: 0x8b, hi: 0x8b},
+ {value: 0x36ca, lo: 0x8c, hi: 0x8c},
+ {value: 0x36e2, lo: 0x8d, hi: 0x8d},
+ {value: 0x4879, lo: 0x8e, hi: 0x8e},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x3700, lo: 0x93, hi: 0x94},
+ // Block 0x5, offset 0x18
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0xa000, lo: 0x8d, hi: 0x8d},
+ {value: 0x37a8, lo: 0x90, hi: 0x90},
+ {value: 0x37b4, lo: 0x91, hi: 0x91},
+ {value: 0x37a2, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x96, hi: 0x96},
+ {value: 0x381a, lo: 0x97, hi: 0x97},
+ {value: 0x37e4, lo: 0x9c, hi: 0x9c},
+ {value: 0x37cc, lo: 0x9d, hi: 0x9d},
+ {value: 0x37f6, lo: 0x9e, hi: 0x9e},
+ {value: 0xa000, lo: 0xb4, hi: 0xb5},
+ {value: 0x3820, lo: 0xb6, hi: 0xb6},
+ {value: 0x3826, lo: 0xb7, hi: 0xb7},
+ // Block 0x6, offset 0x28
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x83, hi: 0x87},
+ // Block 0x7, offset 0x2a
+ {value: 0x0001, lo: 0x04},
+ {value: 0x8113, lo: 0x81, hi: 0x82},
+ {value: 0x8132, lo: 0x84, hi: 0x84},
+ {value: 0x812d, lo: 0x85, hi: 0x85},
+ {value: 0x810d, lo: 0x87, hi: 0x87},
+ // Block 0x8, offset 0x2f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x97},
+ {value: 0x8119, lo: 0x98, hi: 0x98},
+ {value: 0x811a, lo: 0x99, hi: 0x99},
+ {value: 0x811b, lo: 0x9a, hi: 0x9a},
+ {value: 0x3844, lo: 0xa2, hi: 0xa2},
+ {value: 0x384a, lo: 0xa3, hi: 0xa3},
+ {value: 0x3856, lo: 0xa4, hi: 0xa4},
+ {value: 0x3850, lo: 0xa5, hi: 0xa5},
+ {value: 0x385c, lo: 0xa6, hi: 0xa6},
+ {value: 0xa000, lo: 0xa7, hi: 0xa7},
+ // Block 0x9, offset 0x3a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x386e, lo: 0x80, hi: 0x80},
+ {value: 0xa000, lo: 0x81, hi: 0x81},
+ {value: 0x3862, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x3868, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x95, hi: 0x95},
+ {value: 0x8132, lo: 0x96, hi: 0x9c},
+ {value: 0x8132, lo: 0x9f, hi: 0xa2},
+ {value: 0x812d, lo: 0xa3, hi: 0xa3},
+ {value: 0x8132, lo: 0xa4, hi: 0xa4},
+ {value: 0x8132, lo: 0xa7, hi: 0xa8},
+ {value: 0x812d, lo: 0xaa, hi: 0xaa},
+ {value: 0x8132, lo: 0xab, hi: 0xac},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ // Block 0xa, offset 0x49
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x811f, lo: 0x91, hi: 0x91},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x812d, lo: 0xb1, hi: 0xb1},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb5, hi: 0xb6},
+ {value: 0x812d, lo: 0xb7, hi: 0xb9},
+ {value: 0x8132, lo: 0xba, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbc},
+ {value: 0x8132, lo: 0xbd, hi: 0xbd},
+ {value: 0x812d, lo: 0xbe, hi: 0xbe},
+ {value: 0x8132, lo: 0xbf, hi: 0xbf},
+ // Block 0xb, offset 0x56
+ {value: 0x0005, lo: 0x07},
+ {value: 0x8132, lo: 0x80, hi: 0x80},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x812d, lo: 0x82, hi: 0x83},
+ {value: 0x812d, lo: 0x84, hi: 0x85},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x812d, lo: 0x88, hi: 0x89},
+ {value: 0x8132, lo: 0x8a, hi: 0x8a},
+ // Block 0xc, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xab, hi: 0xb1},
+ {value: 0x812d, lo: 0xb2, hi: 0xb2},
+ {value: 0x8132, lo: 0xb3, hi: 0xb3},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0xd, offset 0x63
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0x96, hi: 0x99},
+ {value: 0x8132, lo: 0x9b, hi: 0xa3},
+ {value: 0x8132, lo: 0xa5, hi: 0xa7},
+ {value: 0x8132, lo: 0xa9, hi: 0xad},
+ // Block 0xe, offset 0x68
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x99, hi: 0x9b},
+ // Block 0xf, offset 0x6a
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0xa8, hi: 0xa8},
+ {value: 0x3edb, lo: 0xa9, hi: 0xa9},
+ {value: 0xa000, lo: 0xb0, hi: 0xb0},
+ {value: 0x3ee3, lo: 0xb1, hi: 0xb1},
+ {value: 0xa000, lo: 0xb3, hi: 0xb3},
+ {value: 0x3eeb, lo: 0xb4, hi: 0xb4},
+ {value: 0x9902, lo: 0xbc, hi: 0xbc},
+ // Block 0x10, offset 0x72
+ {value: 0x0008, lo: 0x06},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x91, hi: 0x91},
+ {value: 0x812d, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x93, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x94},
+ {value: 0x451f, lo: 0x98, hi: 0x9f},
+ // Block 0x11, offset 0x79
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x12, offset 0x7c
+ {value: 0x0008, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2ca1, lo: 0x8b, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x455f, lo: 0x9c, hi: 0x9d},
+ {value: 0x456f, lo: 0x9f, hi: 0x9f},
+ {value: 0x8132, lo: 0xbe, hi: 0xbe},
+ // Block 0x13, offset 0x84
+ {value: 0x0000, lo: 0x03},
+ {value: 0x4597, lo: 0xb3, hi: 0xb3},
+ {value: 0x459f, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x14, offset 0x88
+ {value: 0x0008, lo: 0x03},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x4577, lo: 0x99, hi: 0x9b},
+ {value: 0x458f, lo: 0x9e, hi: 0x9e},
+ // Block 0x15, offset 0x8c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x16, offset 0x8e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ // Block 0x17, offset 0x90
+ {value: 0x0000, lo: 0x08},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2cb9, lo: 0x88, hi: 0x88},
+ {value: 0x2cb1, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cc1, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x96, hi: 0x97},
+ {value: 0x45a7, lo: 0x9c, hi: 0x9c},
+ {value: 0x45af, lo: 0x9d, hi: 0x9d},
+ // Block 0x18, offset 0x99
+ {value: 0x0000, lo: 0x03},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x2cc9, lo: 0x94, hi: 0x94},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x19, offset 0x9d
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2cd1, lo: 0x8a, hi: 0x8a},
+ {value: 0x2ce1, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cd9, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x1a, offset 0xa4
+ {value: 0x1801, lo: 0x04},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x3ef3, lo: 0x88, hi: 0x88},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8120, lo: 0x95, hi: 0x96},
+ // Block 0x1b, offset 0xa9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0xa000, lo: 0xbf, hi: 0xbf},
+ // Block 0x1c, offset 0xac
+ {value: 0x0000, lo: 0x09},
+ {value: 0x2ce9, lo: 0x80, hi: 0x80},
+ {value: 0x9900, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x2cf1, lo: 0x87, hi: 0x87},
+ {value: 0x2cf9, lo: 0x88, hi: 0x88},
+ {value: 0x2f53, lo: 0x8a, hi: 0x8a},
+ {value: 0x2ddb, lo: 0x8b, hi: 0x8b},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x95, hi: 0x96},
+ // Block 0x1d, offset 0xb6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x1e, offset 0xb9
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2d01, lo: 0x8a, hi: 0x8a},
+ {value: 0x2d11, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d09, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x1f, offset 0xc0
+ {value: 0x6be7, lo: 0x07},
+ {value: 0x9904, lo: 0x8a, hi: 0x8a},
+ {value: 0x9900, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x3efb, lo: 0x9a, hi: 0x9a},
+ {value: 0x2f5b, lo: 0x9c, hi: 0x9c},
+ {value: 0x2de6, lo: 0x9d, hi: 0x9d},
+ {value: 0x2d19, lo: 0x9e, hi: 0x9f},
+ // Block 0x20, offset 0xc8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8122, lo: 0xb8, hi: 0xb9},
+ {value: 0x8104, lo: 0xba, hi: 0xba},
+ // Block 0x21, offset 0xcb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8123, lo: 0x88, hi: 0x8b},
+ // Block 0x22, offset 0xcd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8124, lo: 0xb8, hi: 0xb9},
+ {value: 0x8104, lo: 0xba, hi: 0xba},
+ // Block 0x23, offset 0xd0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8125, lo: 0x88, hi: 0x8b},
+ // Block 0x24, offset 0xd2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x812d, lo: 0x98, hi: 0x99},
+ {value: 0x812d, lo: 0xb5, hi: 0xb5},
+ {value: 0x812d, lo: 0xb7, hi: 0xb7},
+ {value: 0x812b, lo: 0xb9, hi: 0xb9},
+ // Block 0x25, offset 0xd7
+ {value: 0x0000, lo: 0x10},
+ {value: 0x2647, lo: 0x83, hi: 0x83},
+ {value: 0x264e, lo: 0x8d, hi: 0x8d},
+ {value: 0x2655, lo: 0x92, hi: 0x92},
+ {value: 0x265c, lo: 0x97, hi: 0x97},
+ {value: 0x2663, lo: 0x9c, hi: 0x9c},
+ {value: 0x2640, lo: 0xa9, hi: 0xa9},
+ {value: 0x8126, lo: 0xb1, hi: 0xb1},
+ {value: 0x8127, lo: 0xb2, hi: 0xb2},
+ {value: 0x4a87, lo: 0xb3, hi: 0xb3},
+ {value: 0x8128, lo: 0xb4, hi: 0xb4},
+ {value: 0x4a90, lo: 0xb5, hi: 0xb5},
+ {value: 0x45b7, lo: 0xb6, hi: 0xb6},
+ {value: 0x8200, lo: 0xb7, hi: 0xb7},
+ {value: 0x45bf, lo: 0xb8, hi: 0xb8},
+ {value: 0x8200, lo: 0xb9, hi: 0xb9},
+ {value: 0x8127, lo: 0xba, hi: 0xbd},
+ // Block 0x26, offset 0xe8
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x8127, lo: 0x80, hi: 0x80},
+ {value: 0x4a99, lo: 0x81, hi: 0x81},
+ {value: 0x8132, lo: 0x82, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0x86, hi: 0x87},
+ {value: 0x2671, lo: 0x93, hi: 0x93},
+ {value: 0x2678, lo: 0x9d, hi: 0x9d},
+ {value: 0x267f, lo: 0xa2, hi: 0xa2},
+ {value: 0x2686, lo: 0xa7, hi: 0xa7},
+ {value: 0x268d, lo: 0xac, hi: 0xac},
+ {value: 0x266a, lo: 0xb9, hi: 0xb9},
+ // Block 0x27, offset 0xf4
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x86, hi: 0x86},
+ // Block 0x28, offset 0xf6
+ {value: 0x0000, lo: 0x05},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x2d21, lo: 0xa6, hi: 0xa6},
+ {value: 0x9900, lo: 0xae, hi: 0xae},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x29, offset 0xfc
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ // Block 0x2a, offset 0xfe
+ {value: 0x0000, lo: 0x01},
+ {value: 0xa000, lo: 0x80, hi: 0x92},
+ // Block 0x2b, offset 0x100
+ {value: 0x0000, lo: 0x01},
+ {value: 0xb900, lo: 0xa1, hi: 0xb5},
+ // Block 0x2c, offset 0x102
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0xa8, hi: 0xbf},
+ // Block 0x2d, offset 0x104
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0x80, hi: 0x82},
+ // Block 0x2e, offset 0x106
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x9d, hi: 0x9f},
+ // Block 0x2f, offset 0x108
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x94, hi: 0x94},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x30, offset 0x10b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x9d, hi: 0x9d},
+ // Block 0x31, offset 0x10e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8131, lo: 0xa9, hi: 0xa9},
+ // Block 0x32, offset 0x110
+ {value: 0x0004, lo: 0x02},
+ {value: 0x812e, lo: 0xb9, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbb},
+ // Block 0x33, offset 0x113
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x97, hi: 0x97},
+ {value: 0x812d, lo: 0x98, hi: 0x98},
+ // Block 0x34, offset 0x116
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0xa0, hi: 0xa0},
+ {value: 0x8132, lo: 0xb5, hi: 0xbc},
+ {value: 0x812d, lo: 0xbf, hi: 0xbf},
+ // Block 0x35, offset 0x11a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ {value: 0x812d, lo: 0xb5, hi: 0xba},
+ {value: 0x8132, lo: 0xbb, hi: 0xbc},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x36, offset 0x11f
+ {value: 0x0000, lo: 0x08},
+ {value: 0x2d69, lo: 0x80, hi: 0x80},
+ {value: 0x2d71, lo: 0x81, hi: 0x81},
+ {value: 0xa000, lo: 0x82, hi: 0x82},
+ {value: 0x2d79, lo: 0x83, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xab, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xac},
+ {value: 0x8132, lo: 0xad, hi: 0xb3},
+ // Block 0x37, offset 0x128
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xaa, hi: 0xab},
+ // Block 0x38, offset 0x12a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xa6, hi: 0xa6},
+ {value: 0x8104, lo: 0xb2, hi: 0xb3},
+ // Block 0x39, offset 0x12d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x3a, offset 0x12f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x92},
+ {value: 0x8101, lo: 0x94, hi: 0x94},
+ {value: 0x812d, lo: 0x95, hi: 0x99},
+ {value: 0x8132, lo: 0x9a, hi: 0x9b},
+ {value: 0x812d, lo: 0x9c, hi: 0x9f},
+ {value: 0x8132, lo: 0xa0, hi: 0xa0},
+ {value: 0x8101, lo: 0xa2, hi: 0xa8},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ {value: 0x8132, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb8, hi: 0xb9},
+ // Block 0x3b, offset 0x13a
+ {value: 0x0004, lo: 0x03},
+ {value: 0x0433, lo: 0x80, hi: 0x81},
+ {value: 0x8100, lo: 0x97, hi: 0x97},
+ {value: 0x8100, lo: 0xbe, hi: 0xbe},
+ // Block 0x3c, offset 0x13e
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x8132, lo: 0x90, hi: 0x91},
+ {value: 0x8101, lo: 0x92, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x97},
+ {value: 0x8101, lo: 0x98, hi: 0x9a},
+ {value: 0x8132, lo: 0x9b, hi: 0x9c},
+ {value: 0x8132, lo: 0xa1, hi: 0xa1},
+ {value: 0x8101, lo: 0xa5, hi: 0xa6},
+ {value: 0x8132, lo: 0xa7, hi: 0xa7},
+ {value: 0x812d, lo: 0xa8, hi: 0xa8},
+ {value: 0x8132, lo: 0xa9, hi: 0xa9},
+ {value: 0x8101, lo: 0xaa, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xaf},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ // Block 0x3d, offset 0x14c
+ {value: 0x427e, lo: 0x02},
+ {value: 0x01b8, lo: 0xa6, hi: 0xa6},
+ {value: 0x0057, lo: 0xaa, hi: 0xab},
+ // Block 0x3e, offset 0x14f
+ {value: 0x0007, lo: 0x05},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ {value: 0x3bbc, lo: 0x9a, hi: 0x9b},
+ {value: 0x3bca, lo: 0xae, hi: 0xae},
+ // Block 0x3f, offset 0x155
+ {value: 0x000e, lo: 0x05},
+ {value: 0x3bd1, lo: 0x8d, hi: 0x8e},
+ {value: 0x3bd8, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ // Block 0x40, offset 0x15b
+ {value: 0x6405, lo: 0x0a},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0x3be6, lo: 0x84, hi: 0x84},
+ {value: 0xa000, lo: 0x88, hi: 0x88},
+ {value: 0x3bed, lo: 0x89, hi: 0x89},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0x3bf4, lo: 0x8c, hi: 0x8c},
+ {value: 0xa000, lo: 0xa3, hi: 0xa3},
+ {value: 0x3bfb, lo: 0xa4, hi: 0xa5},
+ {value: 0x3c02, lo: 0xa6, hi: 0xa6},
+ {value: 0xa000, lo: 0xbc, hi: 0xbc},
+ // Block 0x41, offset 0x166
+ {value: 0x0007, lo: 0x03},
+ {value: 0x3c6b, lo: 0xa0, hi: 0xa1},
+ {value: 0x3c95, lo: 0xa2, hi: 0xa3},
+ {value: 0x3cbf, lo: 0xaa, hi: 0xad},
+ // Block 0x42, offset 0x16a
+ {value: 0x0004, lo: 0x01},
+ {value: 0x048b, lo: 0xa9, hi: 0xaa},
+ // Block 0x43, offset 0x16c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x44e0, lo: 0x9c, hi: 0x9c},
+ // Block 0x44, offset 0x16e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xaf, hi: 0xb1},
+ // Block 0x45, offset 0x170
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x46, offset 0x172
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa0, hi: 0xbf},
+ // Block 0x47, offset 0x174
+ {value: 0x0000, lo: 0x05},
+ {value: 0x812c, lo: 0xaa, hi: 0xaa},
+ {value: 0x8131, lo: 0xab, hi: 0xab},
+ {value: 0x8133, lo: 0xac, hi: 0xac},
+ {value: 0x812e, lo: 0xad, hi: 0xad},
+ {value: 0x812f, lo: 0xae, hi: 0xaf},
+ // Block 0x48, offset 0x17a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x4aa2, lo: 0xb3, hi: 0xb3},
+ {value: 0x4aa2, lo: 0xb5, hi: 0xb6},
+ {value: 0x4aa2, lo: 0xba, hi: 0xbf},
+ // Block 0x49, offset 0x17e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x4aa2, lo: 0x8f, hi: 0xa3},
+ // Block 0x4a, offset 0x180
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0xae, hi: 0xbe},
+ // Block 0x4b, offset 0x182
+ {value: 0x0000, lo: 0x07},
+ {value: 0x8100, lo: 0x84, hi: 0x84},
+ {value: 0x8100, lo: 0x87, hi: 0x87},
+ {value: 0x8100, lo: 0x90, hi: 0x90},
+ {value: 0x8100, lo: 0x9e, hi: 0x9e},
+ {value: 0x8100, lo: 0xa1, hi: 0xa1},
+ {value: 0x8100, lo: 0xb2, hi: 0xb2},
+ {value: 0x8100, lo: 0xbb, hi: 0xbb},
+ // Block 0x4c, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8100, lo: 0x80, hi: 0x80},
+ {value: 0x8100, lo: 0x8b, hi: 0x8b},
+ {value: 0x8100, lo: 0x8e, hi: 0x8e},
+ // Block 0x4d, offset 0x18e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xaf, hi: 0xaf},
+ {value: 0x8132, lo: 0xb4, hi: 0xbd},
+ // Block 0x4e, offset 0x191
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x9e, hi: 0x9f},
+ // Block 0x4f, offset 0x193
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb1},
+ // Block 0x50, offset 0x195
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ // Block 0x51, offset 0x197
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xa0, hi: 0xb1},
+ // Block 0x52, offset 0x19a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xab, hi: 0xad},
+ // Block 0x53, offset 0x19c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x93, hi: 0x93},
+ // Block 0x54, offset 0x19e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb3, hi: 0xb3},
+ // Block 0x55, offset 0x1a0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ // Block 0x56, offset 0x1a2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb7, hi: 0xb8},
+ {value: 0x8132, lo: 0xbe, hi: 0xbf},
+ // Block 0x57, offset 0x1a8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ // Block 0x58, offset 0x1ab
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xad, hi: 0xad},
+ // Block 0x59, offset 0x1ad
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe500, lo: 0x80, hi: 0x80},
+ {value: 0xc600, lo: 0x81, hi: 0x9b},
+ {value: 0xe500, lo: 0x9c, hi: 0x9c},
+ {value: 0xc600, lo: 0x9d, hi: 0xb7},
+ {value: 0xe500, lo: 0xb8, hi: 0xb8},
+ {value: 0xc600, lo: 0xb9, hi: 0xbf},
+ // Block 0x5a, offset 0x1b4
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x93},
+ {value: 0xe500, lo: 0x94, hi: 0x94},
+ {value: 0xc600, lo: 0x95, hi: 0xaf},
+ {value: 0xe500, lo: 0xb0, hi: 0xb0},
+ {value: 0xc600, lo: 0xb1, hi: 0xbf},
+ // Block 0x5b, offset 0x1ba
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8b},
+ {value: 0xe500, lo: 0x8c, hi: 0x8c},
+ {value: 0xc600, lo: 0x8d, hi: 0xa7},
+ {value: 0xe500, lo: 0xa8, hi: 0xa8},
+ {value: 0xc600, lo: 0xa9, hi: 0xbf},
+ // Block 0x5c, offset 0x1c0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xc600, lo: 0x80, hi: 0x83},
+ {value: 0xe500, lo: 0x84, hi: 0x84},
+ {value: 0xc600, lo: 0x85, hi: 0x9f},
+ {value: 0xe500, lo: 0xa0, hi: 0xa0},
+ {value: 0xc600, lo: 0xa1, hi: 0xbb},
+ {value: 0xe500, lo: 0xbc, hi: 0xbc},
+ {value: 0xc600, lo: 0xbd, hi: 0xbf},
+ // Block 0x5d, offset 0x1c8
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x97},
+ {value: 0xe500, lo: 0x98, hi: 0x98},
+ {value: 0xc600, lo: 0x99, hi: 0xb3},
+ {value: 0xe500, lo: 0xb4, hi: 0xb4},
+ {value: 0xc600, lo: 0xb5, hi: 0xbf},
+ // Block 0x5e, offset 0x1ce
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8f},
+ {value: 0xe500, lo: 0x90, hi: 0x90},
+ {value: 0xc600, lo: 0x91, hi: 0xab},
+ {value: 0xe500, lo: 0xac, hi: 0xac},
+ {value: 0xc600, lo: 0xad, hi: 0xbf},
+ // Block 0x5f, offset 0x1d4
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ {value: 0xe500, lo: 0xa4, hi: 0xa4},
+ {value: 0xc600, lo: 0xa5, hi: 0xbf},
+ // Block 0x60, offset 0x1da
+ {value: 0x0000, lo: 0x03},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ // Block 0x61, offset 0x1de
+ {value: 0x0006, lo: 0x0d},
+ {value: 0x4393, lo: 0x9d, hi: 0x9d},
+ {value: 0x8115, lo: 0x9e, hi: 0x9e},
+ {value: 0x4405, lo: 0x9f, hi: 0x9f},
+ {value: 0x43f3, lo: 0xaa, hi: 0xab},
+ {value: 0x44f7, lo: 0xac, hi: 0xac},
+ {value: 0x44ff, lo: 0xad, hi: 0xad},
+ {value: 0x434b, lo: 0xae, hi: 0xb1},
+ {value: 0x4369, lo: 0xb2, hi: 0xb4},
+ {value: 0x4381, lo: 0xb5, hi: 0xb6},
+ {value: 0x438d, lo: 0xb8, hi: 0xb8},
+ {value: 0x4399, lo: 0xb9, hi: 0xbb},
+ {value: 0x43b1, lo: 0xbc, hi: 0xbc},
+ {value: 0x43b7, lo: 0xbe, hi: 0xbe},
+ // Block 0x62, offset 0x1ec
+ {value: 0x0006, lo: 0x08},
+ {value: 0x43bd, lo: 0x80, hi: 0x81},
+ {value: 0x43c9, lo: 0x83, hi: 0x84},
+ {value: 0x43db, lo: 0x86, hi: 0x89},
+ {value: 0x43ff, lo: 0x8a, hi: 0x8a},
+ {value: 0x437b, lo: 0x8b, hi: 0x8b},
+ {value: 0x4363, lo: 0x8c, hi: 0x8c},
+ {value: 0x43ab, lo: 0x8d, hi: 0x8d},
+ {value: 0x43d5, lo: 0x8e, hi: 0x8e},
+ // Block 0x63, offset 0x1f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8100, lo: 0xa4, hi: 0xa5},
+ {value: 0x8100, lo: 0xb0, hi: 0xb1},
+ // Block 0x64, offset 0x1f8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8100, lo: 0x9b, hi: 0x9d},
+ {value: 0x8200, lo: 0x9e, hi: 0xa3},
+ // Block 0x65, offset 0x1fb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x90, hi: 0x90},
+ // Block 0x66, offset 0x1fd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8100, lo: 0x99, hi: 0x99},
+ {value: 0x8200, lo: 0xb2, hi: 0xb4},
+ // Block 0x67, offset 0x200
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0xbc, hi: 0xbd},
+ // Block 0x68, offset 0x202
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8132, lo: 0xa0, hi: 0xa6},
+ {value: 0x812d, lo: 0xa7, hi: 0xad},
+ {value: 0x8132, lo: 0xae, hi: 0xaf},
+ // Block 0x69, offset 0x206
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8100, lo: 0x89, hi: 0x8c},
+ {value: 0x8100, lo: 0xb0, hi: 0xb2},
+ {value: 0x8100, lo: 0xb4, hi: 0xb4},
+ {value: 0x8100, lo: 0xb6, hi: 0xbf},
+ // Block 0x6a, offset 0x20b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x81, hi: 0x8c},
+ // Block 0x6b, offset 0x20d
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0xb5, hi: 0xba},
+ // Block 0x6c, offset 0x20f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x4aa2, lo: 0x9e, hi: 0x9f},
+ {value: 0x4aa2, lo: 0xa3, hi: 0xa3},
+ {value: 0x4aa2, lo: 0xa5, hi: 0xa6},
+ {value: 0x4aa2, lo: 0xaa, hi: 0xaf},
+ // Block 0x6d, offset 0x214
+ {value: 0x0000, lo: 0x05},
+ {value: 0x4aa2, lo: 0x82, hi: 0x87},
+ {value: 0x4aa2, lo: 0x8a, hi: 0x8f},
+ {value: 0x4aa2, lo: 0x92, hi: 0x97},
+ {value: 0x4aa2, lo: 0x9a, hi: 0x9c},
+ {value: 0x8100, lo: 0xa3, hi: 0xa3},
+ // Block 0x6e, offset 0x21a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x6f, offset 0x21c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xa0, hi: 0xa0},
+ // Block 0x70, offset 0x21e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb6, hi: 0xba},
+ // Block 0x71, offset 0x220
+ {value: 0x002c, lo: 0x05},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x8f, hi: 0x8f},
+ {value: 0x8132, lo: 0xb8, hi: 0xb8},
+ {value: 0x8101, lo: 0xb9, hi: 0xba},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x72, offset 0x226
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xa5, hi: 0xa5},
+ {value: 0x812d, lo: 0xa6, hi: 0xa6},
+ // Block 0x73, offset 0x229
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa4, hi: 0xa7},
+ // Block 0x74, offset 0x22b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x8132, lo: 0x88, hi: 0x8a},
+ {value: 0x812d, lo: 0x8b, hi: 0x8b},
+ {value: 0x8132, lo: 0x8c, hi: 0x8c},
+ {value: 0x812d, lo: 0x8d, hi: 0x90},
+ // Block 0x75, offset 0x231
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x76, offset 0x234
+ {value: 0x17fe, lo: 0x07},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x423b, lo: 0x9a, hi: 0x9a},
+ {value: 0xa000, lo: 0x9b, hi: 0x9b},
+ {value: 0x4245, lo: 0x9c, hi: 0x9c},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x424f, lo: 0xab, hi: 0xab},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x77, offset 0x23c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x8132, lo: 0x80, hi: 0x82},
+ {value: 0x9900, lo: 0xa7, hi: 0xa7},
+ {value: 0x2d81, lo: 0xae, hi: 0xae},
+ {value: 0x2d8b, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb1, hi: 0xb2},
+ {value: 0x8104, lo: 0xb3, hi: 0xb4},
+ // Block 0x78, offset 0x243
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x79, offset 0x246
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb5, hi: 0xb5},
+ {value: 0x8102, lo: 0xb6, hi: 0xb6},
+ // Block 0x7a, offset 0x249
+ {value: 0x0002, lo: 0x01},
+ {value: 0x8102, lo: 0xa9, hi: 0xaa},
+ // Block 0x7b, offset 0x24b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x7c, offset 0x24e
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2d95, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d9f, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x8132, lo: 0xa6, hi: 0xac},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ // Block 0x7d, offset 0x256
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x86, hi: 0x86},
+ {value: 0x8132, lo: 0x9e, hi: 0x9e},
+ // Block 0x7e, offset 0x25a
+ {value: 0x6b57, lo: 0x06},
+ {value: 0x9900, lo: 0xb0, hi: 0xb0},
+ {value: 0xa000, lo: 0xb9, hi: 0xb9},
+ {value: 0x9900, lo: 0xba, hi: 0xba},
+ {value: 0x2db3, lo: 0xbb, hi: 0xbb},
+ {value: 0x2da9, lo: 0xbc, hi: 0xbd},
+ {value: 0x2dbd, lo: 0xbe, hi: 0xbe},
+ // Block 0x7f, offset 0x261
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x83, hi: 0x83},
+ // Block 0x80, offset 0x264
+ {value: 0x0000, lo: 0x05},
+ {value: 0x9900, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb8, hi: 0xb9},
+ {value: 0x2dc7, lo: 0xba, hi: 0xba},
+ {value: 0x2dd1, lo: 0xbb, hi: 0xbb},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x81, offset 0x26a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0x80, hi: 0x80},
+ // Block 0x82, offset 0x26c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x83, offset 0x26f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xab, hi: 0xab},
+ // Block 0x84, offset 0x271
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb9, hi: 0xb9},
+ {value: 0x8102, lo: 0xba, hi: 0xba},
+ // Block 0x85, offset 0x274
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xa0, hi: 0xa0},
+ // Block 0x86, offset 0x276
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x87, offset 0x278
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x87, hi: 0x87},
+ // Block 0x88, offset 0x27a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x99, hi: 0x99},
+ // Block 0x89, offset 0x27c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0x82, hi: 0x82},
+ {value: 0x8104, lo: 0x84, hi: 0x85},
+ // Block 0x8a, offset 0x27f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x97, hi: 0x97},
+ // Block 0x8b, offset 0x281
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0xb0, hi: 0xb4},
+ // Block 0x8c, offset 0x283
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb6},
+ // Block 0x8d, offset 0x285
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0x9e, hi: 0x9e},
+ // Block 0x8e, offset 0x287
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x45cf, lo: 0x9e, hi: 0x9e},
+ {value: 0x45d9, lo: 0x9f, hi: 0x9f},
+ {value: 0x460d, lo: 0xa0, hi: 0xa0},
+ {value: 0x461b, lo: 0xa1, hi: 0xa1},
+ {value: 0x4629, lo: 0xa2, hi: 0xa2},
+ {value: 0x4637, lo: 0xa3, hi: 0xa3},
+ {value: 0x4645, lo: 0xa4, hi: 0xa4},
+ {value: 0x812b, lo: 0xa5, hi: 0xa6},
+ {value: 0x8101, lo: 0xa7, hi: 0xa9},
+ {value: 0x8130, lo: 0xad, hi: 0xad},
+ {value: 0x812b, lo: 0xae, hi: 0xb2},
+ {value: 0x812d, lo: 0xbb, hi: 0xbf},
+ // Block 0x8f, offset 0x294
+ {value: 0x0000, lo: 0x09},
+ {value: 0x812d, lo: 0x80, hi: 0x82},
+ {value: 0x8132, lo: 0x85, hi: 0x89},
+ {value: 0x812d, lo: 0x8a, hi: 0x8b},
+ {value: 0x8132, lo: 0xaa, hi: 0xad},
+ {value: 0x45e3, lo: 0xbb, hi: 0xbb},
+ {value: 0x45ed, lo: 0xbc, hi: 0xbc},
+ {value: 0x4653, lo: 0xbd, hi: 0xbd},
+ {value: 0x466f, lo: 0xbe, hi: 0xbe},
+ {value: 0x4661, lo: 0xbf, hi: 0xbf},
+ // Block 0x90, offset 0x29e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x467d, lo: 0x80, hi: 0x80},
+ // Block 0x91, offset 0x2a0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x82, hi: 0x84},
+ // Block 0x92, offset 0x2a2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0x80, hi: 0x86},
+ {value: 0x8132, lo: 0x88, hi: 0x98},
+ {value: 0x8132, lo: 0x9b, hi: 0xa1},
+ {value: 0x8132, lo: 0xa3, hi: 0xa4},
+ {value: 0x8132, lo: 0xa6, hi: 0xaa},
+ // Block 0x93, offset 0x2a8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xac, hi: 0xaf},
+ // Block 0x94, offset 0x2aa
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x90, hi: 0x96},
+ // Block 0x95, offset 0x2ac
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x84, hi: 0x89},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x96, offset 0x2af
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8100, lo: 0x93, hi: 0x93},
+}
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfkcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfkcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfkcValues[c0]
+ }
+ i := nfkcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return nfkcValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := nfkcIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = nfkcIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = nfkcIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return nfkcValues[c0]
+ }
+ i := nfkcIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// nfkcTrie. Total size: 18684 bytes (18.25 KiB). Checksum: 113e23c477adfabd.
+type nfkcTrie struct{}
+
+func newNfkcTrie(i int) *nfkcTrie {
+ return &nfkcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 92:
+ return uint16(nfkcValues[n<<6+uint32(b)])
+ default:
+ n -= 92
+ return uint16(nfkcSparse.lookup(n, b))
+ }
+}
+
+// nfkcValues: 94 blocks, 6016 entries, 12032 bytes
+// The third block is the zero block.
+var nfkcValues = [6016]uint16{
+ // Block 0x0, offset 0x0
+ 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000,
+ // Block 0x1, offset 0x40
+ 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000,
+ 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000,
+ 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000,
+ 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000,
+ 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000,
+ 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000,
+ 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000,
+ 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000,
+ 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000,
+ 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x2f72, 0xc1: 0x2f77, 0xc2: 0x468b, 0xc3: 0x2f7c, 0xc4: 0x469a, 0xc5: 0x469f,
+ 0xc6: 0xa000, 0xc7: 0x46a9, 0xc8: 0x2fe5, 0xc9: 0x2fea, 0xca: 0x46ae, 0xcb: 0x2ffe,
+ 0xcc: 0x3071, 0xcd: 0x3076, 0xce: 0x307b, 0xcf: 0x46c2, 0xd1: 0x3107,
+ 0xd2: 0x312a, 0xd3: 0x312f, 0xd4: 0x46cc, 0xd5: 0x46d1, 0xd6: 0x46e0,
+ 0xd8: 0xa000, 0xd9: 0x31b6, 0xda: 0x31bb, 0xdb: 0x31c0, 0xdc: 0x4712, 0xdd: 0x3238,
+ 0xe0: 0x327e, 0xe1: 0x3283, 0xe2: 0x471c, 0xe3: 0x3288,
+ 0xe4: 0x472b, 0xe5: 0x4730, 0xe6: 0xa000, 0xe7: 0x473a, 0xe8: 0x32f1, 0xe9: 0x32f6,
+ 0xea: 0x473f, 0xeb: 0x330a, 0xec: 0x3382, 0xed: 0x3387, 0xee: 0x338c, 0xef: 0x4753,
+ 0xf1: 0x3418, 0xf2: 0x343b, 0xf3: 0x3440, 0xf4: 0x475d, 0xf5: 0x4762,
+ 0xf6: 0x4771, 0xf8: 0xa000, 0xf9: 0x34cc, 0xfa: 0x34d1, 0xfb: 0x34d6,
+ 0xfc: 0x47a3, 0xfd: 0x3553, 0xff: 0x356c,
+ // Block 0x4, offset 0x100
+ 0x100: 0x2f81, 0x101: 0x328d, 0x102: 0x4690, 0x103: 0x4721, 0x104: 0x2f9f, 0x105: 0x32ab,
+ 0x106: 0x2fb3, 0x107: 0x32bf, 0x108: 0x2fb8, 0x109: 0x32c4, 0x10a: 0x2fbd, 0x10b: 0x32c9,
+ 0x10c: 0x2fc2, 0x10d: 0x32ce, 0x10e: 0x2fcc, 0x10f: 0x32d8,
+ 0x112: 0x46b3, 0x113: 0x4744, 0x114: 0x2ff4, 0x115: 0x3300, 0x116: 0x2ff9, 0x117: 0x3305,
+ 0x118: 0x3017, 0x119: 0x3323, 0x11a: 0x3008, 0x11b: 0x3314, 0x11c: 0x3030, 0x11d: 0x333c,
+ 0x11e: 0x303a, 0x11f: 0x3346, 0x120: 0x303f, 0x121: 0x334b, 0x122: 0x3049, 0x123: 0x3355,
+ 0x124: 0x304e, 0x125: 0x335a, 0x128: 0x3080, 0x129: 0x3391,
+ 0x12a: 0x3085, 0x12b: 0x3396, 0x12c: 0x308a, 0x12d: 0x339b, 0x12e: 0x30ad, 0x12f: 0x33b9,
+ 0x130: 0x308f, 0x132: 0x195d, 0x133: 0x19ea, 0x134: 0x30b7, 0x135: 0x33c3,
+ 0x136: 0x30cb, 0x137: 0x33dc, 0x139: 0x30d5, 0x13a: 0x33e6, 0x13b: 0x30df,
+ 0x13c: 0x33f0, 0x13d: 0x30da, 0x13e: 0x33eb, 0x13f: 0x1baf,
+ // Block 0x5, offset 0x140
+ 0x140: 0x1c37, 0x143: 0x3102, 0x144: 0x3413, 0x145: 0x311b,
+ 0x146: 0x342c, 0x147: 0x3111, 0x148: 0x3422, 0x149: 0x1c5f,
+ 0x14c: 0x46d6, 0x14d: 0x4767, 0x14e: 0x3134, 0x14f: 0x3445, 0x150: 0x313e, 0x151: 0x344f,
+ 0x154: 0x315c, 0x155: 0x346d, 0x156: 0x3175, 0x157: 0x3486,
+ 0x158: 0x3166, 0x159: 0x3477, 0x15a: 0x46f9, 0x15b: 0x478a, 0x15c: 0x317f, 0x15d: 0x3490,
+ 0x15e: 0x318e, 0x15f: 0x349f, 0x160: 0x46fe, 0x161: 0x478f, 0x162: 0x31a7, 0x163: 0x34bd,
+ 0x164: 0x3198, 0x165: 0x34ae, 0x168: 0x4708, 0x169: 0x4799,
+ 0x16a: 0x470d, 0x16b: 0x479e, 0x16c: 0x31c5, 0x16d: 0x34db, 0x16e: 0x31cf, 0x16f: 0x34e5,
+ 0x170: 0x31d4, 0x171: 0x34ea, 0x172: 0x31f2, 0x173: 0x3508, 0x174: 0x3215, 0x175: 0x352b,
+ 0x176: 0x323d, 0x177: 0x3558, 0x178: 0x3251, 0x179: 0x3260, 0x17a: 0x3580, 0x17b: 0x326a,
+ 0x17c: 0x358a, 0x17d: 0x326f, 0x17e: 0x358f, 0x17f: 0x00a7,
+ // Block 0x6, offset 0x180
+ 0x184: 0x2df1, 0x185: 0x2df7,
+ 0x186: 0x2dfd, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a0b, 0x18a: 0x198a, 0x18b: 0x198d,
+ 0x18c: 0x1a41, 0x18d: 0x2f8b, 0x18e: 0x3297, 0x18f: 0x3099, 0x190: 0x33a5, 0x191: 0x3143,
+ 0x192: 0x3454, 0x193: 0x31d9, 0x194: 0x34ef, 0x195: 0x39d2, 0x196: 0x3b61, 0x197: 0x39cb,
+ 0x198: 0x3b5a, 0x199: 0x39d9, 0x19a: 0x3b68, 0x19b: 0x39c4, 0x19c: 0x3b53,
+ 0x19e: 0x38b3, 0x19f: 0x3a42, 0x1a0: 0x38ac, 0x1a1: 0x3a3b, 0x1a2: 0x35b6, 0x1a3: 0x35c8,
+ 0x1a6: 0x3044, 0x1a7: 0x3350, 0x1a8: 0x30c1, 0x1a9: 0x33d2,
+ 0x1aa: 0x46ef, 0x1ab: 0x4780, 0x1ac: 0x3993, 0x1ad: 0x3b22, 0x1ae: 0x35da, 0x1af: 0x35e0,
+ 0x1b0: 0x33c8, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19d2, 0x1b4: 0x302b, 0x1b5: 0x3337,
+ 0x1b8: 0x30fd, 0x1b9: 0x340e, 0x1ba: 0x38ba, 0x1bb: 0x3a49,
+ 0x1bc: 0x35b0, 0x1bd: 0x35c2, 0x1be: 0x35bc, 0x1bf: 0x35ce,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x2f90, 0x1c1: 0x329c, 0x1c2: 0x2f95, 0x1c3: 0x32a1, 0x1c4: 0x300d, 0x1c5: 0x3319,
+ 0x1c6: 0x3012, 0x1c7: 0x331e, 0x1c8: 0x309e, 0x1c9: 0x33aa, 0x1ca: 0x30a3, 0x1cb: 0x33af,
+ 0x1cc: 0x3148, 0x1cd: 0x3459, 0x1ce: 0x314d, 0x1cf: 0x345e, 0x1d0: 0x316b, 0x1d1: 0x347c,
+ 0x1d2: 0x3170, 0x1d3: 0x3481, 0x1d4: 0x31de, 0x1d5: 0x34f4, 0x1d6: 0x31e3, 0x1d7: 0x34f9,
+ 0x1d8: 0x3189, 0x1d9: 0x349a, 0x1da: 0x31a2, 0x1db: 0x34b8,
+ 0x1de: 0x305d, 0x1df: 0x3369,
+ 0x1e6: 0x4695, 0x1e7: 0x4726, 0x1e8: 0x46bd, 0x1e9: 0x474e,
+ 0x1ea: 0x3962, 0x1eb: 0x3af1, 0x1ec: 0x393f, 0x1ed: 0x3ace, 0x1ee: 0x46db, 0x1ef: 0x476c,
+ 0x1f0: 0x395b, 0x1f1: 0x3aea, 0x1f2: 0x3247, 0x1f3: 0x3562,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132,
+ 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932,
+ 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932,
+ 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d,
+ 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d,
+ 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d,
+ 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d,
+ 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d,
+ 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101,
+ 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d,
+ 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132,
+ // Block 0x9, offset 0x240
+ 0x240: 0x49b1, 0x241: 0x49b6, 0x242: 0x9932, 0x243: 0x49bb, 0x244: 0x4a74, 0x245: 0x9936,
+ 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132,
+ 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132,
+ 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132,
+ 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135,
+ 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132,
+ 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132,
+ 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132,
+ 0x274: 0x0170,
+ 0x27a: 0x42a8,
+ 0x27e: 0x0037,
+ // Block 0xa, offset 0x280
+ 0x284: 0x425d, 0x285: 0x447e,
+ 0x286: 0x35ec, 0x287: 0x00ce, 0x288: 0x360a, 0x289: 0x3616, 0x28a: 0x3628,
+ 0x28c: 0x3646, 0x28e: 0x3658, 0x28f: 0x3676, 0x290: 0x3e0b, 0x291: 0xa000,
+ 0x295: 0xa000, 0x297: 0xa000,
+ 0x299: 0xa000,
+ 0x29f: 0xa000, 0x2a1: 0xa000,
+ 0x2a5: 0xa000, 0x2a9: 0xa000,
+ 0x2aa: 0x363a, 0x2ab: 0x366a, 0x2ac: 0x4801, 0x2ad: 0x369a, 0x2ae: 0x482b, 0x2af: 0x36ac,
+ 0x2b0: 0x3e73, 0x2b1: 0xa000, 0x2b5: 0xa000,
+ 0x2b7: 0xa000, 0x2b9: 0xa000,
+ 0x2bf: 0xa000,
+ // Block 0xb, offset 0x2c0
+ 0x2c1: 0xa000, 0x2c5: 0xa000,
+ 0x2c9: 0xa000, 0x2ca: 0x4843, 0x2cb: 0x4861,
+ 0x2cc: 0x36ca, 0x2cd: 0x36e2, 0x2ce: 0x4879, 0x2d0: 0x01be, 0x2d1: 0x01d0,
+ 0x2d2: 0x01ac, 0x2d3: 0x430f, 0x2d4: 0x4315, 0x2d5: 0x01fa, 0x2d6: 0x01e8,
+ 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7,
+ 0x2f9: 0x01a6,
+ // Block 0xc, offset 0x300
+ 0x300: 0x3724, 0x301: 0x3730, 0x303: 0x371e,
+ 0x306: 0xa000, 0x307: 0x370c,
+ 0x30c: 0x3760, 0x30d: 0x3748, 0x30e: 0x3772, 0x310: 0xa000,
+ 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000,
+ 0x318: 0xa000, 0x319: 0x3754, 0x31a: 0xa000,
+ 0x31e: 0xa000, 0x323: 0xa000,
+ 0x327: 0xa000,
+ 0x32b: 0xa000, 0x32d: 0xa000,
+ 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000,
+ 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d8, 0x33a: 0xa000,
+ 0x33e: 0xa000,
+ // Block 0xd, offset 0x340
+ 0x341: 0x3736, 0x342: 0x37ba,
+ 0x350: 0x3712, 0x351: 0x3796,
+ 0x352: 0x3718, 0x353: 0x379c, 0x356: 0x372a, 0x357: 0x37ae,
+ 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x382c, 0x35b: 0x3832, 0x35c: 0x373c, 0x35d: 0x37c0,
+ 0x35e: 0x3742, 0x35f: 0x37c6, 0x362: 0x374e, 0x363: 0x37d2,
+ 0x364: 0x375a, 0x365: 0x37de, 0x366: 0x3766, 0x367: 0x37ea, 0x368: 0xa000, 0x369: 0xa000,
+ 0x36a: 0x3838, 0x36b: 0x383e, 0x36c: 0x3790, 0x36d: 0x3814, 0x36e: 0x376c, 0x36f: 0x37f0,
+ 0x370: 0x3778, 0x371: 0x37fc, 0x372: 0x377e, 0x373: 0x3802, 0x374: 0x3784, 0x375: 0x3808,
+ 0x378: 0x378a, 0x379: 0x380e,
+ // Block 0xe, offset 0x380
+ 0x387: 0x1d64,
+ 0x391: 0x812d,
+ 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132,
+ 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132,
+ 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d,
+ 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132,
+ 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132,
+ 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a,
+ 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f,
+ 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112,
+ // Block 0xf, offset 0x3c0
+ 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116,
+ 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c,
+ 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132,
+ 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132,
+ 0x3de: 0x8132, 0x3df: 0x812d,
+ 0x3f0: 0x811e, 0x3f5: 0x1d87,
+ 0x3f6: 0x2016, 0x3f7: 0x2052, 0x3f8: 0x204d,
+ // Block 0x10, offset 0x400
+ 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132,
+ 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132,
+ 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d,
+ 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d,
+ 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d,
+ 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 0x8132, 0x434: 0x8132, 0x435: 0x8132,
+ 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132,
+ 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132,
+ // Block 0x11, offset 0x440
+ 0x445: 0xa000,
+ 0x446: 0x2d29, 0x447: 0xa000, 0x448: 0x2d31, 0x449: 0xa000, 0x44a: 0x2d39, 0x44b: 0xa000,
+ 0x44c: 0x2d41, 0x44d: 0xa000, 0x44e: 0x2d49, 0x451: 0xa000,
+ 0x452: 0x2d51,
+ 0x474: 0x8102, 0x475: 0x9900,
+ 0x47a: 0xa000, 0x47b: 0x2d59,
+ 0x47c: 0xa000, 0x47d: 0x2d61, 0x47e: 0xa000, 0x47f: 0xa000,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8,
+ 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107,
+ 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0,
+ 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9,
+ 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be,
+ 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5,
+ 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa,
+ 0x4aa: 0x01fd,
+ 0x4b8: 0x020c,
+ // Block 0x13, offset 0x4c0
+ 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101,
+ 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116,
+ 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128,
+ 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137,
+ 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec,
+ 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5,
+ 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0,
+ // Block 0x14, offset 0x500
+ 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132,
+ 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132,
+ 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132,
+ 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132,
+ 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132,
+ 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132,
+ 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132,
+ 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132,
+ 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132,
+ 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132,
+ 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d,
+ // Block 0x15, offset 0x540
+ 0x540: 0x2f9a, 0x541: 0x32a6, 0x542: 0x2fa4, 0x543: 0x32b0, 0x544: 0x2fa9, 0x545: 0x32b5,
+ 0x546: 0x2fae, 0x547: 0x32ba, 0x548: 0x38cf, 0x549: 0x3a5e, 0x54a: 0x2fc7, 0x54b: 0x32d3,
+ 0x54c: 0x2fd1, 0x54d: 0x32dd, 0x54e: 0x2fe0, 0x54f: 0x32ec, 0x550: 0x2fd6, 0x551: 0x32e2,
+ 0x552: 0x2fdb, 0x553: 0x32e7, 0x554: 0x38f2, 0x555: 0x3a81, 0x556: 0x38f9, 0x557: 0x3a88,
+ 0x558: 0x301c, 0x559: 0x3328, 0x55a: 0x3021, 0x55b: 0x332d, 0x55c: 0x3907, 0x55d: 0x3a96,
+ 0x55e: 0x3026, 0x55f: 0x3332, 0x560: 0x3035, 0x561: 0x3341, 0x562: 0x3053, 0x563: 0x335f,
+ 0x564: 0x3062, 0x565: 0x336e, 0x566: 0x3058, 0x567: 0x3364, 0x568: 0x3067, 0x569: 0x3373,
+ 0x56a: 0x306c, 0x56b: 0x3378, 0x56c: 0x30b2, 0x56d: 0x33be, 0x56e: 0x390e, 0x56f: 0x3a9d,
+ 0x570: 0x30bc, 0x571: 0x33cd, 0x572: 0x30c6, 0x573: 0x33d7, 0x574: 0x30d0, 0x575: 0x33e1,
+ 0x576: 0x46c7, 0x577: 0x4758, 0x578: 0x3915, 0x579: 0x3aa4, 0x57a: 0x30e9, 0x57b: 0x33fa,
+ 0x57c: 0x30e4, 0x57d: 0x33f5, 0x57e: 0x30ee, 0x57f: 0x33ff,
+ // Block 0x16, offset 0x580
+ 0x580: 0x30f3, 0x581: 0x3404, 0x582: 0x30f8, 0x583: 0x3409, 0x584: 0x310c, 0x585: 0x341d,
+ 0x586: 0x3116, 0x587: 0x3427, 0x588: 0x3125, 0x589: 0x3436, 0x58a: 0x3120, 0x58b: 0x3431,
+ 0x58c: 0x3938, 0x58d: 0x3ac7, 0x58e: 0x3946, 0x58f: 0x3ad5, 0x590: 0x394d, 0x591: 0x3adc,
+ 0x592: 0x3954, 0x593: 0x3ae3, 0x594: 0x3152, 0x595: 0x3463, 0x596: 0x3157, 0x597: 0x3468,
+ 0x598: 0x3161, 0x599: 0x3472, 0x59a: 0x46f4, 0x59b: 0x4785, 0x59c: 0x399a, 0x59d: 0x3b29,
+ 0x59e: 0x317a, 0x59f: 0x348b, 0x5a0: 0x3184, 0x5a1: 0x3495, 0x5a2: 0x4703, 0x5a3: 0x4794,
+ 0x5a4: 0x39a1, 0x5a5: 0x3b30, 0x5a6: 0x39a8, 0x5a7: 0x3b37, 0x5a8: 0x39af, 0x5a9: 0x3b3e,
+ 0x5aa: 0x3193, 0x5ab: 0x34a4, 0x5ac: 0x319d, 0x5ad: 0x34b3, 0x5ae: 0x31b1, 0x5af: 0x34c7,
+ 0x5b0: 0x31ac, 0x5b1: 0x34c2, 0x5b2: 0x31ed, 0x5b3: 0x3503, 0x5b4: 0x31fc, 0x5b5: 0x3512,
+ 0x5b6: 0x31f7, 0x5b7: 0x350d, 0x5b8: 0x39b6, 0x5b9: 0x3b45, 0x5ba: 0x39bd, 0x5bb: 0x3b4c,
+ 0x5bc: 0x3201, 0x5bd: 0x3517, 0x5be: 0x3206, 0x5bf: 0x351c,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x320b, 0x5c1: 0x3521, 0x5c2: 0x3210, 0x5c3: 0x3526, 0x5c4: 0x321f, 0x5c5: 0x3535,
+ 0x5c6: 0x321a, 0x5c7: 0x3530, 0x5c8: 0x3224, 0x5c9: 0x353f, 0x5ca: 0x3229, 0x5cb: 0x3544,
+ 0x5cc: 0x322e, 0x5cd: 0x3549, 0x5ce: 0x324c, 0x5cf: 0x3567, 0x5d0: 0x3265, 0x5d1: 0x3585,
+ 0x5d2: 0x3274, 0x5d3: 0x3594, 0x5d4: 0x3279, 0x5d5: 0x3599, 0x5d6: 0x337d, 0x5d7: 0x34a9,
+ 0x5d8: 0x353a, 0x5d9: 0x3576, 0x5da: 0x1be3, 0x5db: 0x42da,
+ 0x5e0: 0x46a4, 0x5e1: 0x4735, 0x5e2: 0x2f86, 0x5e3: 0x3292,
+ 0x5e4: 0x387b, 0x5e5: 0x3a0a, 0x5e6: 0x3874, 0x5e7: 0x3a03, 0x5e8: 0x3889, 0x5e9: 0x3a18,
+ 0x5ea: 0x3882, 0x5eb: 0x3a11, 0x5ec: 0x38c1, 0x5ed: 0x3a50, 0x5ee: 0x3897, 0x5ef: 0x3a26,
+ 0x5f0: 0x3890, 0x5f1: 0x3a1f, 0x5f2: 0x38a5, 0x5f3: 0x3a34, 0x5f4: 0x389e, 0x5f5: 0x3a2d,
+ 0x5f6: 0x38c8, 0x5f7: 0x3a57, 0x5f8: 0x46b8, 0x5f9: 0x4749, 0x5fa: 0x3003, 0x5fb: 0x330f,
+ 0x5fc: 0x2fef, 0x5fd: 0x32fb, 0x5fe: 0x38dd, 0x5ff: 0x3a6c,
+ // Block 0x18, offset 0x600
+ 0x600: 0x38d6, 0x601: 0x3a65, 0x602: 0x38eb, 0x603: 0x3a7a, 0x604: 0x38e4, 0x605: 0x3a73,
+ 0x606: 0x3900, 0x607: 0x3a8f, 0x608: 0x3094, 0x609: 0x33a0, 0x60a: 0x30a8, 0x60b: 0x33b4,
+ 0x60c: 0x46ea, 0x60d: 0x477b, 0x60e: 0x3139, 0x60f: 0x344a, 0x610: 0x3923, 0x611: 0x3ab2,
+ 0x612: 0x391c, 0x613: 0x3aab, 0x614: 0x3931, 0x615: 0x3ac0, 0x616: 0x392a, 0x617: 0x3ab9,
+ 0x618: 0x398c, 0x619: 0x3b1b, 0x61a: 0x3970, 0x61b: 0x3aff, 0x61c: 0x3969, 0x61d: 0x3af8,
+ 0x61e: 0x397e, 0x61f: 0x3b0d, 0x620: 0x3977, 0x621: 0x3b06, 0x622: 0x3985, 0x623: 0x3b14,
+ 0x624: 0x31e8, 0x625: 0x34fe, 0x626: 0x31ca, 0x627: 0x34e0, 0x628: 0x39e7, 0x629: 0x3b76,
+ 0x62a: 0x39e0, 0x62b: 0x3b6f, 0x62c: 0x39f5, 0x62d: 0x3b84, 0x62e: 0x39ee, 0x62f: 0x3b7d,
+ 0x630: 0x39fc, 0x631: 0x3b8b, 0x632: 0x3233, 0x633: 0x354e, 0x634: 0x325b, 0x635: 0x357b,
+ 0x636: 0x3256, 0x637: 0x3571, 0x638: 0x3242, 0x639: 0x355d,
+ // Block 0x19, offset 0x640
+ 0x640: 0x4807, 0x641: 0x480d, 0x642: 0x4921, 0x643: 0x4939, 0x644: 0x4929, 0x645: 0x4941,
+ 0x646: 0x4931, 0x647: 0x4949, 0x648: 0x47ad, 0x649: 0x47b3, 0x64a: 0x4891, 0x64b: 0x48a9,
+ 0x64c: 0x4899, 0x64d: 0x48b1, 0x64e: 0x48a1, 0x64f: 0x48b9, 0x650: 0x4819, 0x651: 0x481f,
+ 0x652: 0x3dbb, 0x653: 0x3dcb, 0x654: 0x3dc3, 0x655: 0x3dd3,
+ 0x658: 0x47b9, 0x659: 0x47bf, 0x65a: 0x3ceb, 0x65b: 0x3cfb, 0x65c: 0x3cf3, 0x65d: 0x3d03,
+ 0x660: 0x4831, 0x661: 0x4837, 0x662: 0x4951, 0x663: 0x4969,
+ 0x664: 0x4959, 0x665: 0x4971, 0x666: 0x4961, 0x667: 0x4979, 0x668: 0x47c5, 0x669: 0x47cb,
+ 0x66a: 0x48c1, 0x66b: 0x48d9, 0x66c: 0x48c9, 0x66d: 0x48e1, 0x66e: 0x48d1, 0x66f: 0x48e9,
+ 0x670: 0x4849, 0x671: 0x484f, 0x672: 0x3e1b, 0x673: 0x3e33, 0x674: 0x3e23, 0x675: 0x3e3b,
+ 0x676: 0x3e2b, 0x677: 0x3e43, 0x678: 0x47d1, 0x679: 0x47d7, 0x67a: 0x3d1b, 0x67b: 0x3d33,
+ 0x67c: 0x3d23, 0x67d: 0x3d3b, 0x67e: 0x3d2b, 0x67f: 0x3d43,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x4855, 0x681: 0x485b, 0x682: 0x3e4b, 0x683: 0x3e5b, 0x684: 0x3e53, 0x685: 0x3e63,
+ 0x688: 0x47dd, 0x689: 0x47e3, 0x68a: 0x3d4b, 0x68b: 0x3d5b,
+ 0x68c: 0x3d53, 0x68d: 0x3d63, 0x690: 0x4867, 0x691: 0x486d,
+ 0x692: 0x3e83, 0x693: 0x3e9b, 0x694: 0x3e8b, 0x695: 0x3ea3, 0x696: 0x3e93, 0x697: 0x3eab,
+ 0x699: 0x47e9, 0x69b: 0x3d6b, 0x69d: 0x3d73,
+ 0x69f: 0x3d7b, 0x6a0: 0x487f, 0x6a1: 0x4885, 0x6a2: 0x4981, 0x6a3: 0x4999,
+ 0x6a4: 0x4989, 0x6a5: 0x49a1, 0x6a6: 0x4991, 0x6a7: 0x49a9, 0x6a8: 0x47ef, 0x6a9: 0x47f5,
+ 0x6aa: 0x48f1, 0x6ab: 0x4909, 0x6ac: 0x48f9, 0x6ad: 0x4911, 0x6ae: 0x4901, 0x6af: 0x4919,
+ 0x6b0: 0x47fb, 0x6b1: 0x4321, 0x6b2: 0x3694, 0x6b3: 0x4327, 0x6b4: 0x4825, 0x6b5: 0x432d,
+ 0x6b6: 0x36a6, 0x6b7: 0x4333, 0x6b8: 0x36c4, 0x6b9: 0x4339, 0x6ba: 0x36dc, 0x6bb: 0x433f,
+ 0x6bc: 0x4873, 0x6bd: 0x4345,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3da3, 0x6c1: 0x3dab, 0x6c2: 0x4187, 0x6c3: 0x41a5, 0x6c4: 0x4191, 0x6c5: 0x41af,
+ 0x6c6: 0x419b, 0x6c7: 0x41b9, 0x6c8: 0x3cdb, 0x6c9: 0x3ce3, 0x6ca: 0x40d3, 0x6cb: 0x40f1,
+ 0x6cc: 0x40dd, 0x6cd: 0x40fb, 0x6ce: 0x40e7, 0x6cf: 0x4105, 0x6d0: 0x3deb, 0x6d1: 0x3df3,
+ 0x6d2: 0x41c3, 0x6d3: 0x41e1, 0x6d4: 0x41cd, 0x6d5: 0x41eb, 0x6d6: 0x41d7, 0x6d7: 0x41f5,
+ 0x6d8: 0x3d0b, 0x6d9: 0x3d13, 0x6da: 0x410f, 0x6db: 0x412d, 0x6dc: 0x4119, 0x6dd: 0x4137,
+ 0x6de: 0x4123, 0x6df: 0x4141, 0x6e0: 0x3ec3, 0x6e1: 0x3ecb, 0x6e2: 0x41ff, 0x6e3: 0x421d,
+ 0x6e4: 0x4209, 0x6e5: 0x4227, 0x6e6: 0x4213, 0x6e7: 0x4231, 0x6e8: 0x3d83, 0x6e9: 0x3d8b,
+ 0x6ea: 0x414b, 0x6eb: 0x4169, 0x6ec: 0x4155, 0x6ed: 0x4173, 0x6ee: 0x415f, 0x6ef: 0x417d,
+ 0x6f0: 0x3688, 0x6f1: 0x3682, 0x6f2: 0x3d93, 0x6f3: 0x368e, 0x6f4: 0x3d9b,
+ 0x6f6: 0x4813, 0x6f7: 0x3db3, 0x6f8: 0x35f8, 0x6f9: 0x35f2, 0x6fa: 0x35e6, 0x6fb: 0x42f1,
+ 0x6fc: 0x35fe, 0x6fd: 0x428a, 0x6fe: 0x01d3, 0x6ff: 0x428a,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x42a3, 0x701: 0x4485, 0x702: 0x3ddb, 0x703: 0x36a0, 0x704: 0x3de3,
+ 0x706: 0x483d, 0x707: 0x3dfb, 0x708: 0x3604, 0x709: 0x42f7, 0x70a: 0x3610, 0x70b: 0x42fd,
+ 0x70c: 0x361c, 0x70d: 0x448c, 0x70e: 0x4493, 0x70f: 0x449a, 0x710: 0x36b8, 0x711: 0x36b2,
+ 0x712: 0x3e03, 0x713: 0x44e7, 0x716: 0x36be, 0x717: 0x3e13,
+ 0x718: 0x3634, 0x719: 0x362e, 0x71a: 0x3622, 0x71b: 0x4303, 0x71d: 0x44a1,
+ 0x71e: 0x44a8, 0x71f: 0x44af, 0x720: 0x36ee, 0x721: 0x36e8, 0x722: 0x3e6b, 0x723: 0x44ef,
+ 0x724: 0x36d0, 0x725: 0x36d6, 0x726: 0x36f4, 0x727: 0x3e7b, 0x728: 0x3664, 0x729: 0x365e,
+ 0x72a: 0x3652, 0x72b: 0x430f, 0x72c: 0x364c, 0x72d: 0x4477, 0x72e: 0x447e, 0x72f: 0x0081,
+ 0x732: 0x3eb3, 0x733: 0x36fa, 0x734: 0x3ebb,
+ 0x736: 0x488b, 0x737: 0x3ed3, 0x738: 0x3640, 0x739: 0x4309, 0x73a: 0x3670, 0x73b: 0x431b,
+ 0x73c: 0x367c, 0x73d: 0x425d, 0x73e: 0x428f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x1bdb, 0x741: 0x1bdf, 0x742: 0x0047, 0x743: 0x1c57, 0x745: 0x1beb,
+ 0x746: 0x1bef, 0x747: 0x00e9, 0x749: 0x1c5b, 0x74a: 0x008f, 0x74b: 0x0051,
+ 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053,
+ 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1990,
+ 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065,
+ 0x760: 0x19a2, 0x761: 0x1bcb, 0x762: 0x19ab,
+ 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075,
+ 0x76a: 0x0057, 0x76b: 0x42d5, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b,
+ 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215,
+ 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b9b,
+ 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x0463, 0x785: 0x0049,
+ 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095,
+ 0x790: 0x2231, 0x791: 0x223d,
+ 0x792: 0x22f1, 0x793: 0x2219, 0x794: 0x229d, 0x795: 0x2225, 0x796: 0x22a3, 0x797: 0x22bb,
+ 0x798: 0x22c7, 0x799: 0x222b, 0x79a: 0x22cd, 0x79b: 0x2237, 0x79c: 0x22c1, 0x79d: 0x22d3,
+ 0x79e: 0x22d9, 0x79f: 0x1cbf, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba7, 0x7a3: 0x1963,
+ 0x7a4: 0x006d, 0x7a5: 0x19ae, 0x7a6: 0x1bd3, 0x7a7: 0x1d4b, 0x7a8: 0x1966, 0x7a9: 0x0071,
+ 0x7aa: 0x19ba, 0x7ab: 0x1bd7, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b,
+ 0x7b0: 0x0093, 0x7b1: 0x19e7, 0x7b2: 0x1c1b, 0x7b3: 0x19f0, 0x7b4: 0x00ad, 0x7b5: 0x1a65,
+ 0x7b6: 0x1c4f, 0x7b7: 0x1d5f, 0x7b8: 0x19f3, 0x7b9: 0x00b1, 0x7ba: 0x1a68, 0x7bb: 0x1c53,
+ 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b,
+ // Block 0x1f, offset 0x7c0
+ 0x7c1: 0x3c09, 0x7c3: 0xa000, 0x7c4: 0x3c10, 0x7c5: 0xa000,
+ 0x7c7: 0x3c17, 0x7c8: 0xa000, 0x7c9: 0x3c1e,
+ 0x7cd: 0xa000,
+ 0x7e0: 0x2f68, 0x7e1: 0xa000, 0x7e2: 0x3c2c,
+ 0x7e4: 0xa000, 0x7e5: 0xa000,
+ 0x7ed: 0x3c25, 0x7ee: 0x2f63, 0x7ef: 0x2f6d,
+ 0x7f0: 0x3c33, 0x7f1: 0x3c3a, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c41, 0x7f5: 0x3c48,
+ 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4f, 0x7f9: 0x3c56, 0x7fa: 0xa000, 0x7fb: 0xa000,
+ 0x7fc: 0xa000, 0x7fd: 0xa000,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3c5d, 0x801: 0x3c64, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c79, 0x805: 0x3c80,
+ 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c87, 0x809: 0x3c8e,
+ 0x811: 0xa000,
+ 0x812: 0xa000,
+ 0x822: 0xa000,
+ 0x828: 0xa000, 0x829: 0xa000,
+ 0x82b: 0xa000, 0x82c: 0x3ca3, 0x82d: 0x3caa, 0x82e: 0x3cb1, 0x82f: 0x3cb8,
+ 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000,
+ // Block 0x21, offset 0x840
+ 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029,
+ 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882,
+ 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894,
+ 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a6b, 0x875: 0x1a6f,
+ 0x876: 0x1a73, 0x877: 0x1a77, 0x878: 0x1a7b, 0x879: 0x1a7f, 0x87a: 0x1a83, 0x87b: 0x1a87,
+ 0x87c: 0x1a8b, 0x87d: 0x1c83, 0x87e: 0x1c88, 0x87f: 0x1c8d,
+ // Block 0x22, offset 0x880
+ 0x880: 0x1c92, 0x881: 0x1c97, 0x882: 0x1c9c, 0x883: 0x1ca1, 0x884: 0x1ca6, 0x885: 0x1cab,
+ 0x886: 0x1cb0, 0x887: 0x1cb5, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb,
+ 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b63,
+ 0x892: 0x1b67, 0x893: 0x1b6b, 0x894: 0x1b6f, 0x895: 0x1b73, 0x896: 0x1b77, 0x897: 0x1b7b,
+ 0x898: 0x1b7f, 0x899: 0x1b83, 0x89a: 0x1b87, 0x89b: 0x1b8b, 0x89c: 0x1af7, 0x89d: 0x1afb,
+ 0x89e: 0x1aff, 0x89f: 0x1b03, 0x8a0: 0x1b07, 0x8a1: 0x1b0b, 0x8a2: 0x1b0f, 0x8a3: 0x1b13,
+ 0x8a4: 0x1b17, 0x8a5: 0x1b1b, 0x8a6: 0x1b1f, 0x8a7: 0x1b23, 0x8a8: 0x1b27, 0x8a9: 0x1b2b,
+ 0x8aa: 0x1b2f, 0x8ab: 0x1b33, 0x8ac: 0x1b37, 0x8ad: 0x1b3b, 0x8ae: 0x1b3f, 0x8af: 0x1b43,
+ 0x8b0: 0x1b47, 0x8b1: 0x1b4b, 0x8b2: 0x1b4f, 0x8b3: 0x1b53, 0x8b4: 0x1b57, 0x8b5: 0x1b5b,
+ 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d,
+ 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713,
+ 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab,
+ 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803,
+ 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887,
+ 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db,
+ 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb,
+ 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b,
+ 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7,
+ 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33,
+ 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63,
+ 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb,
+ 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b,
+ 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb,
+ 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3,
+ 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f,
+ 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83,
+ 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7,
+ 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f,
+ 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf,
+ 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f,
+ 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187,
+ // Block 0x25, offset 0x940
+ 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3,
+ 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb,
+ 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b,
+ 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b,
+ 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf,
+ 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f,
+ 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f,
+ 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503,
+ 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f,
+ 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f,
+ 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593,
+ // Block 0x26, offset 0x980
+ 0x986: 0xa000, 0x98b: 0xa000,
+ 0x98c: 0x3f0b, 0x98d: 0xa000, 0x98e: 0x3f13, 0x98f: 0xa000, 0x990: 0x3f1b, 0x991: 0xa000,
+ 0x992: 0x3f23, 0x993: 0xa000, 0x994: 0x3f2b, 0x995: 0xa000, 0x996: 0x3f33, 0x997: 0xa000,
+ 0x998: 0x3f3b, 0x999: 0xa000, 0x99a: 0x3f43, 0x99b: 0xa000, 0x99c: 0x3f4b, 0x99d: 0xa000,
+ 0x99e: 0x3f53, 0x99f: 0xa000, 0x9a0: 0x3f5b, 0x9a1: 0xa000, 0x9a2: 0x3f63,
+ 0x9a4: 0xa000, 0x9a5: 0x3f6b, 0x9a6: 0xa000, 0x9a7: 0x3f73, 0x9a8: 0xa000, 0x9a9: 0x3f7b,
+ 0x9af: 0xa000,
+ 0x9b0: 0x3f83, 0x9b1: 0x3f8b, 0x9b2: 0xa000, 0x9b3: 0x3f93, 0x9b4: 0x3f9b, 0x9b5: 0xa000,
+ 0x9b6: 0x3fa3, 0x9b7: 0x3fab, 0x9b8: 0xa000, 0x9b9: 0x3fb3, 0x9ba: 0x3fbb, 0x9bb: 0xa000,
+ 0x9bc: 0x3fc3, 0x9bd: 0x3fcb,
+ // Block 0x27, offset 0x9c0
+ 0x9d4: 0x3f03,
+ 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42df, 0x9dc: 0x42e5, 0x9dd: 0xa000,
+ 0x9de: 0x3fd3, 0x9df: 0x26b7,
+ 0x9e6: 0xa000,
+ 0x9eb: 0xa000, 0x9ec: 0x3fe3, 0x9ed: 0xa000, 0x9ee: 0x3feb, 0x9ef: 0xa000,
+ 0x9f0: 0x3ff3, 0x9f1: 0xa000, 0x9f2: 0x3ffb, 0x9f3: 0xa000, 0x9f4: 0x4003, 0x9f5: 0xa000,
+ 0x9f6: 0x400b, 0x9f7: 0xa000, 0x9f8: 0x4013, 0x9f9: 0xa000, 0x9fa: 0x401b, 0x9fb: 0xa000,
+ 0x9fc: 0x4023, 0x9fd: 0xa000, 0x9fe: 0x402b, 0x9ff: 0xa000,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x4033, 0xa01: 0xa000, 0xa02: 0x403b, 0xa04: 0xa000, 0xa05: 0x4043,
+ 0xa06: 0xa000, 0xa07: 0x404b, 0xa08: 0xa000, 0xa09: 0x4053,
+ 0xa0f: 0xa000, 0xa10: 0x405b, 0xa11: 0x4063,
+ 0xa12: 0xa000, 0xa13: 0x406b, 0xa14: 0x4073, 0xa15: 0xa000, 0xa16: 0x407b, 0xa17: 0x4083,
+ 0xa18: 0xa000, 0xa19: 0x408b, 0xa1a: 0x4093, 0xa1b: 0xa000, 0xa1c: 0x409b, 0xa1d: 0x40a3,
+ 0xa2f: 0xa000,
+ 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fdb,
+ 0xa37: 0x40ab, 0xa38: 0x40b3, 0xa39: 0x40bb, 0xa3a: 0x40c3,
+ 0xa3d: 0xa000, 0xa3e: 0x40cb, 0xa3f: 0x26cc,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337,
+ 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f,
+ 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49c0, 0xa50: 0x49c6, 0xa51: 0x49cc,
+ 0xa52: 0x49d2, 0xa53: 0x49d8, 0xa54: 0x49de, 0xa55: 0x49e4, 0xa56: 0x49ea, 0xa57: 0x49f0,
+ 0xa58: 0x49f6, 0xa59: 0x49fc, 0xa5a: 0x4a02, 0xa5b: 0x4a08, 0xa5c: 0x4a0e, 0xa5d: 0x4a14,
+ 0xa5e: 0x4a1a, 0xa5f: 0x4a20, 0xa60: 0x4a26, 0xa61: 0x4a2c, 0xa62: 0x4a32, 0xa63: 0x4a38,
+ 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef,
+ 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403,
+ 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383,
+ 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b,
+ 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb,
+ 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7,
+ 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3,
+ 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7,
+ 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff,
+ 0xa9e: 0x098f, 0xa9f: 0x072f,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x2057, 0xac1: 0x205d, 0xac2: 0x2063, 0xac3: 0x2069, 0xac4: 0x206f, 0xac5: 0x2075,
+ 0xac6: 0x207b, 0xac7: 0x2081, 0xac8: 0x2087, 0xac9: 0x208d, 0xaca: 0x2093, 0xacb: 0x2099,
+ 0xacc: 0x209f, 0xacd: 0x20a5, 0xace: 0x2729, 0xacf: 0x2732, 0xad0: 0x273b, 0xad1: 0x2744,
+ 0xad2: 0x274d, 0xad3: 0x2756, 0xad4: 0x275f, 0xad5: 0x2768, 0xad6: 0x2771, 0xad7: 0x2783,
+ 0xad8: 0x278c, 0xad9: 0x2795, 0xada: 0x279e, 0xadb: 0x27a7, 0xadc: 0x277a, 0xadd: 0x2baf,
+ 0xade: 0x2af0, 0xae0: 0x20ab, 0xae1: 0x20c3, 0xae2: 0x20b7, 0xae3: 0x210b,
+ 0xae4: 0x20c9, 0xae5: 0x20e7, 0xae6: 0x20b1, 0xae7: 0x20e1, 0xae8: 0x20bd, 0xae9: 0x20f3,
+ 0xaea: 0x2123, 0xaeb: 0x2141, 0xaec: 0x213b, 0xaed: 0x212f, 0xaee: 0x217d, 0xaef: 0x2111,
+ 0xaf0: 0x211d, 0xaf1: 0x2135, 0xaf2: 0x2129, 0xaf3: 0x2153, 0xaf4: 0x20ff, 0xaf5: 0x2147,
+ 0xaf6: 0x2171, 0xaf7: 0x2159, 0xaf8: 0x20ed, 0xaf9: 0x20cf, 0xafa: 0x2105, 0xafb: 0x2117,
+ 0xafc: 0x214d, 0xafd: 0x20d5, 0xafe: 0x2177, 0xaff: 0x20f9,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x215f, 0xb01: 0x20db, 0xb02: 0x2165, 0xb03: 0x216b, 0xb04: 0x092f, 0xb05: 0x0b03,
+ 0xb06: 0x0ca7, 0xb07: 0x10c7,
+ 0xb10: 0x1bc7, 0xb11: 0x18a9,
+ 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb,
+ 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3,
+ 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327,
+ 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b,
+ 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e1b, 0xb2f: 0x2e23,
+ 0xb30: 0x2e2b, 0xb31: 0x2e33, 0xb32: 0x2e3b, 0xb33: 0x2e43, 0xb34: 0x2e4b, 0xb35: 0x2e53,
+ 0xb36: 0x2e63, 0xb37: 0x2e6b, 0xb38: 0x2e73, 0xb39: 0x2e7b, 0xb3a: 0x2e83, 0xb3b: 0x2e8b,
+ 0xb3c: 0x2ed6, 0xb3d: 0x2e9e, 0xb3e: 0x2e5b,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af,
+ 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f,
+ 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b,
+ 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f,
+ 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f,
+ 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b,
+ 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f,
+ 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b,
+ 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee,
+ 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900,
+ 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x1cc9, 0xb81: 0x1cd8, 0xb82: 0x1ce7, 0xb83: 0x1cf6, 0xb84: 0x1d05, 0xb85: 0x1d14,
+ 0xb86: 0x1d23, 0xb87: 0x1d32, 0xb88: 0x1d41, 0xb89: 0x218f, 0xb8a: 0x21a1, 0xb8b: 0x21b3,
+ 0xb8c: 0x1954, 0xb8d: 0x1c07, 0xb8e: 0x19d5, 0xb8f: 0x1bab, 0xb90: 0x04cb, 0xb91: 0x04d3,
+ 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7,
+ 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f,
+ 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b,
+ 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543,
+ 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b,
+ 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f,
+ 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597,
+ 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x2b0f, 0xbc1: 0x29ab, 0xbc2: 0x2b1f, 0xbc3: 0x2883, 0xbc4: 0x2ee7, 0xbc5: 0x288d,
+ 0xbc6: 0x2897, 0xbc7: 0x2f2b, 0xbc8: 0x29b8, 0xbc9: 0x28a1, 0xbca: 0x28ab, 0xbcb: 0x28b5,
+ 0xbcc: 0x29df, 0xbcd: 0x29ec, 0xbce: 0x29c5, 0xbcf: 0x29d2, 0xbd0: 0x2eac, 0xbd1: 0x29f9,
+ 0xbd2: 0x2a06, 0xbd3: 0x2bc1, 0xbd4: 0x26be, 0xbd5: 0x2bd4, 0xbd6: 0x2be7, 0xbd7: 0x2b2f,
+ 0xbd8: 0x2a13, 0xbd9: 0x2bfa, 0xbda: 0x2c0d, 0xbdb: 0x2a20, 0xbdc: 0x28bf, 0xbdd: 0x28c9,
+ 0xbde: 0x2eba, 0xbdf: 0x2a2d, 0xbe0: 0x2b3f, 0xbe1: 0x2ef8, 0xbe2: 0x28d3, 0xbe3: 0x28dd,
+ 0xbe4: 0x2a3a, 0xbe5: 0x28e7, 0xbe6: 0x28f1, 0xbe7: 0x26d3, 0xbe8: 0x26da, 0xbe9: 0x28fb,
+ 0xbea: 0x2905, 0xbeb: 0x2c20, 0xbec: 0x2a47, 0xbed: 0x2b4f, 0xbee: 0x2c33, 0xbef: 0x2a54,
+ 0xbf0: 0x2919, 0xbf1: 0x290f, 0xbf2: 0x2f3f, 0xbf3: 0x2a61, 0xbf4: 0x2c46, 0xbf5: 0x2923,
+ 0xbf6: 0x2b5f, 0xbf7: 0x292d, 0xbf8: 0x2a7b, 0xbf9: 0x2937, 0xbfa: 0x2a88, 0xbfb: 0x2f09,
+ 0xbfc: 0x2a6e, 0xbfd: 0x2b6f, 0xbfe: 0x2a95, 0xbff: 0x26e1,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x2f1a, 0xc01: 0x2941, 0xc02: 0x294b, 0xc03: 0x2aa2, 0xc04: 0x2955, 0xc05: 0x295f,
+ 0xc06: 0x2969, 0xc07: 0x2b7f, 0xc08: 0x2aaf, 0xc09: 0x26e8, 0xc0a: 0x2c59, 0xc0b: 0x2e93,
+ 0xc0c: 0x2b8f, 0xc0d: 0x2abc, 0xc0e: 0x2ec8, 0xc0f: 0x2973, 0xc10: 0x297d, 0xc11: 0x2ac9,
+ 0xc12: 0x26ef, 0xc13: 0x2ad6, 0xc14: 0x2b9f, 0xc15: 0x26f6, 0xc16: 0x2c6c, 0xc17: 0x2987,
+ 0xc18: 0x1cba, 0xc19: 0x1cce, 0xc1a: 0x1cdd, 0xc1b: 0x1cec, 0xc1c: 0x1cfb, 0xc1d: 0x1d0a,
+ 0xc1e: 0x1d19, 0xc1f: 0x1d28, 0xc20: 0x1d37, 0xc21: 0x1d46, 0xc22: 0x2195, 0xc23: 0x21a7,
+ 0xc24: 0x21b9, 0xc25: 0x21c5, 0xc26: 0x21d1, 0xc27: 0x21dd, 0xc28: 0x21e9, 0xc29: 0x21f5,
+ 0xc2a: 0x2201, 0xc2b: 0x220d, 0xc2c: 0x2249, 0xc2d: 0x2255, 0xc2e: 0x2261, 0xc2f: 0x226d,
+ 0xc30: 0x2279, 0xc31: 0x1c17, 0xc32: 0x19c9, 0xc33: 0x1936, 0xc34: 0x1be7, 0xc35: 0x1a4a,
+ 0xc36: 0x1a59, 0xc37: 0x19cf, 0xc38: 0x1bff, 0xc39: 0x1c03, 0xc3a: 0x1960, 0xc3b: 0x2704,
+ 0xc3c: 0x2712, 0xc3d: 0x26fd, 0xc3e: 0x270b, 0xc3f: 0x2ae3,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1a4d, 0xc41: 0x1a35, 0xc42: 0x1c63, 0xc43: 0x1a1d, 0xc44: 0x19f6, 0xc45: 0x1969,
+ 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf3, 0xc49: 0x1d55, 0xc4a: 0x1a50, 0xc4b: 0x1a38,
+ 0xc4c: 0x1c67, 0xc4d: 0x1c73, 0xc4e: 0x1a29, 0xc4f: 0x19ff, 0xc50: 0x1957, 0xc51: 0x1c1f,
+ 0xc52: 0x1bb3, 0xc53: 0x1b9f, 0xc54: 0x1bcf, 0xc55: 0x1c77, 0xc56: 0x1a2c, 0xc57: 0x19cc,
+ 0xc58: 0x1a02, 0xc59: 0x19e1, 0xc5a: 0x1a44, 0xc5b: 0x1c7b, 0xc5c: 0x1a2f, 0xc5d: 0x19c3,
+ 0xc5e: 0x1a05, 0xc5f: 0x1c3f, 0xc60: 0x1bf7, 0xc61: 0x1a17, 0xc62: 0x1c27, 0xc63: 0x1c43,
+ 0xc64: 0x1bfb, 0xc65: 0x1a1a, 0xc66: 0x1c2b, 0xc67: 0x22eb, 0xc68: 0x22ff, 0xc69: 0x1999,
+ 0xc6a: 0x1c23, 0xc6b: 0x1bb7, 0xc6c: 0x1ba3, 0xc6d: 0x1c4b, 0xc6e: 0x2719, 0xc6f: 0x27b0,
+ 0xc70: 0x1a5c, 0xc71: 0x1a47, 0xc72: 0x1c7f, 0xc73: 0x1a32, 0xc74: 0x1a53, 0xc75: 0x1a3b,
+ 0xc76: 0x1c6b, 0xc77: 0x1a20, 0xc78: 0x19f9, 0xc79: 0x1984, 0xc7a: 0x1a56, 0xc7b: 0x1a3e,
+ 0xc7c: 0x1c6f, 0xc7d: 0x1a23, 0xc7e: 0x19fc, 0xc7f: 0x1987,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x1c2f, 0xc81: 0x1bbb, 0xc82: 0x1d50, 0xc83: 0x1939, 0xc84: 0x19bd, 0xc85: 0x19c0,
+ 0xc86: 0x22f8, 0xc87: 0x1b97, 0xc88: 0x19c6, 0xc89: 0x194b, 0xc8a: 0x19e4, 0xc8b: 0x194e,
+ 0xc8c: 0x19ed, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a08, 0xc90: 0x1a0e, 0xc91: 0x1a11,
+ 0xc92: 0x1c33, 0xc93: 0x1a14, 0xc94: 0x1a26, 0xc95: 0x1c3b, 0xc96: 0x1c47, 0xc97: 0x1993,
+ 0xc98: 0x1d5a, 0xc99: 0x1bbf, 0xc9a: 0x1996, 0xc9b: 0x1a5f, 0xc9c: 0x19a8, 0xc9d: 0x19b7,
+ 0xc9e: 0x22e5, 0xc9f: 0x22df, 0xca0: 0x1cc4, 0xca1: 0x1cd3, 0xca2: 0x1ce2, 0xca3: 0x1cf1,
+ 0xca4: 0x1d00, 0xca5: 0x1d0f, 0xca6: 0x1d1e, 0xca7: 0x1d2d, 0xca8: 0x1d3c, 0xca9: 0x2189,
+ 0xcaa: 0x219b, 0xcab: 0x21ad, 0xcac: 0x21bf, 0xcad: 0x21cb, 0xcae: 0x21d7, 0xcaf: 0x21e3,
+ 0xcb0: 0x21ef, 0xcb1: 0x21fb, 0xcb2: 0x2207, 0xcb3: 0x2243, 0xcb4: 0x224f, 0xcb5: 0x225b,
+ 0xcb6: 0x2267, 0xcb7: 0x2273, 0xcb8: 0x227f, 0xcb9: 0x2285, 0xcba: 0x228b, 0xcbb: 0x2291,
+ 0xcbc: 0x2297, 0xcbd: 0x22a9, 0xcbe: 0x22af, 0xcbf: 0x1c13,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb,
+ 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943,
+ 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3,
+ 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43,
+ 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87,
+ 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283,
+ 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f,
+ 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853,
+ 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b,
+ 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b,
+ 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b,
+ 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f,
+ 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7,
+ 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127,
+ 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357,
+ 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873,
+ 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3,
+ 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b,
+ 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57,
+ 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb,
+ 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f,
+ 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3,
+ 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83,
+ 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193,
+ 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b,
+ 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b,
+ 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f,
+ 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b,
+ 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753,
+ 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777,
+ 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 0x0ce3,
+ 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47,
+ 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af,
+ 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df,
+ 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817,
+ 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3,
+ 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457,
+ 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b,
+ 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27,
+ 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f,
+ 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03,
+ 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27,
+ 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af,
+ 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3,
+ 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb,
+ 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353,
+ 0xde5: 0x1407, 0xde6: 0x1433,
+ 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7,
+ 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897,
+ 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93,
+ 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b,
+ 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f,
+ 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f,
+ 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f,
+ 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff,
+ 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f,
+ 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f,
+ 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3,
+ 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7,
+ 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963,
+ 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b,
+ 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb,
+ 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf,
+ 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f,
+ 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013,
+ 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f,
+ 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b,
+ 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b,
+ 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 0xe75: 0x12cb,
+ 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343,
+ 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b,
+ 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b,
+ 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2,
+ 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809,
+ 0xe98: 0x1617, 0xe99: 0x1627,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x19d8, 0xec1: 0x19db, 0xec2: 0x19de, 0xec3: 0x1c0b, 0xec4: 0x1c0f, 0xec5: 0x1a62,
+ 0xec6: 0x1a62,
+ 0xed3: 0x1d78, 0xed4: 0x1d69, 0xed5: 0x1d6e, 0xed6: 0x1d7d, 0xed7: 0x1d73,
+ 0xedd: 0x4393,
+ 0xede: 0x8115, 0xedf: 0x4405, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221,
+ 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017,
+ 0xeea: 0x43f3, 0xeeb: 0x43f9, 0xeec: 0x44f7, 0xeed: 0x44ff, 0xeee: 0x434b, 0xeef: 0x4351,
+ 0xef0: 0x4357, 0xef1: 0x435d, 0xef2: 0x4369, 0xef3: 0x436f, 0xef4: 0x4375, 0xef5: 0x4381,
+ 0xef6: 0x4387, 0xef8: 0x438d, 0xef9: 0x4399, 0xefa: 0x439f, 0xefb: 0x43a5,
+ 0xefc: 0x43b1, 0xefe: 0x43b7,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x43bd, 0xf01: 0x43c3, 0xf03: 0x43c9, 0xf04: 0x43cf,
+ 0xf06: 0x43db, 0xf07: 0x43e1, 0xf08: 0x43e7, 0xf09: 0x43ed, 0xf0a: 0x43ff, 0xf0b: 0x437b,
+ 0xf0c: 0x4363, 0xf0d: 0x43ab, 0xf0e: 0x43d5, 0xf0f: 0x1d82, 0xf10: 0x0299, 0xf11: 0x0299,
+ 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5,
+ 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab,
+ 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8,
+ 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c,
+ 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2,
+ 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1,
+ 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4,
+ 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd,
+ 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9,
+ 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5,
+ 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1,
+ 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de,
+ 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7,
+ 0xf64: 0x4471, 0xf65: 0x4471, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed,
+ 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308,
+ 0xf70: 0x446b, 0xf71: 0x446b,
+ // Block 0x3e, offset 0xf80
+ 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6,
+ 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x2052,
+ 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc,
+ 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d,
+ 0xfaa: 0x25ad, 0xfab: 0x25ad, 0xfac: 0x261d, 0xfad: 0x261d, 0xfae: 0x25ec, 0xfaf: 0x25ec,
+ 0xfb0: 0x2608, 0xfb1: 0x2608, 0xfb2: 0x2601, 0xfb3: 0x2601, 0xfb4: 0x260f, 0xfb5: 0x260f,
+ 0xfb6: 0x2616, 0xfb7: 0x2616, 0xfb8: 0x2616, 0xfb9: 0x25f3, 0xfba: 0x25f3, 0xfbb: 0x25f3,
+ 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x25b4, 0xfc1: 0x25bb, 0xfc2: 0x25d7, 0xfc3: 0x25f3, 0xfc4: 0x25fa, 0xfc5: 0x1d8c,
+ 0xfc6: 0x1d91, 0xfc7: 0x1d96, 0xfc8: 0x1da5, 0xfc9: 0x1db4, 0xfca: 0x1db9, 0xfcb: 0x1dbe,
+ 0xfcc: 0x1dc3, 0xfcd: 0x1dc8, 0xfce: 0x1dd7, 0xfcf: 0x1de6, 0xfd0: 0x1deb, 0xfd1: 0x1df0,
+ 0xfd2: 0x1dff, 0xfd3: 0x1e0e, 0xfd4: 0x1e13, 0xfd5: 0x1e18, 0xfd6: 0x1e1d, 0xfd7: 0x1e2c,
+ 0xfd8: 0x1e31, 0xfd9: 0x1e40, 0xfda: 0x1e45, 0xfdb: 0x1e4a, 0xfdc: 0x1e59, 0xfdd: 0x1e5e,
+ 0xfde: 0x1e63, 0xfdf: 0x1e6d, 0xfe0: 0x1ea9, 0xfe1: 0x1eb8, 0xfe2: 0x1ec7, 0xfe3: 0x1ecc,
+ 0xfe4: 0x1ed1, 0xfe5: 0x1edb, 0xfe6: 0x1eea, 0xfe7: 0x1eef, 0xfe8: 0x1efe, 0xfe9: 0x1f03,
+ 0xfea: 0x1f08, 0xfeb: 0x1f17, 0xfec: 0x1f1c, 0xfed: 0x1f2b, 0xfee: 0x1f30, 0xfef: 0x1f35,
+ 0xff0: 0x1f3a, 0xff1: 0x1f3f, 0xff2: 0x1f44, 0xff3: 0x1f49, 0xff4: 0x1f4e, 0xff5: 0x1f53,
+ 0xff6: 0x1f58, 0xff7: 0x1f5d, 0xff8: 0x1f62, 0xff9: 0x1f67, 0xffa: 0x1f6c, 0xffb: 0x1f71,
+ 0xffc: 0x1f76, 0xffd: 0x1f7b, 0xffe: 0x1f80, 0xfff: 0x1f8a,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x1f8f, 0x1001: 0x1f94, 0x1002: 0x1f99, 0x1003: 0x1fa3, 0x1004: 0x1fa8, 0x1005: 0x1fb2,
+ 0x1006: 0x1fb7, 0x1007: 0x1fbc, 0x1008: 0x1fc1, 0x1009: 0x1fc6, 0x100a: 0x1fcb, 0x100b: 0x1fd0,
+ 0x100c: 0x1fd5, 0x100d: 0x1fda, 0x100e: 0x1fe9, 0x100f: 0x1ff8, 0x1010: 0x1ffd, 0x1011: 0x2002,
+ 0x1012: 0x2007, 0x1013: 0x200c, 0x1014: 0x2011, 0x1015: 0x201b, 0x1016: 0x2020, 0x1017: 0x2025,
+ 0x1018: 0x2034, 0x1019: 0x2043, 0x101a: 0x2048, 0x101b: 0x4423, 0x101c: 0x4429, 0x101d: 0x445f,
+ 0x101e: 0x44b6, 0x101f: 0x44bd, 0x1020: 0x44c4, 0x1021: 0x44cb, 0x1022: 0x44d2, 0x1023: 0x44d9,
+ 0x1024: 0x25c9, 0x1025: 0x25d0, 0x1026: 0x25d7, 0x1027: 0x25de, 0x1028: 0x25f3, 0x1029: 0x25fa,
+ 0x102a: 0x1d9b, 0x102b: 0x1da0, 0x102c: 0x1da5, 0x102d: 0x1daa, 0x102e: 0x1db4, 0x102f: 0x1db9,
+ 0x1030: 0x1dcd, 0x1031: 0x1dd2, 0x1032: 0x1dd7, 0x1033: 0x1ddc, 0x1034: 0x1de6, 0x1035: 0x1deb,
+ 0x1036: 0x1df5, 0x1037: 0x1dfa, 0x1038: 0x1dff, 0x1039: 0x1e04, 0x103a: 0x1e0e, 0x103b: 0x1e13,
+ 0x103c: 0x1f3f, 0x103d: 0x1f44, 0x103e: 0x1f53, 0x103f: 0x1f58,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x1f5d, 0x1041: 0x1f71, 0x1042: 0x1f76, 0x1043: 0x1f7b, 0x1044: 0x1f80, 0x1045: 0x1f99,
+ 0x1046: 0x1fa3, 0x1047: 0x1fa8, 0x1048: 0x1fad, 0x1049: 0x1fc1, 0x104a: 0x1fdf, 0x104b: 0x1fe4,
+ 0x104c: 0x1fe9, 0x104d: 0x1fee, 0x104e: 0x1ff8, 0x104f: 0x1ffd, 0x1050: 0x445f, 0x1051: 0x202a,
+ 0x1052: 0x202f, 0x1053: 0x2034, 0x1054: 0x2039, 0x1055: 0x2043, 0x1056: 0x2048, 0x1057: 0x25b4,
+ 0x1058: 0x25bb, 0x1059: 0x25c2, 0x105a: 0x25d7, 0x105b: 0x25e5, 0x105c: 0x1d8c, 0x105d: 0x1d91,
+ 0x105e: 0x1d96, 0x105f: 0x1da5, 0x1060: 0x1daf, 0x1061: 0x1dbe, 0x1062: 0x1dc3, 0x1063: 0x1dc8,
+ 0x1064: 0x1dd7, 0x1065: 0x1de1, 0x1066: 0x1dff, 0x1067: 0x1e18, 0x1068: 0x1e1d, 0x1069: 0x1e2c,
+ 0x106a: 0x1e31, 0x106b: 0x1e40, 0x106c: 0x1e4a, 0x106d: 0x1e59, 0x106e: 0x1e5e, 0x106f: 0x1e63,
+ 0x1070: 0x1e6d, 0x1071: 0x1ea9, 0x1072: 0x1eae, 0x1073: 0x1eb8, 0x1074: 0x1ec7, 0x1075: 0x1ecc,
+ 0x1076: 0x1ed1, 0x1077: 0x1edb, 0x1078: 0x1eea, 0x1079: 0x1efe, 0x107a: 0x1f03, 0x107b: 0x1f08,
+ 0x107c: 0x1f17, 0x107d: 0x1f1c, 0x107e: 0x1f2b, 0x107f: 0x1f30,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x1f35, 0x1081: 0x1f3a, 0x1082: 0x1f49, 0x1083: 0x1f4e, 0x1084: 0x1f62, 0x1085: 0x1f67,
+ 0x1086: 0x1f6c, 0x1087: 0x1f71, 0x1088: 0x1f76, 0x1089: 0x1f8a, 0x108a: 0x1f8f, 0x108b: 0x1f94,
+ 0x108c: 0x1f99, 0x108d: 0x1f9e, 0x108e: 0x1fb2, 0x108f: 0x1fb7, 0x1090: 0x1fbc, 0x1091: 0x1fc1,
+ 0x1092: 0x1fd0, 0x1093: 0x1fd5, 0x1094: 0x1fda, 0x1095: 0x1fe9, 0x1096: 0x1ff3, 0x1097: 0x2002,
+ 0x1098: 0x2007, 0x1099: 0x4453, 0x109a: 0x201b, 0x109b: 0x2020, 0x109c: 0x2025, 0x109d: 0x2034,
+ 0x109e: 0x203e, 0x109f: 0x25d7, 0x10a0: 0x25e5, 0x10a1: 0x1da5, 0x10a2: 0x1daf, 0x10a3: 0x1dd7,
+ 0x10a4: 0x1de1, 0x10a5: 0x1dff, 0x10a6: 0x1e09, 0x10a7: 0x1e6d, 0x10a8: 0x1e72, 0x10a9: 0x1e95,
+ 0x10aa: 0x1e9a, 0x10ab: 0x1f71, 0x10ac: 0x1f76, 0x10ad: 0x1f99, 0x10ae: 0x1fe9, 0x10af: 0x1ff3,
+ 0x10b0: 0x2034, 0x10b1: 0x203e, 0x10b2: 0x4507, 0x10b3: 0x450f, 0x10b4: 0x4517, 0x10b5: 0x1ef4,
+ 0x10b6: 0x1ef9, 0x10b7: 0x1f0d, 0x10b8: 0x1f12, 0x10b9: 0x1f21, 0x10ba: 0x1f26, 0x10bb: 0x1e77,
+ 0x10bc: 0x1e7c, 0x10bd: 0x1e9f, 0x10be: 0x1ea4, 0x10bf: 0x1e36,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x1e3b, 0x10c1: 0x1e22, 0x10c2: 0x1e27, 0x10c3: 0x1e4f, 0x10c4: 0x1e54, 0x10c5: 0x1ebd,
+ 0x10c6: 0x1ec2, 0x10c7: 0x1ee0, 0x10c8: 0x1ee5, 0x10c9: 0x1e81, 0x10ca: 0x1e86, 0x10cb: 0x1e8b,
+ 0x10cc: 0x1e95, 0x10cd: 0x1e90, 0x10ce: 0x1e68, 0x10cf: 0x1eb3, 0x10d0: 0x1ed6, 0x10d1: 0x1ef4,
+ 0x10d2: 0x1ef9, 0x10d3: 0x1f0d, 0x10d4: 0x1f12, 0x10d5: 0x1f21, 0x10d6: 0x1f26, 0x10d7: 0x1e77,
+ 0x10d8: 0x1e7c, 0x10d9: 0x1e9f, 0x10da: 0x1ea4, 0x10db: 0x1e36, 0x10dc: 0x1e3b, 0x10dd: 0x1e22,
+ 0x10de: 0x1e27, 0x10df: 0x1e4f, 0x10e0: 0x1e54, 0x10e1: 0x1ebd, 0x10e2: 0x1ec2, 0x10e3: 0x1ee0,
+ 0x10e4: 0x1ee5, 0x10e5: 0x1e81, 0x10e6: 0x1e86, 0x10e7: 0x1e8b, 0x10e8: 0x1e95, 0x10e9: 0x1e90,
+ 0x10ea: 0x1e68, 0x10eb: 0x1eb3, 0x10ec: 0x1ed6, 0x10ed: 0x1e81, 0x10ee: 0x1e86, 0x10ef: 0x1e8b,
+ 0x10f0: 0x1e95, 0x10f1: 0x1e72, 0x10f2: 0x1e9a, 0x10f3: 0x1eef, 0x10f4: 0x1e59, 0x10f5: 0x1e5e,
+ 0x10f6: 0x1e63, 0x10f7: 0x1e81, 0x10f8: 0x1e86, 0x10f9: 0x1e8b, 0x10fa: 0x1eef, 0x10fb: 0x1efe,
+ 0x10fc: 0x440b, 0x10fd: 0x440b,
+ // Block 0x44, offset 0x1100
+ 0x1110: 0x2314, 0x1111: 0x2329,
+ 0x1112: 0x2329, 0x1113: 0x2330, 0x1114: 0x2337, 0x1115: 0x234c, 0x1116: 0x2353, 0x1117: 0x235a,
+ 0x1118: 0x237d, 0x1119: 0x237d, 0x111a: 0x23a0, 0x111b: 0x2399, 0x111c: 0x23b5, 0x111d: 0x23a7,
+ 0x111e: 0x23ae, 0x111f: 0x23d1, 0x1120: 0x23d1, 0x1121: 0x23ca, 0x1122: 0x23d8, 0x1123: 0x23d8,
+ 0x1124: 0x2402, 0x1125: 0x2402, 0x1126: 0x241e, 0x1127: 0x23e6, 0x1128: 0x23e6, 0x1129: 0x23df,
+ 0x112a: 0x23f4, 0x112b: 0x23f4, 0x112c: 0x23fb, 0x112d: 0x23fb, 0x112e: 0x2425, 0x112f: 0x2433,
+ 0x1130: 0x2433, 0x1131: 0x243a, 0x1132: 0x243a, 0x1133: 0x2441, 0x1134: 0x2448, 0x1135: 0x244f,
+ 0x1136: 0x2456, 0x1137: 0x2456, 0x1138: 0x245d, 0x1139: 0x246b, 0x113a: 0x2479, 0x113b: 0x2472,
+ 0x113c: 0x2480, 0x113d: 0x2480, 0x113e: 0x2495, 0x113f: 0x249c,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0x24cd, 0x1141: 0x24db, 0x1142: 0x24d4, 0x1143: 0x24b8, 0x1144: 0x24b8, 0x1145: 0x24e2,
+ 0x1146: 0x24e2, 0x1147: 0x24e9, 0x1148: 0x24e9, 0x1149: 0x2513, 0x114a: 0x251a, 0x114b: 0x2521,
+ 0x114c: 0x24f7, 0x114d: 0x2505, 0x114e: 0x2528, 0x114f: 0x252f,
+ 0x1152: 0x24fe, 0x1153: 0x2583, 0x1154: 0x258a, 0x1155: 0x2560, 0x1156: 0x2567, 0x1157: 0x254b,
+ 0x1158: 0x254b, 0x1159: 0x2552, 0x115a: 0x257c, 0x115b: 0x2575, 0x115c: 0x259f, 0x115d: 0x259f,
+ 0x115e: 0x230d, 0x115f: 0x2322, 0x1160: 0x231b, 0x1161: 0x2345, 0x1162: 0x233e, 0x1163: 0x2368,
+ 0x1164: 0x2361, 0x1165: 0x238b, 0x1166: 0x236f, 0x1167: 0x2384, 0x1168: 0x23bc, 0x1169: 0x2409,
+ 0x116a: 0x23ed, 0x116b: 0x242c, 0x116c: 0x24c6, 0x116d: 0x24f0, 0x116e: 0x2598, 0x116f: 0x2591,
+ 0x1170: 0x25a6, 0x1171: 0x253d, 0x1172: 0x24a3, 0x1173: 0x256e, 0x1174: 0x2495, 0x1175: 0x24cd,
+ 0x1176: 0x2464, 0x1177: 0x24b1, 0x1178: 0x2544, 0x1179: 0x2536, 0x117a: 0x24bf, 0x117b: 0x24aa,
+ 0x117c: 0x24bf, 0x117d: 0x2544, 0x117e: 0x2376, 0x117f: 0x2392,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x250c, 0x1181: 0x2487, 0x1182: 0x2306, 0x1183: 0x24aa, 0x1184: 0x244f, 0x1185: 0x241e,
+ 0x1186: 0x23c3, 0x1187: 0x2559,
+ 0x11b0: 0x2417, 0x11b1: 0x248e, 0x11b2: 0x27c2, 0x11b3: 0x27b9, 0x11b4: 0x27ef, 0x11b5: 0x27dd,
+ 0x11b6: 0x27cb, 0x11b7: 0x27e6, 0x11b8: 0x27f8, 0x11b9: 0x2410, 0x11ba: 0x2c7f, 0x11bb: 0x2aff,
+ 0x11bc: 0x27d4,
+ // Block 0x47, offset 0x11c0
+ 0x11d0: 0x0019, 0x11d1: 0x0483,
+ 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf,
+ 0x11d8: 0x04c3, 0x11d9: 0x1b5f,
+ 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132,
+ 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d,
+ 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132,
+ 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011,
+ 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab,
+ 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7,
+ 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x426c, 0x120a: 0x426c, 0x120b: 0x426c,
+ 0x120c: 0x426c, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483,
+ 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003,
+ 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7,
+ 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b,
+ 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009,
+ 0x122a: 0x000b, 0x122b: 0x0041,
+ 0x1230: 0x42ad, 0x1231: 0x442f, 0x1232: 0x42b2, 0x1234: 0x42b7,
+ 0x1236: 0x42bc, 0x1237: 0x4435, 0x1238: 0x42c1, 0x1239: 0x443b, 0x123a: 0x42c6, 0x123b: 0x4441,
+ 0x123c: 0x42cb, 0x123d: 0x4447, 0x123e: 0x42d0, 0x123f: 0x444d,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x0236, 0x1241: 0x4411, 0x1242: 0x4411, 0x1243: 0x4417, 0x1244: 0x4417, 0x1245: 0x4459,
+ 0x1246: 0x4459, 0x1247: 0x441d, 0x1248: 0x441d, 0x1249: 0x4465, 0x124a: 0x4465, 0x124b: 0x4465,
+ 0x124c: 0x4465, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c,
+ 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242,
+ 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248,
+ 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b,
+ 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251,
+ 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a,
+ 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260,
+ 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263,
+ 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c,
+ 0x1286: 0x026c, 0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f,
+ 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275,
+ 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278,
+ 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e,
+ 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281,
+ 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287,
+ 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d,
+ 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e03,
+ 0x12b6: 0x2e03, 0x12b7: 0x2e0b, 0x12b8: 0x2e0b, 0x12b9: 0x2e13, 0x12ba: 0x2e13, 0x12bb: 0x1f85,
+ 0x12bc: 0x1f85,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b,
+ 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097,
+ 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3,
+ 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af,
+ 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb,
+ 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f,
+ 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7,
+ 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f,
+ 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb,
+ 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503,
+ 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f,
+ 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547,
+ 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f,
+ 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583,
+ 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7,
+ 0x131e: 0x4a7b, 0x131f: 0x4a81, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3e,
+ 0x1324: 0x031b, 0x1325: 0x4a44, 0x1326: 0x4a4a, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327,
+ 0x132a: 0x4a50, 0x132b: 0x4a56, 0x132c: 0x4a5c, 0x132d: 0x4a62, 0x132e: 0x4a68, 0x132f: 0x4a6e,
+ 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337,
+ 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f,
+ 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b,
+ // Block 0x4d, offset 0x1340
+ 0x1342: 0x49c0, 0x1343: 0x49c6, 0x1344: 0x49cc, 0x1345: 0x49d2,
+ 0x1346: 0x49d8, 0x1347: 0x49de, 0x134a: 0x49e4, 0x134b: 0x49ea,
+ 0x134c: 0x49f0, 0x134d: 0x49f6, 0x134e: 0x49fc, 0x134f: 0x4a02,
+ 0x1352: 0x4a08, 0x1353: 0x4a0e, 0x1354: 0x4a14, 0x1355: 0x4a1a, 0x1356: 0x4a20, 0x1357: 0x4a26,
+ 0x135a: 0x4a2c, 0x135b: 0x4a32, 0x135c: 0x4a38,
+ 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4267,
+ 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b,
+ 0x136a: 0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d,
+ 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085,
+ 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091,
+ 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d,
+ 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9,
+ 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5,
+ 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176,
+ 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188,
+ 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a,
+ 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9,
+ 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0,
+ 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209,
+ 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027,
+ 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033,
+ 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b,
+ 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023,
+ 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f,
+ 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027,
+ 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033,
+ 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b,
+ 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a,
+ 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e,
+ 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263,
+ 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e,
+ 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4,
+ 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248,
+ 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290,
+ 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f,
+ 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242,
+ 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272,
+ // Block 0x51, offset 0x1440
+ 0x1442: 0x0248,
+ 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e,
+ 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263,
+ 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e,
+ 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4,
+ 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248,
+ 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290,
+ 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f,
+ 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 0x0260, 0x1475: 0x0242,
+ 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272,
+ 0x147c: 0x0293, 0x147e: 0x02cc,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a,
+ 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e,
+ 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263,
+ 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e,
+ 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272,
+ 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251,
+ 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290,
+ 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f,
+ 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242,
+ 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8,
+ 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927,
+ 0x14d0: 0x1a8f, 0x14d1: 0x1a93,
+ 0x14d2: 0x1a97, 0x14d3: 0x1a9b, 0x14d4: 0x1a9f, 0x14d5: 0x1aa3, 0x14d6: 0x1aa7, 0x14d7: 0x1aab,
+ 0x14d8: 0x1aaf, 0x14d9: 0x1ab3, 0x14da: 0x1ab7, 0x14db: 0x1abb, 0x14dc: 0x1abf, 0x14dd: 0x1ac3,
+ 0x14de: 0x1ac7, 0x14df: 0x1acb, 0x14e0: 0x1acf, 0x14e1: 0x1ad3, 0x14e2: 0x1ad7, 0x14e3: 0x1adb,
+ 0x14e4: 0x1adf, 0x14e5: 0x1ae3, 0x14e6: 0x1ae7, 0x14e7: 0x1aeb, 0x14e8: 0x1aef, 0x14e9: 0x1af3,
+ 0x14ea: 0x2721, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b4,
+ 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d,
+ 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059,
+ 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x26b0, 0x1501: 0x26c5, 0x1502: 0x0503,
+ 0x1510: 0x0c0f, 0x1511: 0x0a47,
+ 0x1512: 0x08d3, 0x1513: 0x45c7, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff,
+ 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b,
+ 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b,
+ 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf,
+ 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b,
+ 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43,
+ 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757,
+ 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773,
+ 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3,
+ 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf,
+ 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff,
+ 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f,
+ 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867,
+ 0x156a: 0x086b, 0x156b: 0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af,
+ 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93,
+ 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3,
+ 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f,
+ 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983,
+ 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf,
+ 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3,
+ 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef,
+ 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23,
+ 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37,
+ 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63,
+ 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f,
+ 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692,
+ 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb,
+ 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f,
+ 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6,
+ 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9,
+ 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83,
+ 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3,
+ 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf,
+ 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7,
+ 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f,
+ 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b,
+ 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87,
+ 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb,
+ 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7,
+ 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663,
+ 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd,
+ 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7,
+ 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b,
+ 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f,
+ 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7,
+ 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700,
+ 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 0x163f: 0x0dfb,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23,
+ 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53,
+ 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714,
+ 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b,
+ 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719,
+ 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728,
+ 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37,
+ 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57,
+ 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737,
+ 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741,
+ 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff,
+ 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637,
+ 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f,
+ 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093,
+ 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782,
+ 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3,
+ 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7,
+ 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133,
+ 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa,
+ 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4,
+ 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7,
+ 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7,
+ 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b,
+ 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd,
+ 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f,
+ 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f,
+ 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273,
+ 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677,
+ 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7,
+ 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb,
+ 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5,
+ 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa,
+ 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 0x134f, 0x1711: 0x136b,
+ 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7,
+ 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665,
+ 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f,
+ 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477,
+ 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693,
+ 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb,
+ 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b,
+ 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567,
+ 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7,
+ 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7,
+ 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef,
+ 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868,
+}
+
+// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes
+// Block 0 is the zero block.
+var nfkcIndex = [1408]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04,
+ 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09,
+ 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62,
+ 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05,
+ 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a,
+ 0xf0: 0x13,
+ // Block 0x4, offset 0x100
+ 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d,
+ 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74,
+ 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a,
+ 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82,
+ // Block 0x5, offset 0x140
+ 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89,
+ 0x14d: 0x8a,
+ 0x15c: 0x8b, 0x15f: 0x8c,
+ 0x162: 0x8d, 0x164: 0x8e,
+ 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94,
+ 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12,
+ 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a,
+ // Block 0x6, offset 0x180
+ 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d,
+ 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0,
+ 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1,
+ 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4,
+ 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8,
+ 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28,
+ 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30,
+ // Block 0x8, offset 0x200
+ 0x219: 0xae, 0x21a: 0xaf, 0x21b: 0xb0, 0x21d: 0xb1, 0x21f: 0xb2,
+ 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8,
+ 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc,
+ 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd,
+ 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe,
+ // Block 0x9, offset 0x240
+ 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf,
+ 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0,
+ 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1,
+ 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2,
+ 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3,
+ 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd,
+ 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe,
+ 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf,
+ // Block 0xa, offset 0x280
+ 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0,
+ 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1,
+ 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2,
+ 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3,
+ 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd,
+ 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe,
+ 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf,
+ 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1,
+ 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2,
+ 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3,
+ 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4,
+ // Block 0xc, offset 0x300
+ 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34,
+ 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c,
+ 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44,
+ 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b,
+ // Block 0xd, offset 0x340
+ 0x347: 0xc6,
+ 0x34b: 0xc7, 0x34d: 0xc8,
+ 0x368: 0xc9, 0x36b: 0xca,
+ 0x374: 0xcb,
+ 0x37d: 0xcc,
+ // Block 0xe, offset 0x380
+ 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0,
+ 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4,
+ 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9,
+ 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc,
+ 0x3a0: 0xdd, 0x3a7: 0xde,
+ 0x3a8: 0xdf, 0x3a9: 0xe0, 0x3aa: 0xe1,
+ 0x3b0: 0xda, 0x3b5: 0xe2, 0x3b6: 0xe3,
+ // Block 0xf, offset 0x3c0
+ 0x3eb: 0xe4, 0x3ec: 0xe5,
+ // Block 0x10, offset 0x400
+ 0x432: 0xe6,
+ // Block 0x11, offset 0x440
+ 0x445: 0xe7, 0x446: 0xe8, 0x447: 0xe9,
+ 0x449: 0xea,
+ 0x450: 0xeb, 0x451: 0xec, 0x452: 0xed, 0x453: 0xee, 0x454: 0xef, 0x455: 0xf0, 0x456: 0xf1, 0x457: 0xf2,
+ 0x458: 0xf3, 0x459: 0xf4, 0x45a: 0x4c, 0x45b: 0xf5, 0x45c: 0xf6, 0x45d: 0xf7, 0x45e: 0xf8, 0x45f: 0x4d,
+ // Block 0x12, offset 0x480
+ 0x480: 0xf9, 0x484: 0xe5,
+ 0x48b: 0xfa,
+ 0x4a3: 0xfb, 0x4a5: 0xfc,
+ 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50,
+ // Block 0x13, offset 0x4c0
+ 0x4c4: 0x51, 0x4c5: 0xfd, 0x4c6: 0xfe,
+ 0x4c8: 0x52, 0x4c9: 0xff,
+ // Block 0x14, offset 0x500
+ 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a,
+ 0x528: 0x5b,
+ // Block 0x15, offset 0x540
+ 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d,
+ 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11,
+ 0x56f: 0x12,
+}
+
+// nfkcSparseOffset: 164 entries, 328 bytes
+var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xdf, 0xe3, 0xe9, 0xfa, 0x106, 0x108, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x118, 0x11a, 0x11c, 0x11f, 0x122, 0x124, 0x127, 0x12a, 0x12e, 0x133, 0x13c, 0x13e, 0x141, 0x143, 0x14e, 0x159, 0x167, 0x175, 0x185, 0x193, 0x19a, 0x1a0, 0x1af, 0x1b3, 0x1b5, 0x1b9, 0x1bb, 0x1be, 0x1c0, 0x1c3, 0x1c5, 0x1c8, 0x1ca, 0x1cc, 0x1ce, 0x1da, 0x1e4, 0x1ee, 0x1f1, 0x1f5, 0x1f7, 0x1f9, 0x1fb, 0x1fd, 0x200, 0x202, 0x204, 0x206, 0x208, 0x20e, 0x211, 0x215, 0x217, 0x21e, 0x224, 0x22a, 0x232, 0x238, 0x23e, 0x244, 0x248, 0x24a, 0x24c, 0x24e, 0x250, 0x256, 0x259, 0x25b, 0x261, 0x264, 0x26c, 0x273, 0x276, 0x279, 0x27b, 0x27e, 0x286, 0x28a, 0x291, 0x294, 0x29a, 0x29c, 0x29e, 0x2a1, 0x2a3, 0x2a6, 0x2a8, 0x2aa, 0x2ac, 0x2ae, 0x2b1, 0x2b3, 0x2b5, 0x2b7, 0x2b9, 0x2c6, 0x2d0, 0x2d2, 0x2d4, 0x2d8, 0x2dd, 0x2e9, 0x2ee, 0x2f7, 0x2fd, 0x302, 0x306, 0x30b, 0x30f, 0x31f, 0x32d, 0x33b, 0x349, 0x34f, 0x351, 0x353, 0x356, 0x361, 0x363}
+
+// nfkcSparseValues: 877 entries, 3508 bytes
+var nfkcSparseValues = [877]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0002, lo: 0x0d},
+ {value: 0x0001, lo: 0xa0, hi: 0xa0},
+ {value: 0x427b, lo: 0xa8, hi: 0xa8},
+ {value: 0x0083, lo: 0xaa, hi: 0xaa},
+ {value: 0x4267, lo: 0xaf, hi: 0xaf},
+ {value: 0x0025, lo: 0xb2, hi: 0xb3},
+ {value: 0x425d, lo: 0xb4, hi: 0xb4},
+ {value: 0x01dc, lo: 0xb5, hi: 0xb5},
+ {value: 0x4294, lo: 0xb8, hi: 0xb8},
+ {value: 0x0023, lo: 0xb9, hi: 0xb9},
+ {value: 0x009f, lo: 0xba, hi: 0xba},
+ {value: 0x221f, lo: 0xbc, hi: 0xbc},
+ {value: 0x2213, lo: 0xbd, hi: 0xbd},
+ {value: 0x22b5, lo: 0xbe, hi: 0xbe},
+ // Block 0x1, offset 0xe
+ {value: 0x0091, lo: 0x03},
+ {value: 0x46e5, lo: 0xa0, hi: 0xa1},
+ {value: 0x4717, lo: 0xaf, hi: 0xb0},
+ {value: 0xa000, lo: 0xb7, hi: 0xb7},
+ // Block 0x2, offset 0x12
+ {value: 0x0003, lo: 0x08},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x0091, lo: 0xb0, hi: 0xb0},
+ {value: 0x0119, lo: 0xb1, hi: 0xb1},
+ {value: 0x0095, lo: 0xb2, hi: 0xb2},
+ {value: 0x00a5, lo: 0xb3, hi: 0xb3},
+ {value: 0x0143, lo: 0xb4, hi: 0xb6},
+ {value: 0x00af, lo: 0xb7, hi: 0xb7},
+ {value: 0x00b3, lo: 0xb8, hi: 0xb8},
+ // Block 0x3, offset 0x1b
+ {value: 0x000a, lo: 0x09},
+ {value: 0x4271, lo: 0x98, hi: 0x98},
+ {value: 0x4276, lo: 0x99, hi: 0x9a},
+ {value: 0x4299, lo: 0x9b, hi: 0x9b},
+ {value: 0x4262, lo: 0x9c, hi: 0x9c},
+ {value: 0x4285, lo: 0x9d, hi: 0x9d},
+ {value: 0x0113, lo: 0xa0, hi: 0xa0},
+ {value: 0x0099, lo: 0xa1, hi: 0xa1},
+ {value: 0x00a7, lo: 0xa2, hi: 0xa3},
+ {value: 0x0167, lo: 0xa4, hi: 0xa4},
+ // Block 0x4, offset 0x25
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0xa000, lo: 0x8d, hi: 0x8d},
+ {value: 0x37a8, lo: 0x90, hi: 0x90},
+ {value: 0x37b4, lo: 0x91, hi: 0x91},
+ {value: 0x37a2, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x96, hi: 0x96},
+ {value: 0x381a, lo: 0x97, hi: 0x97},
+ {value: 0x37e4, lo: 0x9c, hi: 0x9c},
+ {value: 0x37cc, lo: 0x9d, hi: 0x9d},
+ {value: 0x37f6, lo: 0x9e, hi: 0x9e},
+ {value: 0xa000, lo: 0xb4, hi: 0xb5},
+ {value: 0x3820, lo: 0xb6, hi: 0xb6},
+ {value: 0x3826, lo: 0xb7, hi: 0xb7},
+ // Block 0x5, offset 0x35
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x83, hi: 0x87},
+ // Block 0x6, offset 0x37
+ {value: 0x0001, lo: 0x04},
+ {value: 0x8113, lo: 0x81, hi: 0x82},
+ {value: 0x8132, lo: 0x84, hi: 0x84},
+ {value: 0x812d, lo: 0x85, hi: 0x85},
+ {value: 0x810d, lo: 0x87, hi: 0x87},
+ // Block 0x7, offset 0x3c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x97},
+ {value: 0x8119, lo: 0x98, hi: 0x98},
+ {value: 0x811a, lo: 0x99, hi: 0x99},
+ {value: 0x811b, lo: 0x9a, hi: 0x9a},
+ {value: 0x3844, lo: 0xa2, hi: 0xa2},
+ {value: 0x384a, lo: 0xa3, hi: 0xa3},
+ {value: 0x3856, lo: 0xa4, hi: 0xa4},
+ {value: 0x3850, lo: 0xa5, hi: 0xa5},
+ {value: 0x385c, lo: 0xa6, hi: 0xa6},
+ {value: 0xa000, lo: 0xa7, hi: 0xa7},
+ // Block 0x8, offset 0x47
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x386e, lo: 0x80, hi: 0x80},
+ {value: 0xa000, lo: 0x81, hi: 0x81},
+ {value: 0x3862, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x3868, lo: 0x93, hi: 0x93},
+ {value: 0xa000, lo: 0x95, hi: 0x95},
+ {value: 0x8132, lo: 0x96, hi: 0x9c},
+ {value: 0x8132, lo: 0x9f, hi: 0xa2},
+ {value: 0x812d, lo: 0xa3, hi: 0xa3},
+ {value: 0x8132, lo: 0xa4, hi: 0xa4},
+ {value: 0x8132, lo: 0xa7, hi: 0xa8},
+ {value: 0x812d, lo: 0xaa, hi: 0xaa},
+ {value: 0x8132, lo: 0xab, hi: 0xac},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ // Block 0x9, offset 0x56
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x811f, lo: 0x91, hi: 0x91},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x812d, lo: 0xb1, hi: 0xb1},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb5, hi: 0xb6},
+ {value: 0x812d, lo: 0xb7, hi: 0xb9},
+ {value: 0x8132, lo: 0xba, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbc},
+ {value: 0x8132, lo: 0xbd, hi: 0xbd},
+ {value: 0x812d, lo: 0xbe, hi: 0xbe},
+ {value: 0x8132, lo: 0xbf, hi: 0xbf},
+ // Block 0xa, offset 0x63
+ {value: 0x0005, lo: 0x07},
+ {value: 0x8132, lo: 0x80, hi: 0x80},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x812d, lo: 0x82, hi: 0x83},
+ {value: 0x812d, lo: 0x84, hi: 0x85},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x812d, lo: 0x88, hi: 0x89},
+ {value: 0x8132, lo: 0x8a, hi: 0x8a},
+ // Block 0xb, offset 0x6b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xab, hi: 0xb1},
+ {value: 0x812d, lo: 0xb2, hi: 0xb2},
+ {value: 0x8132, lo: 0xb3, hi: 0xb3},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0xc, offset 0x70
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0x96, hi: 0x99},
+ {value: 0x8132, lo: 0x9b, hi: 0xa3},
+ {value: 0x8132, lo: 0xa5, hi: 0xa7},
+ {value: 0x8132, lo: 0xa9, hi: 0xad},
+ // Block 0xd, offset 0x75
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x99, hi: 0x9b},
+ // Block 0xe, offset 0x77
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0xa8, hi: 0xa8},
+ {value: 0x3edb, lo: 0xa9, hi: 0xa9},
+ {value: 0xa000, lo: 0xb0, hi: 0xb0},
+ {value: 0x3ee3, lo: 0xb1, hi: 0xb1},
+ {value: 0xa000, lo: 0xb3, hi: 0xb3},
+ {value: 0x3eeb, lo: 0xb4, hi: 0xb4},
+ {value: 0x9902, lo: 0xbc, hi: 0xbc},
+ // Block 0xf, offset 0x7f
+ {value: 0x0008, lo: 0x06},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x91, hi: 0x91},
+ {value: 0x812d, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x93, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x94},
+ {value: 0x451f, lo: 0x98, hi: 0x9f},
+ // Block 0x10, offset 0x86
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x11, offset 0x89
+ {value: 0x0008, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2ca1, lo: 0x8b, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x455f, lo: 0x9c, hi: 0x9d},
+ {value: 0x456f, lo: 0x9f, hi: 0x9f},
+ {value: 0x8132, lo: 0xbe, hi: 0xbe},
+ // Block 0x12, offset 0x91
+ {value: 0x0000, lo: 0x03},
+ {value: 0x4597, lo: 0xb3, hi: 0xb3},
+ {value: 0x459f, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x13, offset 0x95
+ {value: 0x0008, lo: 0x03},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x4577, lo: 0x99, hi: 0x9b},
+ {value: 0x458f, lo: 0x9e, hi: 0x9e},
+ // Block 0x14, offset 0x99
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ // Block 0x15, offset 0x9b
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ // Block 0x16, offset 0x9d
+ {value: 0x0000, lo: 0x08},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2cb9, lo: 0x88, hi: 0x88},
+ {value: 0x2cb1, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cc1, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x96, hi: 0x97},
+ {value: 0x45a7, lo: 0x9c, hi: 0x9c},
+ {value: 0x45af, lo: 0x9d, hi: 0x9d},
+ // Block 0x17, offset 0xa6
+ {value: 0x0000, lo: 0x03},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0x2cc9, lo: 0x94, hi: 0x94},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x18, offset 0xaa
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2cd1, lo: 0x8a, hi: 0x8a},
+ {value: 0x2ce1, lo: 0x8b, hi: 0x8b},
+ {value: 0x2cd9, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x19, offset 0xb1
+ {value: 0x1801, lo: 0x04},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x3ef3, lo: 0x88, hi: 0x88},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x8120, lo: 0x95, hi: 0x96},
+ // Block 0x1a, offset 0xb6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbc, hi: 0xbc},
+ {value: 0xa000, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0xb9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x2ce9, lo: 0x80, hi: 0x80},
+ {value: 0x9900, lo: 0x82, hi: 0x82},
+ {value: 0xa000, lo: 0x86, hi: 0x86},
+ {value: 0x2cf1, lo: 0x87, hi: 0x87},
+ {value: 0x2cf9, lo: 0x88, hi: 0x88},
+ {value: 0x2f53, lo: 0x8a, hi: 0x8a},
+ {value: 0x2ddb, lo: 0x8b, hi: 0x8b},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x95, hi: 0x96},
+ // Block 0x1c, offset 0xc3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x1d, offset 0xc6
+ {value: 0x0000, lo: 0x06},
+ {value: 0xa000, lo: 0x86, hi: 0x87},
+ {value: 0x2d01, lo: 0x8a, hi: 0x8a},
+ {value: 0x2d11, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d09, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ // Block 0x1e, offset 0xcd
+ {value: 0x6be7, lo: 0x07},
+ {value: 0x9904, lo: 0x8a, hi: 0x8a},
+ {value: 0x9900, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x3efb, lo: 0x9a, hi: 0x9a},
+ {value: 0x2f5b, lo: 0x9c, hi: 0x9c},
+ {value: 0x2de6, lo: 0x9d, hi: 0x9d},
+ {value: 0x2d19, lo: 0x9e, hi: 0x9f},
+ // Block 0x1f, offset 0xd5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x2624, lo: 0xb3, hi: 0xb3},
+ {value: 0x8122, lo: 0xb8, hi: 0xb9},
+ {value: 0x8104, lo: 0xba, hi: 0xba},
+ // Block 0x20, offset 0xd9
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8123, lo: 0x88, hi: 0x8b},
+ // Block 0x21, offset 0xdb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x2639, lo: 0xb3, hi: 0xb3},
+ {value: 0x8124, lo: 0xb8, hi: 0xb9},
+ {value: 0x8104, lo: 0xba, hi: 0xba},
+ // Block 0x22, offset 0xdf
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8125, lo: 0x88, hi: 0x8b},
+ {value: 0x262b, lo: 0x9c, hi: 0x9c},
+ {value: 0x2632, lo: 0x9d, hi: 0x9d},
+ // Block 0x23, offset 0xe3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x030b, lo: 0x8c, hi: 0x8c},
+ {value: 0x812d, lo: 0x98, hi: 0x99},
+ {value: 0x812d, lo: 0xb5, hi: 0xb5},
+ {value: 0x812d, lo: 0xb7, hi: 0xb7},
+ {value: 0x812b, lo: 0xb9, hi: 0xb9},
+ // Block 0x24, offset 0xe9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x2647, lo: 0x83, hi: 0x83},
+ {value: 0x264e, lo: 0x8d, hi: 0x8d},
+ {value: 0x2655, lo: 0x92, hi: 0x92},
+ {value: 0x265c, lo: 0x97, hi: 0x97},
+ {value: 0x2663, lo: 0x9c, hi: 0x9c},
+ {value: 0x2640, lo: 0xa9, hi: 0xa9},
+ {value: 0x8126, lo: 0xb1, hi: 0xb1},
+ {value: 0x8127, lo: 0xb2, hi: 0xb2},
+ {value: 0x4a87, lo: 0xb3, hi: 0xb3},
+ {value: 0x8128, lo: 0xb4, hi: 0xb4},
+ {value: 0x4a90, lo: 0xb5, hi: 0xb5},
+ {value: 0x45b7, lo: 0xb6, hi: 0xb6},
+ {value: 0x45f7, lo: 0xb7, hi: 0xb7},
+ {value: 0x45bf, lo: 0xb8, hi: 0xb8},
+ {value: 0x4602, lo: 0xb9, hi: 0xb9},
+ {value: 0x8127, lo: 0xba, hi: 0xbd},
+ // Block 0x25, offset 0xfa
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x8127, lo: 0x80, hi: 0x80},
+ {value: 0x4a99, lo: 0x81, hi: 0x81},
+ {value: 0x8132, lo: 0x82, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0x86, hi: 0x87},
+ {value: 0x2671, lo: 0x93, hi: 0x93},
+ {value: 0x2678, lo: 0x9d, hi: 0x9d},
+ {value: 0x267f, lo: 0xa2, hi: 0xa2},
+ {value: 0x2686, lo: 0xa7, hi: 0xa7},
+ {value: 0x268d, lo: 0xac, hi: 0xac},
+ {value: 0x266a, lo: 0xb9, hi: 0xb9},
+ // Block 0x26, offset 0x106
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x86, hi: 0x86},
+ // Block 0x27, offset 0x108
+ {value: 0x0000, lo: 0x05},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x2d21, lo: 0xa6, hi: 0xa6},
+ {value: 0x9900, lo: 0xae, hi: 0xae},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x28, offset 0x10e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ // Block 0x29, offset 0x110
+ {value: 0x0000, lo: 0x01},
+ {value: 0x030f, lo: 0xbc, hi: 0xbc},
+ // Block 0x2a, offset 0x112
+ {value: 0x0000, lo: 0x01},
+ {value: 0xa000, lo: 0x80, hi: 0x92},
+ // Block 0x2b, offset 0x114
+ {value: 0x0000, lo: 0x01},
+ {value: 0xb900, lo: 0xa1, hi: 0xb5},
+ // Block 0x2c, offset 0x116
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0xa8, hi: 0xbf},
+ // Block 0x2d, offset 0x118
+ {value: 0x0000, lo: 0x01},
+ {value: 0x9900, lo: 0x80, hi: 0x82},
+ // Block 0x2e, offset 0x11a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x9d, hi: 0x9f},
+ // Block 0x2f, offset 0x11c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x94, hi: 0x94},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x30, offset 0x11f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x92, hi: 0x92},
+ {value: 0x8132, lo: 0x9d, hi: 0x9d},
+ // Block 0x31, offset 0x122
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8131, lo: 0xa9, hi: 0xa9},
+ // Block 0x32, offset 0x124
+ {value: 0x0004, lo: 0x02},
+ {value: 0x812e, lo: 0xb9, hi: 0xba},
+ {value: 0x812d, lo: 0xbb, hi: 0xbb},
+ // Block 0x33, offset 0x127
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x97, hi: 0x97},
+ {value: 0x812d, lo: 0x98, hi: 0x98},
+ // Block 0x34, offset 0x12a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0xa0, hi: 0xa0},
+ {value: 0x8132, lo: 0xb5, hi: 0xbc},
+ {value: 0x812d, lo: 0xbf, hi: 0xbf},
+ // Block 0x35, offset 0x12e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ {value: 0x812d, lo: 0xb5, hi: 0xba},
+ {value: 0x8132, lo: 0xbb, hi: 0xbc},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x36, offset 0x133
+ {value: 0x0000, lo: 0x08},
+ {value: 0x2d69, lo: 0x80, hi: 0x80},
+ {value: 0x2d71, lo: 0x81, hi: 0x81},
+ {value: 0xa000, lo: 0x82, hi: 0x82},
+ {value: 0x2d79, lo: 0x83, hi: 0x83},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xab, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xac},
+ {value: 0x8132, lo: 0xad, hi: 0xb3},
+ // Block 0x37, offset 0x13c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xaa, hi: 0xab},
+ // Block 0x38, offset 0x13e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xa6, hi: 0xa6},
+ {value: 0x8104, lo: 0xb2, hi: 0xb3},
+ // Block 0x39, offset 0x141
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x3a, offset 0x143
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x8132, lo: 0x90, hi: 0x92},
+ {value: 0x8101, lo: 0x94, hi: 0x94},
+ {value: 0x812d, lo: 0x95, hi: 0x99},
+ {value: 0x8132, lo: 0x9a, hi: 0x9b},
+ {value: 0x812d, lo: 0x9c, hi: 0x9f},
+ {value: 0x8132, lo: 0xa0, hi: 0xa0},
+ {value: 0x8101, lo: 0xa2, hi: 0xa8},
+ {value: 0x812d, lo: 0xad, hi: 0xad},
+ {value: 0x8132, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb8, hi: 0xb9},
+ // Block 0x3b, offset 0x14e
+ {value: 0x0002, lo: 0x0a},
+ {value: 0x0043, lo: 0xac, hi: 0xac},
+ {value: 0x00d1, lo: 0xad, hi: 0xad},
+ {value: 0x0045, lo: 0xae, hi: 0xae},
+ {value: 0x0049, lo: 0xb0, hi: 0xb1},
+ {value: 0x00e6, lo: 0xb2, hi: 0xb2},
+ {value: 0x004f, lo: 0xb3, hi: 0xba},
+ {value: 0x005f, lo: 0xbc, hi: 0xbc},
+ {value: 0x00ef, lo: 0xbd, hi: 0xbd},
+ {value: 0x0061, lo: 0xbe, hi: 0xbe},
+ {value: 0x0065, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x159
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0001, lo: 0x80, hi: 0x8a},
+ {value: 0x043b, lo: 0x91, hi: 0x91},
+ {value: 0x429e, lo: 0x97, hi: 0x97},
+ {value: 0x001d, lo: 0xa4, hi: 0xa4},
+ {value: 0x1873, lo: 0xa5, hi: 0xa5},
+ {value: 0x1b5f, lo: 0xa6, hi: 0xa6},
+ {value: 0x0001, lo: 0xaf, hi: 0xaf},
+ {value: 0x2694, lo: 0xb3, hi: 0xb3},
+ {value: 0x2801, lo: 0xb4, hi: 0xb4},
+ {value: 0x269b, lo: 0xb6, hi: 0xb6},
+ {value: 0x280b, lo: 0xb7, hi: 0xb7},
+ {value: 0x186d, lo: 0xbc, hi: 0xbc},
+ {value: 0x426c, lo: 0xbe, hi: 0xbe},
+ // Block 0x3d, offset 0x167
+ {value: 0x0002, lo: 0x0d},
+ {value: 0x1933, lo: 0x87, hi: 0x87},
+ {value: 0x1930, lo: 0x88, hi: 0x88},
+ {value: 0x1870, lo: 0x89, hi: 0x89},
+ {value: 0x2991, lo: 0x97, hi: 0x97},
+ {value: 0x0001, lo: 0x9f, hi: 0x9f},
+ {value: 0x0021, lo: 0xb0, hi: 0xb0},
+ {value: 0x0093, lo: 0xb1, hi: 0xb1},
+ {value: 0x0029, lo: 0xb4, hi: 0xb9},
+ {value: 0x0017, lo: 0xba, hi: 0xba},
+ {value: 0x0467, lo: 0xbb, hi: 0xbb},
+ {value: 0x003b, lo: 0xbc, hi: 0xbc},
+ {value: 0x0011, lo: 0xbd, hi: 0xbe},
+ {value: 0x009d, lo: 0xbf, hi: 0xbf},
+ // Block 0x3e, offset 0x175
+ {value: 0x0002, lo: 0x0f},
+ {value: 0x0021, lo: 0x80, hi: 0x89},
+ {value: 0x0017, lo: 0x8a, hi: 0x8a},
+ {value: 0x0467, lo: 0x8b, hi: 0x8b},
+ {value: 0x003b, lo: 0x8c, hi: 0x8c},
+ {value: 0x0011, lo: 0x8d, hi: 0x8e},
+ {value: 0x0083, lo: 0x90, hi: 0x90},
+ {value: 0x008b, lo: 0x91, hi: 0x91},
+ {value: 0x009f, lo: 0x92, hi: 0x92},
+ {value: 0x00b1, lo: 0x93, hi: 0x93},
+ {value: 0x0104, lo: 0x94, hi: 0x94},
+ {value: 0x0091, lo: 0x95, hi: 0x95},
+ {value: 0x0097, lo: 0x96, hi: 0x99},
+ {value: 0x00a1, lo: 0x9a, hi: 0x9a},
+ {value: 0x00a7, lo: 0x9b, hi: 0x9c},
+ {value: 0x199c, lo: 0xa8, hi: 0xa8},
+ // Block 0x3f, offset 0x185
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x8132, lo: 0x90, hi: 0x91},
+ {value: 0x8101, lo: 0x92, hi: 0x93},
+ {value: 0x8132, lo: 0x94, hi: 0x97},
+ {value: 0x8101, lo: 0x98, hi: 0x9a},
+ {value: 0x8132, lo: 0x9b, hi: 0x9c},
+ {value: 0x8132, lo: 0xa1, hi: 0xa1},
+ {value: 0x8101, lo: 0xa5, hi: 0xa6},
+ {value: 0x8132, lo: 0xa7, hi: 0xa7},
+ {value: 0x812d, lo: 0xa8, hi: 0xa8},
+ {value: 0x8132, lo: 0xa9, hi: 0xa9},
+ {value: 0x8101, lo: 0xaa, hi: 0xab},
+ {value: 0x812d, lo: 0xac, hi: 0xaf},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ // Block 0x40, offset 0x193
+ {value: 0x0007, lo: 0x06},
+ {value: 0x2183, lo: 0x89, hi: 0x89},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ {value: 0x3bbc, lo: 0x9a, hi: 0x9b},
+ {value: 0x3bca, lo: 0xae, hi: 0xae},
+ // Block 0x41, offset 0x19a
+ {value: 0x000e, lo: 0x05},
+ {value: 0x3bd1, lo: 0x8d, hi: 0x8e},
+ {value: 0x3bd8, lo: 0x8f, hi: 0x8f},
+ {value: 0xa000, lo: 0x90, hi: 0x90},
+ {value: 0xa000, lo: 0x92, hi: 0x92},
+ {value: 0xa000, lo: 0x94, hi: 0x94},
+ // Block 0x42, offset 0x1a0
+ {value: 0x0173, lo: 0x0e},
+ {value: 0xa000, lo: 0x83, hi: 0x83},
+ {value: 0x3be6, lo: 0x84, hi: 0x84},
+ {value: 0xa000, lo: 0x88, hi: 0x88},
+ {value: 0x3bed, lo: 0x89, hi: 0x89},
+ {value: 0xa000, lo: 0x8b, hi: 0x8b},
+ {value: 0x3bf4, lo: 0x8c, hi: 0x8c},
+ {value: 0xa000, lo: 0xa3, hi: 0xa3},
+ {value: 0x3bfb, lo: 0xa4, hi: 0xa4},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x3c02, lo: 0xa6, hi: 0xa6},
+ {value: 0x26a2, lo: 0xac, hi: 0xad},
+ {value: 0x26a9, lo: 0xaf, hi: 0xaf},
+ {value: 0x281f, lo: 0xb0, hi: 0xb0},
+ {value: 0xa000, lo: 0xbc, hi: 0xbc},
+ // Block 0x43, offset 0x1af
+ {value: 0x0007, lo: 0x03},
+ {value: 0x3c6b, lo: 0xa0, hi: 0xa1},
+ {value: 0x3c95, lo: 0xa2, hi: 0xa3},
+ {value: 0x3cbf, lo: 0xaa, hi: 0xad},
+ // Block 0x44, offset 0x1b3
+ {value: 0x0004, lo: 0x01},
+ {value: 0x048b, lo: 0xa9, hi: 0xaa},
+ // Block 0x45, offset 0x1b5
+ {value: 0x0002, lo: 0x03},
+ {value: 0x0057, lo: 0x80, hi: 0x8f},
+ {value: 0x0083, lo: 0x90, hi: 0xa9},
+ {value: 0x0021, lo: 0xaa, hi: 0xaa},
+ // Block 0x46, offset 0x1b9
+ {value: 0x0000, lo: 0x01},
+ {value: 0x299e, lo: 0x8c, hi: 0x8c},
+ // Block 0x47, offset 0x1bb
+ {value: 0x0266, lo: 0x02},
+ {value: 0x1b8f, lo: 0xb4, hi: 0xb4},
+ {value: 0x192d, lo: 0xb5, hi: 0xb6},
+ // Block 0x48, offset 0x1be
+ {value: 0x0000, lo: 0x01},
+ {value: 0x44e0, lo: 0x9c, hi: 0x9c},
+ // Block 0x49, offset 0x1c0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0095, lo: 0xbc, hi: 0xbc},
+ {value: 0x006d, lo: 0xbd, hi: 0xbd},
+ // Block 0x4a, offset 0x1c3
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xaf, hi: 0xb1},
+ // Block 0x4b, offset 0x1c5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x047f, lo: 0xaf, hi: 0xaf},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x4c, offset 0x1c8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa0, hi: 0xbf},
+ // Block 0x4d, offset 0x1ca
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0dc3, lo: 0x9f, hi: 0x9f},
+ // Block 0x4e, offset 0x1cc
+ {value: 0x0000, lo: 0x01},
+ {value: 0x162f, lo: 0xb3, hi: 0xb3},
+ // Block 0x4f, offset 0x1ce
+ {value: 0x0004, lo: 0x0b},
+ {value: 0x1597, lo: 0x80, hi: 0x82},
+ {value: 0x15af, lo: 0x83, hi: 0x83},
+ {value: 0x15c7, lo: 0x84, hi: 0x85},
+ {value: 0x15d7, lo: 0x86, hi: 0x89},
+ {value: 0x15eb, lo: 0x8a, hi: 0x8c},
+ {value: 0x15ff, lo: 0x8d, hi: 0x8d},
+ {value: 0x1607, lo: 0x8e, hi: 0x8e},
+ {value: 0x160f, lo: 0x8f, hi: 0x90},
+ {value: 0x161b, lo: 0x91, hi: 0x93},
+ {value: 0x162b, lo: 0x94, hi: 0x94},
+ {value: 0x1633, lo: 0x95, hi: 0x95},
+ // Block 0x50, offset 0x1da
+ {value: 0x0004, lo: 0x09},
+ {value: 0x0001, lo: 0x80, hi: 0x80},
+ {value: 0x812c, lo: 0xaa, hi: 0xaa},
+ {value: 0x8131, lo: 0xab, hi: 0xab},
+ {value: 0x8133, lo: 0xac, hi: 0xac},
+ {value: 0x812e, lo: 0xad, hi: 0xad},
+ {value: 0x812f, lo: 0xae, hi: 0xae},
+ {value: 0x812f, lo: 0xaf, hi: 0xaf},
+ {value: 0x04b3, lo: 0xb6, hi: 0xb6},
+ {value: 0x0887, lo: 0xb8, hi: 0xba},
+ // Block 0x51, offset 0x1e4
+ {value: 0x0006, lo: 0x09},
+ {value: 0x0313, lo: 0xb1, hi: 0xb1},
+ {value: 0x0317, lo: 0xb2, hi: 0xb2},
+ {value: 0x4a3e, lo: 0xb3, hi: 0xb3},
+ {value: 0x031b, lo: 0xb4, hi: 0xb4},
+ {value: 0x4a44, lo: 0xb5, hi: 0xb6},
+ {value: 0x031f, lo: 0xb7, hi: 0xb7},
+ {value: 0x0323, lo: 0xb8, hi: 0xb8},
+ {value: 0x0327, lo: 0xb9, hi: 0xb9},
+ {value: 0x4a50, lo: 0xba, hi: 0xbf},
+ // Block 0x52, offset 0x1ee
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xaf, hi: 0xaf},
+ {value: 0x8132, lo: 0xb4, hi: 0xbd},
+ // Block 0x53, offset 0x1f1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x020f, lo: 0x9c, hi: 0x9c},
+ {value: 0x0212, lo: 0x9d, hi: 0x9d},
+ {value: 0x8132, lo: 0x9e, hi: 0x9f},
+ // Block 0x54, offset 0x1f5
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb1},
+ // Block 0x55, offset 0x1f7
+ {value: 0x0000, lo: 0x01},
+ {value: 0x163b, lo: 0xb0, hi: 0xb0},
+ // Block 0x56, offset 0x1f9
+ {value: 0x000c, lo: 0x01},
+ {value: 0x00d7, lo: 0xb8, hi: 0xb9},
+ // Block 0x57, offset 0x1fb
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ // Block 0x58, offset 0x1fd
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x84, hi: 0x84},
+ {value: 0x8132, lo: 0xa0, hi: 0xb1},
+ // Block 0x59, offset 0x200
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xab, hi: 0xad},
+ // Block 0x5a, offset 0x202
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x93, hi: 0x93},
+ // Block 0x5b, offset 0x204
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0xb3, hi: 0xb3},
+ // Block 0x5c, offset 0x206
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ // Block 0x5d, offset 0x208
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0xb0, hi: 0xb0},
+ {value: 0x8132, lo: 0xb2, hi: 0xb3},
+ {value: 0x812d, lo: 0xb4, hi: 0xb4},
+ {value: 0x8132, lo: 0xb7, hi: 0xb8},
+ {value: 0x8132, lo: 0xbe, hi: 0xbf},
+ // Block 0x5e, offset 0x20e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x81, hi: 0x81},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ // Block 0x5f, offset 0x211
+ {value: 0x0008, lo: 0x03},
+ {value: 0x1637, lo: 0x9c, hi: 0x9d},
+ {value: 0x0125, lo: 0x9e, hi: 0x9e},
+ {value: 0x1643, lo: 0x9f, hi: 0x9f},
+ // Block 0x60, offset 0x215
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xad, hi: 0xad},
+ // Block 0x61, offset 0x217
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe500, lo: 0x80, hi: 0x80},
+ {value: 0xc600, lo: 0x81, hi: 0x9b},
+ {value: 0xe500, lo: 0x9c, hi: 0x9c},
+ {value: 0xc600, lo: 0x9d, hi: 0xb7},
+ {value: 0xe500, lo: 0xb8, hi: 0xb8},
+ {value: 0xc600, lo: 0xb9, hi: 0xbf},
+ // Block 0x62, offset 0x21e
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x93},
+ {value: 0xe500, lo: 0x94, hi: 0x94},
+ {value: 0xc600, lo: 0x95, hi: 0xaf},
+ {value: 0xe500, lo: 0xb0, hi: 0xb0},
+ {value: 0xc600, lo: 0xb1, hi: 0xbf},
+ // Block 0x63, offset 0x224
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8b},
+ {value: 0xe500, lo: 0x8c, hi: 0x8c},
+ {value: 0xc600, lo: 0x8d, hi: 0xa7},
+ {value: 0xe500, lo: 0xa8, hi: 0xa8},
+ {value: 0xc600, lo: 0xa9, hi: 0xbf},
+ // Block 0x64, offset 0x22a
+ {value: 0x0000, lo: 0x07},
+ {value: 0xc600, lo: 0x80, hi: 0x83},
+ {value: 0xe500, lo: 0x84, hi: 0x84},
+ {value: 0xc600, lo: 0x85, hi: 0x9f},
+ {value: 0xe500, lo: 0xa0, hi: 0xa0},
+ {value: 0xc600, lo: 0xa1, hi: 0xbb},
+ {value: 0xe500, lo: 0xbc, hi: 0xbc},
+ {value: 0xc600, lo: 0xbd, hi: 0xbf},
+ // Block 0x65, offset 0x232
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x97},
+ {value: 0xe500, lo: 0x98, hi: 0x98},
+ {value: 0xc600, lo: 0x99, hi: 0xb3},
+ {value: 0xe500, lo: 0xb4, hi: 0xb4},
+ {value: 0xc600, lo: 0xb5, hi: 0xbf},
+ // Block 0x66, offset 0x238
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x8f},
+ {value: 0xe500, lo: 0x90, hi: 0x90},
+ {value: 0xc600, lo: 0x91, hi: 0xab},
+ {value: 0xe500, lo: 0xac, hi: 0xac},
+ {value: 0xc600, lo: 0xad, hi: 0xbf},
+ // Block 0x67, offset 0x23e
+ {value: 0x0000, lo: 0x05},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ {value: 0xe500, lo: 0xa4, hi: 0xa4},
+ {value: 0xc600, lo: 0xa5, hi: 0xbf},
+ // Block 0x68, offset 0x244
+ {value: 0x0000, lo: 0x03},
+ {value: 0xc600, lo: 0x80, hi: 0x87},
+ {value: 0xe500, lo: 0x88, hi: 0x88},
+ {value: 0xc600, lo: 0x89, hi: 0xa3},
+ // Block 0x69, offset 0x248
+ {value: 0x0002, lo: 0x01},
+ {value: 0x0003, lo: 0x81, hi: 0xbf},
+ // Block 0x6a, offset 0x24a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xbd, hi: 0xbd},
+ // Block 0x6b, offset 0x24c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0xa0, hi: 0xa0},
+ // Block 0x6c, offset 0x24e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb6, hi: 0xba},
+ // Block 0x6d, offset 0x250
+ {value: 0x002c, lo: 0x05},
+ {value: 0x812d, lo: 0x8d, hi: 0x8d},
+ {value: 0x8132, lo: 0x8f, hi: 0x8f},
+ {value: 0x8132, lo: 0xb8, hi: 0xb8},
+ {value: 0x8101, lo: 0xb9, hi: 0xba},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x6e, offset 0x256
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0xa5, hi: 0xa5},
+ {value: 0x812d, lo: 0xa6, hi: 0xa6},
+ // Block 0x6f, offset 0x259
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xa4, hi: 0xa7},
+ // Block 0x70, offset 0x25b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x812d, lo: 0x86, hi: 0x87},
+ {value: 0x8132, lo: 0x88, hi: 0x8a},
+ {value: 0x812d, lo: 0x8b, hi: 0x8b},
+ {value: 0x8132, lo: 0x8c, hi: 0x8c},
+ {value: 0x812d, lo: 0x8d, hi: 0x90},
+ // Block 0x71, offset 0x261
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x86, hi: 0x86},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x72, offset 0x264
+ {value: 0x17fe, lo: 0x07},
+ {value: 0xa000, lo: 0x99, hi: 0x99},
+ {value: 0x423b, lo: 0x9a, hi: 0x9a},
+ {value: 0xa000, lo: 0x9b, hi: 0x9b},
+ {value: 0x4245, lo: 0x9c, hi: 0x9c},
+ {value: 0xa000, lo: 0xa5, hi: 0xa5},
+ {value: 0x424f, lo: 0xab, hi: 0xab},
+ {value: 0x8104, lo: 0xb9, hi: 0xba},
+ // Block 0x73, offset 0x26c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x8132, lo: 0x80, hi: 0x82},
+ {value: 0x9900, lo: 0xa7, hi: 0xa7},
+ {value: 0x2d81, lo: 0xae, hi: 0xae},
+ {value: 0x2d8b, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb1, hi: 0xb2},
+ {value: 0x8104, lo: 0xb3, hi: 0xb4},
+ // Block 0x74, offset 0x273
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x80, hi: 0x80},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0x75, offset 0x276
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb5, hi: 0xb5},
+ {value: 0x8102, lo: 0xb6, hi: 0xb6},
+ // Block 0x76, offset 0x279
+ {value: 0x0002, lo: 0x01},
+ {value: 0x8102, lo: 0xa9, hi: 0xaa},
+ // Block 0x77, offset 0x27b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0xbb, hi: 0xbc},
+ {value: 0x9900, lo: 0xbe, hi: 0xbe},
+ // Block 0x78, offset 0x27e
+ {value: 0x0000, lo: 0x07},
+ {value: 0xa000, lo: 0x87, hi: 0x87},
+ {value: 0x2d95, lo: 0x8b, hi: 0x8b},
+ {value: 0x2d9f, lo: 0x8c, hi: 0x8c},
+ {value: 0x8104, lo: 0x8d, hi: 0x8d},
+ {value: 0x9900, lo: 0x97, hi: 0x97},
+ {value: 0x8132, lo: 0xa6, hi: 0xac},
+ {value: 0x8132, lo: 0xb0, hi: 0xb4},
+ // Block 0x79, offset 0x286
+ {value: 0x0000, lo: 0x03},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x86, hi: 0x86},
+ {value: 0x8132, lo: 0x9e, hi: 0x9e},
+ // Block 0x7a, offset 0x28a
+ {value: 0x6b57, lo: 0x06},
+ {value: 0x9900, lo: 0xb0, hi: 0xb0},
+ {value: 0xa000, lo: 0xb9, hi: 0xb9},
+ {value: 0x9900, lo: 0xba, hi: 0xba},
+ {value: 0x2db3, lo: 0xbb, hi: 0xbb},
+ {value: 0x2da9, lo: 0xbc, hi: 0xbd},
+ {value: 0x2dbd, lo: 0xbe, hi: 0xbe},
+ // Block 0x7b, offset 0x291
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0x82, hi: 0x82},
+ {value: 0x8102, lo: 0x83, hi: 0x83},
+ // Block 0x7c, offset 0x294
+ {value: 0x0000, lo: 0x05},
+ {value: 0x9900, lo: 0xaf, hi: 0xaf},
+ {value: 0xa000, lo: 0xb8, hi: 0xb9},
+ {value: 0x2dc7, lo: 0xba, hi: 0xba},
+ {value: 0x2dd1, lo: 0xbb, hi: 0xbb},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x7d, offset 0x29a
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8102, lo: 0x80, hi: 0x80},
+ // Block 0x7e, offset 0x29c
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xbf, hi: 0xbf},
+ // Block 0x7f, offset 0x29e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb6, hi: 0xb6},
+ {value: 0x8102, lo: 0xb7, hi: 0xb7},
+ // Block 0x80, offset 0x2a1
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xab, hi: 0xab},
+ // Block 0x81, offset 0x2a3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8104, lo: 0xb9, hi: 0xb9},
+ {value: 0x8102, lo: 0xba, hi: 0xba},
+ // Block 0x82, offset 0x2a6
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xa0, hi: 0xa0},
+ // Block 0x83, offset 0x2a8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0xb4, hi: 0xb4},
+ // Block 0x84, offset 0x2aa
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x87, hi: 0x87},
+ // Block 0x85, offset 0x2ac
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x99, hi: 0x99},
+ // Block 0x86, offset 0x2ae
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8102, lo: 0x82, hi: 0x82},
+ {value: 0x8104, lo: 0x84, hi: 0x85},
+ // Block 0x87, offset 0x2b1
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8104, lo: 0x97, hi: 0x97},
+ // Block 0x88, offset 0x2b3
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0xb0, hi: 0xb4},
+ // Block 0x89, offset 0x2b5
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xb0, hi: 0xb6},
+ // Block 0x8a, offset 0x2b7
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8101, lo: 0x9e, hi: 0x9e},
+ // Block 0x8b, offset 0x2b9
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x45cf, lo: 0x9e, hi: 0x9e},
+ {value: 0x45d9, lo: 0x9f, hi: 0x9f},
+ {value: 0x460d, lo: 0xa0, hi: 0xa0},
+ {value: 0x461b, lo: 0xa1, hi: 0xa1},
+ {value: 0x4629, lo: 0xa2, hi: 0xa2},
+ {value: 0x4637, lo: 0xa3, hi: 0xa3},
+ {value: 0x4645, lo: 0xa4, hi: 0xa4},
+ {value: 0x812b, lo: 0xa5, hi: 0xa6},
+ {value: 0x8101, lo: 0xa7, hi: 0xa9},
+ {value: 0x8130, lo: 0xad, hi: 0xad},
+ {value: 0x812b, lo: 0xae, hi: 0xb2},
+ {value: 0x812d, lo: 0xbb, hi: 0xbf},
+ // Block 0x8c, offset 0x2c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x812d, lo: 0x80, hi: 0x82},
+ {value: 0x8132, lo: 0x85, hi: 0x89},
+ {value: 0x812d, lo: 0x8a, hi: 0x8b},
+ {value: 0x8132, lo: 0xaa, hi: 0xad},
+ {value: 0x45e3, lo: 0xbb, hi: 0xbb},
+ {value: 0x45ed, lo: 0xbc, hi: 0xbc},
+ {value: 0x4653, lo: 0xbd, hi: 0xbd},
+ {value: 0x466f, lo: 0xbe, hi: 0xbe},
+ {value: 0x4661, lo: 0xbf, hi: 0xbf},
+ // Block 0x8d, offset 0x2d0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x467d, lo: 0x80, hi: 0x80},
+ // Block 0x8e, offset 0x2d2
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0x82, hi: 0x84},
+ // Block 0x8f, offset 0x2d4
+ {value: 0x0002, lo: 0x03},
+ {value: 0x0043, lo: 0x80, hi: 0x99},
+ {value: 0x0083, lo: 0x9a, hi: 0xb3},
+ {value: 0x0043, lo: 0xb4, hi: 0xbf},
+ // Block 0x90, offset 0x2d8
+ {value: 0x0002, lo: 0x04},
+ {value: 0x005b, lo: 0x80, hi: 0x8d},
+ {value: 0x0083, lo: 0x8e, hi: 0x94},
+ {value: 0x0093, lo: 0x96, hi: 0xa7},
+ {value: 0x0043, lo: 0xa8, hi: 0xbf},
+ // Block 0x91, offset 0x2dd
+ {value: 0x0002, lo: 0x0b},
+ {value: 0x0073, lo: 0x80, hi: 0x81},
+ {value: 0x0083, lo: 0x82, hi: 0x9b},
+ {value: 0x0043, lo: 0x9c, hi: 0x9c},
+ {value: 0x0047, lo: 0x9e, hi: 0x9f},
+ {value: 0x004f, lo: 0xa2, hi: 0xa2},
+ {value: 0x0055, lo: 0xa5, hi: 0xa6},
+ {value: 0x005d, lo: 0xa9, hi: 0xac},
+ {value: 0x0067, lo: 0xae, hi: 0xb5},
+ {value: 0x0083, lo: 0xb6, hi: 0xb9},
+ {value: 0x008d, lo: 0xbb, hi: 0xbb},
+ {value: 0x0091, lo: 0xbd, hi: 0xbf},
+ // Block 0x92, offset 0x2e9
+ {value: 0x0002, lo: 0x04},
+ {value: 0x0097, lo: 0x80, hi: 0x83},
+ {value: 0x00a1, lo: 0x85, hi: 0x8f},
+ {value: 0x0043, lo: 0x90, hi: 0xa9},
+ {value: 0x0083, lo: 0xaa, hi: 0xbf},
+ // Block 0x93, offset 0x2ee
+ {value: 0x0002, lo: 0x08},
+ {value: 0x00af, lo: 0x80, hi: 0x83},
+ {value: 0x0043, lo: 0x84, hi: 0x85},
+ {value: 0x0049, lo: 0x87, hi: 0x8a},
+ {value: 0x0055, lo: 0x8d, hi: 0x94},
+ {value: 0x0067, lo: 0x96, hi: 0x9c},
+ {value: 0x0083, lo: 0x9e, hi: 0xb7},
+ {value: 0x0043, lo: 0xb8, hi: 0xb9},
+ {value: 0x0049, lo: 0xbb, hi: 0xbe},
+ // Block 0x94, offset 0x2f7
+ {value: 0x0002, lo: 0x05},
+ {value: 0x0053, lo: 0x80, hi: 0x84},
+ {value: 0x005f, lo: 0x86, hi: 0x86},
+ {value: 0x0067, lo: 0x8a, hi: 0x90},
+ {value: 0x0083, lo: 0x92, hi: 0xab},
+ {value: 0x0043, lo: 0xac, hi: 0xbf},
+ // Block 0x95, offset 0x2fd
+ {value: 0x0002, lo: 0x04},
+ {value: 0x006b, lo: 0x80, hi: 0x85},
+ {value: 0x0083, lo: 0x86, hi: 0x9f},
+ {value: 0x0043, lo: 0xa0, hi: 0xb9},
+ {value: 0x0083, lo: 0xba, hi: 0xbf},
+ // Block 0x96, offset 0x302
+ {value: 0x0002, lo: 0x03},
+ {value: 0x008f, lo: 0x80, hi: 0x93},
+ {value: 0x0043, lo: 0x94, hi: 0xad},
+ {value: 0x0083, lo: 0xae, hi: 0xbf},
+ // Block 0x97, offset 0x306
+ {value: 0x0002, lo: 0x04},
+ {value: 0x00a7, lo: 0x80, hi: 0x87},
+ {value: 0x0043, lo: 0x88, hi: 0xa1},
+ {value: 0x0083, lo: 0xa2, hi: 0xbb},
+ {value: 0x0043, lo: 0xbc, hi: 0xbf},
+ // Block 0x98, offset 0x30b
+ {value: 0x0002, lo: 0x03},
+ {value: 0x004b, lo: 0x80, hi: 0x95},
+ {value: 0x0083, lo: 0x96, hi: 0xaf},
+ {value: 0x0043, lo: 0xb0, hi: 0xbf},
+ // Block 0x99, offset 0x30f
+ {value: 0x0003, lo: 0x0f},
+ {value: 0x01b8, lo: 0x80, hi: 0x80},
+ {value: 0x045f, lo: 0x81, hi: 0x81},
+ {value: 0x01bb, lo: 0x82, hi: 0x9a},
+ {value: 0x045b, lo: 0x9b, hi: 0x9b},
+ {value: 0x01c7, lo: 0x9c, hi: 0x9c},
+ {value: 0x01d0, lo: 0x9d, hi: 0x9d},
+ {value: 0x01d6, lo: 0x9e, hi: 0x9e},
+ {value: 0x01fa, lo: 0x9f, hi: 0x9f},
+ {value: 0x01eb, lo: 0xa0, hi: 0xa0},
+ {value: 0x01e8, lo: 0xa1, hi: 0xa1},
+ {value: 0x0173, lo: 0xa2, hi: 0xb2},
+ {value: 0x0188, lo: 0xb3, hi: 0xb3},
+ {value: 0x01a6, lo: 0xb4, hi: 0xba},
+ {value: 0x045f, lo: 0xbb, hi: 0xbb},
+ {value: 0x01bb, lo: 0xbc, hi: 0xbf},
+ // Block 0x9a, offset 0x31f
+ {value: 0x0003, lo: 0x0d},
+ {value: 0x01c7, lo: 0x80, hi: 0x94},
+ {value: 0x045b, lo: 0x95, hi: 0x95},
+ {value: 0x01c7, lo: 0x96, hi: 0x96},
+ {value: 0x01d0, lo: 0x97, hi: 0x97},
+ {value: 0x01d6, lo: 0x98, hi: 0x98},
+ {value: 0x01fa, lo: 0x99, hi: 0x99},
+ {value: 0x01eb, lo: 0x9a, hi: 0x9a},
+ {value: 0x01e8, lo: 0x9b, hi: 0x9b},
+ {value: 0x0173, lo: 0x9c, hi: 0xac},
+ {value: 0x0188, lo: 0xad, hi: 0xad},
+ {value: 0x01a6, lo: 0xae, hi: 0xb4},
+ {value: 0x045f, lo: 0xb5, hi: 0xb5},
+ {value: 0x01bb, lo: 0xb6, hi: 0xbf},
+ // Block 0x9b, offset 0x32d
+ {value: 0x0003, lo: 0x0d},
+ {value: 0x01d9, lo: 0x80, hi: 0x8e},
+ {value: 0x045b, lo: 0x8f, hi: 0x8f},
+ {value: 0x01c7, lo: 0x90, hi: 0x90},
+ {value: 0x01d0, lo: 0x91, hi: 0x91},
+ {value: 0x01d6, lo: 0x92, hi: 0x92},
+ {value: 0x01fa, lo: 0x93, hi: 0x93},
+ {value: 0x01eb, lo: 0x94, hi: 0x94},
+ {value: 0x01e8, lo: 0x95, hi: 0x95},
+ {value: 0x0173, lo: 0x96, hi: 0xa6},
+ {value: 0x0188, lo: 0xa7, hi: 0xa7},
+ {value: 0x01a6, lo: 0xa8, hi: 0xae},
+ {value: 0x045f, lo: 0xaf, hi: 0xaf},
+ {value: 0x01bb, lo: 0xb0, hi: 0xbf},
+ // Block 0x9c, offset 0x33b
+ {value: 0x0003, lo: 0x0d},
+ {value: 0x01eb, lo: 0x80, hi: 0x88},
+ {value: 0x045b, lo: 0x89, hi: 0x89},
+ {value: 0x01c7, lo: 0x8a, hi: 0x8a},
+ {value: 0x01d0, lo: 0x8b, hi: 0x8b},
+ {value: 0x01d6, lo: 0x8c, hi: 0x8c},
+ {value: 0x01fa, lo: 0x8d, hi: 0x8d},
+ {value: 0x01eb, lo: 0x8e, hi: 0x8e},
+ {value: 0x01e8, lo: 0x8f, hi: 0x8f},
+ {value: 0x0173, lo: 0x90, hi: 0xa0},
+ {value: 0x0188, lo: 0xa1, hi: 0xa1},
+ {value: 0x01a6, lo: 0xa2, hi: 0xa8},
+ {value: 0x045f, lo: 0xa9, hi: 0xa9},
+ {value: 0x01bb, lo: 0xaa, hi: 0xbf},
+ // Block 0x9d, offset 0x349
+ {value: 0x0000, lo: 0x05},
+ {value: 0x8132, lo: 0x80, hi: 0x86},
+ {value: 0x8132, lo: 0x88, hi: 0x98},
+ {value: 0x8132, lo: 0x9b, hi: 0xa1},
+ {value: 0x8132, lo: 0xa3, hi: 0xa4},
+ {value: 0x8132, lo: 0xa6, hi: 0xaa},
+ // Block 0x9e, offset 0x34f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x8132, lo: 0xac, hi: 0xaf},
+ // Block 0x9f, offset 0x351
+ {value: 0x0000, lo: 0x01},
+ {value: 0x812d, lo: 0x90, hi: 0x96},
+ // Block 0xa0, offset 0x353
+ {value: 0x0000, lo: 0x02},
+ {value: 0x8132, lo: 0x84, hi: 0x89},
+ {value: 0x8102, lo: 0x8a, hi: 0x8a},
+ // Block 0xa1, offset 0x356
+ {value: 0x0002, lo: 0x0a},
+ {value: 0x0063, lo: 0x80, hi: 0x89},
+ {value: 0x1951, lo: 0x8a, hi: 0x8a},
+ {value: 0x1984, lo: 0x8b, hi: 0x8b},
+ {value: 0x199f, lo: 0x8c, hi: 0x8c},
+ {value: 0x19a5, lo: 0x8d, hi: 0x8d},
+ {value: 0x1bc3, lo: 0x8e, hi: 0x8e},
+ {value: 0x19b1, lo: 0x8f, hi: 0x8f},
+ {value: 0x197b, lo: 0xaa, hi: 0xaa},
+ {value: 0x197e, lo: 0xab, hi: 0xab},
+ {value: 0x1981, lo: 0xac, hi: 0xac},
+ // Block 0xa2, offset 0x361
+ {value: 0x0000, lo: 0x01},
+ {value: 0x193f, lo: 0x90, hi: 0x90},
+ // Block 0xa3, offset 0x363
+ {value: 0x0028, lo: 0x09},
+ {value: 0x2865, lo: 0x80, hi: 0x80},
+ {value: 0x2829, lo: 0x81, hi: 0x81},
+ {value: 0x2833, lo: 0x82, hi: 0x82},
+ {value: 0x2847, lo: 0x83, hi: 0x84},
+ {value: 0x2851, lo: 0x85, hi: 0x86},
+ {value: 0x283d, lo: 0x87, hi: 0x87},
+ {value: 0x285b, lo: 0x88, hi: 0x88},
+ {value: 0x0b6f, lo: 0x90, hi: 0x90},
+ {value: 0x08e7, lo: 0x91, hi: 0x91},
+}
+
+// recompMap: 7520 bytes (entries only)
+var recompMap map[uint32]rune
+var recompMapOnce sync.Once
+
+const recompMapPacked = "" +
+ "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0
+ "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1
+ "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2
+ "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3
+ "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4
+ "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5
+ "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7
+ "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8
+ "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9
+ "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA
+ "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB
+ "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC
+ "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD
+ "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE
+ "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF
+ "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1
+ "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2
+ "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3
+ "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4
+ "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5
+ "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6
+ "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9
+ "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA
+ "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB
+ "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC
+ "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD
+ "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0
+ "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1
+ "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2
+ "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3
+ "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4
+ "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5
+ "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7
+ "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8
+ "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9
+ "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA
+ "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB
+ "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC
+ "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED
+ "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE
+ "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF
+ "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1
+ "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2
+ "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3
+ "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4
+ "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5
+ "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6
+ "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9
+ "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA
+ "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB
+ "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC
+ "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD
+ "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF
+ "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100
+ "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101
+ "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102
+ "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103
+ "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104
+ "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105
+ "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106
+ "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107
+ "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108
+ "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109
+ "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A
+ "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B
+ "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C
+ "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D
+ "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E
+ "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F
+ "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112
+ "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113
+ "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114
+ "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115
+ "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116
+ "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117
+ "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118
+ "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119
+ "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A
+ "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B
+ "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C
+ "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D
+ "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E
+ "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F
+ "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120
+ "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121
+ "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122
+ "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123
+ "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124
+ "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125
+ "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128
+ "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129
+ "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A
+ "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B
+ "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C
+ "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D
+ "\x00I\x03(\x00\x00\x01." + // 0x00490328: 0x0000012E
+ "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F
+ "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130
+ "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134
+ "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135
+ "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136
+ "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137
+ "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139
+ "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A
+ "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B
+ "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C
+ "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D
+ "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E
+ "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143
+ "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144
+ "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145
+ "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146
+ "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147
+ "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148
+ "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C
+ "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D
+ "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E
+ "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F
+ "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150
+ "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151
+ "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154
+ "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155
+ "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156
+ "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157
+ "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158
+ "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159
+ "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A
+ "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B
+ "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C
+ "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D
+ "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E
+ "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F
+ "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160
+ "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161
+ "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162
+ "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163
+ "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164
+ "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165
+ "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168
+ "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169
+ "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A
+ "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B
+ "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C
+ "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D
+ "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E
+ "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F
+ "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170
+ "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171
+ "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172
+ "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173
+ "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174
+ "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175
+ "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176
+ "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177
+ "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178
+ "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179
+ "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A
+ "\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B
+ "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C
+ "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D
+ "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E
+ "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0
+ "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1
+ "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF
+ "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0
+ "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD
+ "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE
+ "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF
+ "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0
+ "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1
+ "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2
+ "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3
+ "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4
+ "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5
+ "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6
+ "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7
+ "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8
+ "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9
+ "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA
+ "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB
+ "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC
+ "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE
+ "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF
+ "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0
+ "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1
+ "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2
+ "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3
+ "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6
+ "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7
+ "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8
+ "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9
+ "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA
+ "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB
+ "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC
+ "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED
+ "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE
+ "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF
+ "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0
+ "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4
+ "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5
+ "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8
+ "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9
+ "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA
+ "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB
+ "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC
+ "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD
+ "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE
+ "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF
+ "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200
+ "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201
+ "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202
+ "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203
+ "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204
+ "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205
+ "\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206
+ "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207
+ "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208
+ "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209
+ "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A
+ "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B
+ "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C
+ "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D
+ "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E
+ "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F
+ "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210
+ "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211
+ "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212
+ "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213
+ "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214
+ "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215
+ "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216
+ "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217
+ "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218
+ "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219
+ "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A
+ "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B
+ "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E
+ "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F
+ "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226
+ "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227
+ "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228
+ "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229
+ "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A
+ "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B
+ "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C
+ "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D
+ "\x00O\x03\a\x00\x00\x02." + // 0x004F0307: 0x0000022E
+ "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F
+ "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230
+ "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231
+ "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232
+ "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233
+ "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385
+ "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386
+ "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388
+ "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389
+ "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A
+ "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C
+ "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E
+ "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F
+ "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390
+ "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA
+ "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB
+ "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC
+ "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD
+ "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE
+ "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF
+ "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0
+ "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA
+ "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB
+ "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC
+ "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD
+ "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE
+ "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3
+ "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4
+ "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400
+ "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401
+ "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403
+ "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407
+ "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C
+ "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D
+ "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E
+ "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419
+ "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439
+ "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450
+ "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451
+ "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453
+ "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457
+ "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C
+ "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D
+ "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E
+ "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476
+ "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477
+ "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1
+ "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2
+ "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0
+ "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1
+ "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2
+ "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3
+ "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6
+ "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7
+ "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA
+ "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB
+ "\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC
+ "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD
+ "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE
+ "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF
+ "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2
+ "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3
+ "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4
+ "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5
+ "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6
+ "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7
+ "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA
+ "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB
+ "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC
+ "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED
+ "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE
+ "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF
+ "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0
+ "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1
+ "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2
+ "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3
+ "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4
+ "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5
+ "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8
+ "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9
+ "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622
+ "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623
+ "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624
+ "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625
+ "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626
+ "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0
+ "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2
+ "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3
+ "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929
+ "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931
+ "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934
+ "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB
+ "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC
+ "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48
+ "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B
+ "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C
+ "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94
+ "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA
+ "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB
+ "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC
+ "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48
+ "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0
+ "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7
+ "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8
+ "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA
+ "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB
+ "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A
+ "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B
+ "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C
+ "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA
+ "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC
+ "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD
+ "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE
+ "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026
+ "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06
+ "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08
+ "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A
+ "\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C
+ "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E
+ "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12
+ "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B
+ "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D
+ "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40
+ "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41
+ "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43
+ "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00
+ "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01
+ "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02
+ "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03
+ "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04
+ "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05
+ "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06
+ "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07
+ "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08
+ "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09
+ "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A
+ "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B
+ "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C
+ "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D
+ "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E
+ "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F
+ "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10
+ "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11
+ "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12
+ "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13
+ "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14
+ "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15
+ "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16
+ "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17
+ "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18
+ "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19
+ "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A
+ "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B
+ "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C
+ "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D
+ "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E
+ "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F
+ "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20
+ "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21
+ "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22
+ "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23
+ "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24
+ "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25
+ "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26
+ "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27
+ "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28
+ "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29
+ "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A
+ "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B
+ "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C
+ "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D
+ "\x00\xcf\x03\x01\x00\x00\x1e." + // 0x00CF0301: 0x00001E2E
+ "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F
+ "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30
+ "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31
+ "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32
+ "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33
+ "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34
+ "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35
+ "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36
+ "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37
+ "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38
+ "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39
+ "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A
+ "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B
+ "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C
+ "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D
+ "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E
+ "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F
+ "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40
+ "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41
+ "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42
+ "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43
+ "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44
+ "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45
+ "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46
+ "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47
+ "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48
+ "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49
+ "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A
+ "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B
+ "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C
+ "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D
+ "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E
+ "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F
+ "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50
+ "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51
+ "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52
+ "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53
+ "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54
+ "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55
+ "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56
+ "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57
+ "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58
+ "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59
+ "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A
+ "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B
+ "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C
+ "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D
+ "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E
+ "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F
+ "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60
+ "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61
+ "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62
+ "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63
+ "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64
+ "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65
+ "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66
+ "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67
+ "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68
+ "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69
+ "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A
+ "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B
+ "\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C
+ "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D
+ "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E
+ "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F
+ "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70
+ "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71
+ "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72
+ "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73
+ "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74
+ "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75
+ "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76
+ "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77
+ "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78
+ "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79
+ "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A
+ "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B
+ "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C
+ "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D
+ "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E
+ "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F
+ "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80
+ "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81
+ "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82
+ "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83
+ "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84
+ "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85
+ "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86
+ "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87
+ "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88
+ "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89
+ "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A
+ "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B
+ "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C
+ "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D
+ "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E
+ "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F
+ "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90
+ "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91
+ "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92
+ "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93
+ "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94
+ "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95
+ "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96
+ "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97
+ "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98
+ "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99
+ "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B
+ "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0
+ "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1
+ "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2
+ "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3
+ "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4
+ "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5
+ "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6
+ "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7
+ "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8
+ "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9
+ "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA
+ "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB
+ "\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC
+ "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD
+ "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE
+ "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF
+ "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0
+ "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1
+ "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2
+ "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3
+ "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4
+ "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5
+ "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6
+ "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7
+ "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8
+ "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9
+ "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA
+ "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB
+ "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC
+ "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD
+ "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE
+ "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF
+ "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0
+ "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1
+ "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2
+ "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3
+ "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4
+ "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5
+ "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6
+ "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7
+ "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8
+ "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9
+ "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA
+ "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB
+ "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC
+ "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD
+ "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE
+ "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF
+ "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0
+ "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1
+ "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2
+ "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3
+ "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4
+ "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5
+ "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6
+ "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7
+ "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8
+ "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9
+ "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA
+ "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB
+ "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC
+ "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD
+ "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE
+ "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF
+ "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0
+ "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1
+ "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2
+ "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3
+ "\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4
+ "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5
+ "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6
+ "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7
+ "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8
+ "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9
+ "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA
+ "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB
+ "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC
+ "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED
+ "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE
+ "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF
+ "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0
+ "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1
+ "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2
+ "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3
+ "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4
+ "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5
+ "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6
+ "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7
+ "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8
+ "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9
+ "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00
+ "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01
+ "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02
+ "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03
+ "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04
+ "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05
+ "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06
+ "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07
+ "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08
+ "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09
+ "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A
+ "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B
+ "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C
+ "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D
+ "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E
+ "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F
+ "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10
+ "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11
+ "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12
+ "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13
+ "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14
+ "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15
+ "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18
+ "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19
+ "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A
+ "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B
+ "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C
+ "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D
+ "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20
+ "\x03\xb7\x03\x14\x00\x00\x1f!" + // 0x03B70314: 0x00001F21
+ "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22
+ "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23
+ "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24
+ "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25
+ "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26
+ "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27
+ "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28
+ "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29
+ "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A
+ "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B
+ "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C
+ "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D
+ "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E
+ "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F
+ "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30
+ "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31
+ "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32
+ "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33
+ "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34
+ "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35
+ "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36
+ "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37
+ "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38
+ "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39
+ "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A
+ "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B
+ "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C
+ "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D
+ "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E
+ "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F
+ "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40
+ "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41
+ "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42
+ "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43
+ "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44
+ "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45
+ "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48
+ "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49
+ "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A
+ "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B
+ "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C
+ "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D
+ "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50
+ "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51
+ "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52
+ "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53
+ "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54
+ "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55
+ "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56
+ "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57
+ "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59
+ "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B
+ "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D
+ "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F
+ "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60
+ "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61
+ "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62
+ "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63
+ "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64
+ "\x1fa\x03\x01\x00\x00\x1fe" + // 0x1F610301: 0x00001F65
+ "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66
+ "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67
+ "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68
+ "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69
+ "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A
+ "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B
+ "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C
+ "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D
+ "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E
+ "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F
+ "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70
+ "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72
+ "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74
+ "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76
+ "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78
+ "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A
+ "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C
+ "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80
+ "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81
+ "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82
+ "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83
+ "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84
+ "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85
+ "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86
+ "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87
+ "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88
+ "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89
+ "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A
+ "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B
+ "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C
+ "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D
+ "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E
+ "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F
+ "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90
+ "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91
+ "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92
+ "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93
+ "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94
+ "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95
+ "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96
+ "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97
+ "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98
+ "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99
+ "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A
+ "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B
+ "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C
+ "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D
+ "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E
+ "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F
+ "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0
+ "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1
+ "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2
+ "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3
+ "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4
+ "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5
+ "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6
+ "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7
+ "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8
+ "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 0x00001FA9
+ "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA
+ "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB
+ "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC
+ "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD
+ "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE
+ "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF
+ "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0
+ "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1
+ "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2
+ "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3
+ "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4
+ "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6
+ "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7
+ "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8
+ "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9
+ "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA
+ "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC
+ "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1
+ "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2
+ "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3
+ "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4
+ "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6
+ "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7
+ "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8
+ "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA
+ "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC
+ "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD
+ "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE
+ "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF
+ "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0
+ "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1
+ "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2
+ "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6
+ "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7
+ "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8
+ "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9
+ "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA
+ "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD
+ "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE
+ "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF
+ "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0
+ "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1
+ "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2
+ "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4
+ "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5
+ "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6
+ "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7
+ "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8
+ "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9
+ "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA
+ "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC
+ "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED
+ "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2
+ "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3
+ "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4
+ "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6
+ "\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7
+ "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8
+ "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA
+ "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC
+ "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A
+ "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B
+ "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE
+ "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD
+ "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE
+ "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF
+ "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204
+ "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209
+ "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C
+ "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224
+ "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226
+ "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241
+ "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244
+ "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247
+ "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249
+ "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260
+ "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262
+ "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D
+ "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E
+ "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F
+ "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270
+ "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271
+ "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274
+ "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275
+ "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278
+ "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279
+ "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280
+ "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281
+ "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284
+ "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285
+ "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288
+ "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289
+ "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC
+ "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD
+ "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE
+ "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF
+ "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0
+ "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1
+ "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2
+ "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3
+ "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA
+ "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB
+ "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC
+ "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED
+ "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C
+ "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E
+ "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050
+ "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052
+ "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054
+ "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056
+ "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058
+ "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A
+ "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C
+ "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E
+ "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060
+ "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062
+ "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065
+ "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067
+ "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069
+ "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070
+ "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071
+ "0r0\x99\x00\x000s" + // 0x30723099: 0x00003073
+ "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074
+ "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076
+ "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077
+ "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079
+ "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A
+ "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C
+ "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D
+ "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094
+ "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E
+ "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC
+ "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE
+ "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0
+ "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2
+ "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4
+ "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6
+ "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8
+ "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA
+ "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC
+ "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE
+ "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0
+ "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2
+ "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5
+ "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7
+ "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9
+ "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0
+ "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1
+ "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3
+ "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4
+ "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6
+ "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7
+ "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9
+ "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA
+ "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC
+ "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD
+ "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4
+ "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7
+ "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8
+ "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9
+ "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA
+ "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE
+ "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A
+ "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C
+ "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB
+ "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E
+ "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F
+ "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B
+ "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C
+ "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB
+ "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC
+ "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE
+ "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA
+ "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB
+ ""
+ // Total size of tables: 55KB (55977 bytes)
diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS
new file mode 100644
index 00000000..2b00ddba
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1fbd3e97
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE
new file mode 100644
index 00000000..49ea0f92
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2018 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS
new file mode 100644
index 00000000..73309904
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
new file mode 100644
index 00000000..77dbe1b5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -0,0 +1,789 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package prototext
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/set"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Unmarshal reads the given []byte into the given proto.Message.
+func Unmarshal(b []byte, m proto.Message) error {
+ return UnmarshalOptions{}.Unmarshal(b, m)
+}
+
+// UnmarshalOptions is a configurable textproto format unmarshaler.
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowPartial accepts input for messages that will result in missing
+ // required fields. If AllowPartial is false (the default), Unmarshal will
+ // return error if there are any missing required fields.
+ AllowPartial bool
+
+ // DiscardUnknown specifies whether to ignore unknown fields when parsing.
+ // An unknown field is any field whose field name or field number does not
+ // resolve to any known or extension field in the message.
+ // By default, unmarshal rejects unknown fields as an error.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling
+ // google.protobuf.Any messages or extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.MessageTypeResolver
+ protoregistry.ExtensionTypeResolver
+ }
+}
+
+// Unmarshal reads the given []byte and populates the given proto.Message using options in
+// UnmarshalOptions object.
+func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+ proto.Reset(m)
+
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ dec := decoder{text.NewDecoder(b), o}
+ if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
+ return err
+ }
+ if o.AllowPartial {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+}
+
+type decoder struct {
+ *text.Decoder
+ opts UnmarshalOptions
+}
+
+// newError returns an error object with position info.
+func (d decoder) newError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("(line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unexpectedTokenError returns a syntax error for the given unexpected token.
+func (d decoder) unexpectedTokenError(tok text.Token) error {
+ return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString())
+}
+
+// syntaxError returns a syntax error for given position.
+func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+ line, column := d.Position(pos)
+ head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
+ return errors.New(head+f, x...)
+}
+
+// unmarshalMessage unmarshals into the given protoreflect.Message.
+func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if messageDesc.FullName() == "google.protobuf.Any" {
+ return d.unmarshalAny(m, checkDelims)
+ }
+
+ if checkDelims {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() != text.MessageOpen {
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ var seenNums set.Ints
+ var seenOneofs set.Ints
+ fieldDescs := messageDesc.Fields()
+
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch typ := tok.Kind(); typ {
+ case text.Name:
+ // Continue below.
+ case text.EOF:
+ if checkDelims {
+ return text.ErrUnexpectedEOF
+ }
+ return nil
+ default:
+ if checkDelims && typ == text.MessageClose {
+ return nil
+ }
+ return d.unexpectedTokenError(tok)
+ }
+
+ // Resolve the field descriptor.
+ var name pref.Name
+ var fd pref.FieldDescriptor
+ var xt pref.ExtensionType
+ var xtErr error
+ var isFieldNumberName bool
+
+ switch tok.NameKind() {
+ case text.IdentName:
+ name = pref.Name(tok.IdentName())
+ fd = fieldDescs.ByName(name)
+ if fd == nil {
+ // The proto name of a group field is in all lowercase,
+ // while the textproto field name is the group message name.
+ gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name {
+ fd = nil // reset since field name is actually the message name
+ }
+
+ case text.TypeName:
+ // Handle extensions only. This code path is not for Any.
+ xt, xtErr = d.findExtension(pref.FullName(tok.TypeName()))
+
+ case text.FieldNumber:
+ isFieldNumberName = true
+ num := pref.FieldNumber(tok.FieldNumber())
+ if !num.IsValid() {
+ return d.newError(tok.Pos(), "invalid field number: %d", num)
+ }
+ fd = fieldDescs.ByNumber(num)
+ if fd == nil {
+ xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num)
+ }
+ }
+
+ if xt != nil {
+ fd = xt.TypeDescriptor()
+ if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
+ return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
+ }
+ } else if xtErr != nil && xtErr != protoregistry.NotFound {
+ return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
+ }
+ if flags.ProtoLegacy {
+ if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
+ fd = nil // reset since the weak reference is not linked in
+ }
+ }
+
+ // Handle unknown fields.
+ if fd == nil {
+ if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) {
+ d.skipValue()
+ continue
+ }
+ return d.newError(tok.Pos(), "unknown field: %v", tok.RawString())
+ }
+
+ // Handle fields identified by field number.
+ if isFieldNumberName {
+ // TODO: Add an option to permit parsing field numbers.
+ //
+ // This requires careful thought as the MarshalOptions.EmitUnknown
+ // option allows formatting unknown fields as the field number and the
+ // best-effort textual representation of the field value. In that case,
+ // it may not be possible to unmarshal the value from a parser that does
+ // have information about the unknown field.
+ return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString())
+ }
+
+ switch {
+ case fd.IsList():
+ kind := fd.Kind()
+ if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ list := m.Mutable(fd).List()
+ if err := d.unmarshalList(fd, list); err != nil {
+ return err
+ }
+
+ case fd.IsMap():
+ mmap := m.Mutable(fd).Map()
+ if err := d.unmarshalMap(fd, mmap); err != nil {
+ return err
+ }
+
+ default:
+ kind := fd.Kind()
+ if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ // If field is a oneof, check if it has already been set.
+ if od := fd.ContainingOneof(); od != nil {
+ idx := uint64(od.Index())
+ if seenOneofs.Has(idx) {
+ return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName())
+ }
+ seenOneofs.Set(idx)
+ }
+
+ num := uint64(fd.Number())
+ if seenNums.Has(num) {
+ return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString())
+ }
+
+ if err := d.unmarshalSingular(fd, m); err != nil {
+ return err
+ }
+ seenNums.Set(num)
+ }
+ }
+
+ return nil
+}
+
+// findExtension returns protoreflect.ExtensionType from the Resolver if found.
+func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) {
+ xt, err := d.opts.Resolver.FindExtensionByName(xtName)
+ if err == nil {
+ return xt, nil
+ }
+ return messageset.FindMessageSetExtension(d.opts.Resolver, xtName)
+}
+
+// unmarshalSingular unmarshals a non-repeated field value specified by the
+// given FieldDescriptor.
+func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
+ var val pref.Value
+ var err error
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ val = m.NewField(fd)
+ err = d.unmarshalMessage(val.Message(), true)
+ default:
+ val, err = d.unmarshalScalar(fd)
+ }
+ if err == nil {
+ m.Set(fd, val)
+ }
+ return err
+}
+
+// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
+// given FieldDescriptor.
+func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+ tok, err := d.Read()
+ if err != nil {
+ return pref.Value{}, err
+ }
+
+ if tok.Kind() != text.Scalar {
+ return pref.Value{}, d.unexpectedTokenError(tok)
+ }
+
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ if b, ok := tok.Bool(); ok {
+ return pref.ValueOfBool(b), nil
+ }
+
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if n, ok := tok.Int32(); ok {
+ return pref.ValueOfInt32(n), nil
+ }
+
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if n, ok := tok.Int64(); ok {
+ return pref.ValueOfInt64(n), nil
+ }
+
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if n, ok := tok.Uint32(); ok {
+ return pref.ValueOfUint32(n), nil
+ }
+
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if n, ok := tok.Uint64(); ok {
+ return pref.ValueOfUint64(n), nil
+ }
+
+ case pref.FloatKind:
+ if n, ok := tok.Float32(); ok {
+ return pref.ValueOfFloat32(n), nil
+ }
+
+ case pref.DoubleKind:
+ if n, ok := tok.Float64(); ok {
+ return pref.ValueOfFloat64(n), nil
+ }
+
+ case pref.StringKind:
+ if s, ok := tok.String(); ok {
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
+ }
+ return pref.ValueOfString(s), nil
+ }
+
+ case pref.BytesKind:
+ if b, ok := tok.String(); ok {
+ return pref.ValueOfBytes([]byte(b)), nil
+ }
+
+ case pref.EnumKind:
+ if lit, ok := tok.Enum(); ok {
+ // Lookup EnumNumber based on name.
+ if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil {
+ return pref.ValueOfEnum(enumVal.Number()), nil
+ }
+ }
+ if num, ok := tok.Int32(); ok {
+ return pref.ValueOfEnum(pref.EnumNumber(num)), nil
+ }
+
+ default:
+ panic(fmt.Sprintf("invalid scalar kind %v", kind))
+ }
+
+ return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+}
+
+// unmarshalList unmarshals into given protoreflect.List. A list value can
+// either be in [] syntax or simply just a single scalar/message value.
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch fd.Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ switch tok.Kind() {
+ case text.ListOpen:
+ d.Read()
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch tok.Kind() {
+ case text.ListClose:
+ d.Read()
+ return nil
+ case text.MessageOpen:
+ pval := list.NewElement()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return err
+ }
+ list.Append(pval)
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ case text.MessageOpen:
+ pval := list.NewElement()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return err
+ }
+ list.Append(pval)
+ return nil
+ }
+
+ default:
+ switch tok.Kind() {
+ case text.ListOpen:
+ d.Read()
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+
+ switch tok.Kind() {
+ case text.ListClose:
+ d.Read()
+ return nil
+ case text.Scalar:
+ pval, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(pval)
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ case text.Scalar:
+ pval, err := d.unmarshalScalar(fd)
+ if err != nil {
+ return err
+ }
+ list.Append(pval)
+ return nil
+ }
+ }
+
+ return d.unexpectedTokenError(tok)
+}
+
+// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
+// textproto message containing {key: , value: }.
+func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
+ // Determine ahead whether map entry is a scalar type or a message type in
+ // order to call the appropriate unmarshalMapValue func inside
+ // unmarshalMapEntry.
+ var unmarshalMapValue func() (pref.Value, error)
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ unmarshalMapValue = func() (pref.Value, error) {
+ pval := mmap.NewValue()
+ if err := d.unmarshalMessage(pval.Message(), true); err != nil {
+ return pref.Value{}, err
+ }
+ return pval, nil
+ }
+ default:
+ unmarshalMapValue = func() (pref.Value, error) {
+ return d.unmarshalScalar(fd.MapValue())
+ }
+ }
+
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.MessageOpen:
+ return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue)
+
+ case text.ListOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.ListClose:
+ return nil
+ case text.MessageOpen:
+ if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil {
+ return err
+ }
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+}
+
+// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
+// textproto message containing {key: , value: }.
+func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error {
+ var key pref.MapKey
+ var pval pref.Value
+Loop:
+ for {
+ // Read field name.
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.Name:
+ if tok.NameKind() != text.IdentName {
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString())
+ }
+ d.skipValue()
+ continue Loop
+ }
+ // Continue below.
+ case text.MessageClose:
+ break Loop
+ default:
+ return d.unexpectedTokenError(tok)
+ }
+
+ name := tok.IdentName()
+ switch name {
+ case "key":
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+ if key.IsValid() {
+ return d.newError(tok.Pos(), `map entry "key" cannot be repeated`)
+ }
+ val, err := d.unmarshalScalar(fd.MapKey())
+ if err != nil {
+ return err
+ }
+ key = val.MapKey()
+
+ case "value":
+ if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+ }
+ if pval.IsValid() {
+ return d.newError(tok.Pos(), `map entry "value" cannot be repeated`)
+ }
+ pval, err = unmarshalMapValue()
+ if err != nil {
+ return err
+ }
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "unknown map entry field %q", name)
+ }
+ d.skipValue()
+ }
+ }
+
+ if !key.IsValid() {
+ key = fd.MapKey().Default().MapKey()
+ }
+ if !pval.IsValid() {
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind, pref.GroupKind:
+ // If value field is not set for message/group types, construct an
+ // empty one as default.
+ pval = mmap.NewValue()
+ default:
+ pval = fd.MapValue().Default()
+ }
+ }
+ mmap.Set(key, pval)
+ return nil
+}
+
+// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
+// or non-expanded form.
+func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
+ var typeURL string
+ var bValue []byte
+
+ // hasFields tracks which valid fields have been seen in the loop below in
+ // order to flag an error if there are duplicates or conflicts. It may
+ // contain the strings "type_url", "value" and "expanded". The literal
+ // "expanded" is used to indicate that the expanded form has been
+ // encountered already.
+ hasFields := map[string]bool{}
+
+ if checkDelims {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+
+ if tok.Kind() != text.MessageOpen {
+ return d.unexpectedTokenError(tok)
+ }
+ }
+
+Loop:
+ for {
+ // Read field name. Can only have 3 possible field names, i.e. type_url,
+ // value and type URL name inside [].
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ if typ := tok.Kind(); typ != text.Name {
+ if checkDelims {
+ if typ == text.MessageClose {
+ break Loop
+ }
+ } else if typ == text.EOF {
+ break Loop
+ }
+ return d.unexpectedTokenError(tok)
+ }
+
+ switch tok.NameKind() {
+ case text.IdentName:
+ // Both type_url and value fields require field separator :.
+ if !tok.HasSeparator() {
+ return d.syntaxError(tok.Pos(), "missing field separator :")
+ }
+
+ switch tok.IdentName() {
+ case "type_url":
+ if hasFields["type_url"] {
+ return d.newError(tok.Pos(), "duplicate Any type_url field")
+ }
+ if hasFields["expanded"] {
+ return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
+ }
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ var ok bool
+ typeURL, ok = tok.String()
+ if !ok {
+ return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString())
+ }
+ hasFields["type_url"] = true
+
+ case "value":
+ if hasFields["value"] {
+ return d.newError(tok.Pos(), "duplicate Any value field")
+ }
+ if hasFields["expanded"] {
+ return d.newError(tok.Pos(), "conflict with [%s] field", typeURL)
+ }
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ s, ok := tok.String()
+ if !ok {
+ return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString())
+ }
+ bValue = []byte(s)
+ hasFields["value"] = true
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
+ }
+ }
+
+ case text.TypeName:
+ if hasFields["expanded"] {
+ return d.newError(tok.Pos(), "cannot have more than one type")
+ }
+ if hasFields["type_url"] {
+ return d.newError(tok.Pos(), "conflict with type_url field")
+ }
+ typeURL = tok.TypeName()
+ var err error
+ bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos())
+ if err != nil {
+ return err
+ }
+ hasFields["expanded"] = true
+
+ default:
+ if !d.opts.DiscardUnknown {
+ return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString())
+ }
+ }
+ }
+
+ fds := m.Descriptor().Fields()
+ if len(typeURL) > 0 {
+ m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL))
+ }
+ if len(bValue) > 0 {
+ m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue))
+ }
+ return nil
+}
+
+func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) {
+ mt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err)
+ }
+ // Create new message for the embedded message type and unmarshal the value
+ // field into it.
+ m := mt.New()
+ if err := d.unmarshalMessage(m, true); err != nil {
+ return nil, err
+ }
+ // Serialize the embedded message and return the resulting bytes.
+ b, err := proto.MarshalOptions{
+ AllowPartial: true, // Never check required fields inside an Any.
+ Deterministic: true,
+ }.Marshal(m.Interface())
+ if err != nil {
+ return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err)
+ }
+ return b, nil
+}
+
+// skipValue makes the decoder parse a field value in order to advance the read
+// to the next field. It relies on Read returning an error if the types are not
+// in valid sequence.
+func (d decoder) skipValue() error {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ // Only need to continue reading for messages and lists.
+ switch tok.Kind() {
+ case text.MessageOpen:
+ return d.skipMessageValue()
+
+ case text.ListOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.ListClose:
+ return nil
+ case text.MessageOpen:
+ return d.skipMessageValue()
+ default:
+ // Skip items. This will not validate whether skipped values are
+ // of the same type or not, same behavior as C++
+ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0.
+ if err := d.skipValue(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// skipMessageValue makes the decoder parse and skip over all fields in a
+// message. It assumes that the previous read type is MessageOpen.
+func (d decoder) skipMessageValue() error {
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case text.MessageClose:
+ return nil
+ case text.Name:
+ if err := d.skipValue(); err != nil {
+ return err
+ }
+ }
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go
new file mode 100644
index 00000000..162b4f98
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package prototext marshals and unmarshals protocol buffer messages as the
+// textproto format.
+package prototext
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
new file mode 100644
index 00000000..dece2297
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -0,0 +1,426 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package prototext
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/mapsort"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const defaultIndent = " "
+
+// Format formats the message as a multiline string.
+// This function is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func Format(m proto.Message) string {
+ return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given proto.Message in textproto format using default
+// options. Do not depend on the output being stable. It may change over time
+// across different versions of the program.
+func Marshal(m proto.Message) ([]byte, error) {
+ return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable text format marshaler.
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Multiline specifies whether the marshaler should format the output in
+ // indented-form with every textual element on a new line.
+ // If Indent is an empty string, then an arbitrary indent is chosen.
+ Multiline bool
+
+ // Indent specifies the set of indentation characters to use in a multiline
+ // formatted output such that every entry is preceded by Indent and
+ // terminated by a newline. If non-empty, then Multiline is treated as true.
+ // Indent can only be composed of space or tab characters.
+ Indent string
+
+ // EmitASCII specifies whether to format strings and bytes as ASCII only
+ // as opposed to using UTF-8 encoding when possible.
+ EmitASCII bool
+
+ // allowInvalidUTF8 specifies whether to permit the encoding of strings
+ // with invalid UTF-8. This is unexported as it is intended to only
+ // be specified by the Format method.
+ allowInvalidUTF8 bool
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return error if there are any missing required fields.
+ AllowPartial bool
+
+ // EmitUnknown specifies whether to emit unknown fields in the output.
+ // If specified, the unmarshaler may be unable to parse the output.
+ // The default is to exclude unknown fields.
+ EmitUnknown bool
+
+ // Resolver is used for looking up types when expanding google.protobuf.Any
+ // messages. If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ protoregistry.ExtensionTypeResolver
+ protoregistry.MessageTypeResolver
+ }
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. It may change over time across
+// different versions of the program.
+func (o MarshalOptions) Format(m proto.Message) string {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return "" // invalid syntax, but okay since this is for debugging
+ }
+ o.allowInvalidUTF8 = true
+ o.AllowPartial = true
+ o.EmitUnknown = true
+ b, _ := o.Marshal(m)
+ return string(b)
+}
+
+// Marshal writes the given proto.Message in textproto format using options in
+// MarshalOptions object. Do not depend on the output being stable. It may
+// change over time across different versions of the program.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+ var delims = [2]byte{'{', '}'}
+
+ if o.Multiline && o.Indent == "" {
+ o.Indent = defaultIndent
+ }
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII)
+ if err != nil {
+ return nil, err
+ }
+
+ // Treat nil message interface as an empty message,
+ // in which case there is nothing to output.
+ if m == nil {
+ return []byte{}, nil
+ }
+
+ enc := encoder{internalEnc, o}
+ err = enc.marshalMessage(m.ProtoReflect(), false)
+ if err != nil {
+ return nil, err
+ }
+ out := enc.Bytes()
+ if len(o.Indent) > 0 && len(out) > 0 {
+ out = append(out, '\n')
+ }
+ if o.AllowPartial {
+ return out, nil
+ }
+ return out, proto.CheckInitialized(m)
+}
+
+type encoder struct {
+ *text.Encoder
+ opts MarshalOptions
+}
+
+// marshalMessage marshals the given protoreflect.Message.
+func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
+ messageDesc := m.Descriptor()
+ if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+ return errors.New("no support for proto1 MessageSets")
+ }
+
+ if inclDelims {
+ e.StartMessage()
+ defer e.EndMessage()
+ }
+
+ // Handle Any expansion.
+ if messageDesc.FullName() == "google.protobuf.Any" {
+ if e.marshalAny(m) {
+ return nil
+ }
+ // If unable to expand, continue on to marshal Any as a regular message.
+ }
+
+ // Marshal known fields.
+ fieldDescs := messageDesc.Fields()
+ size := fieldDescs.Len()
+ for i := 0; i < size; {
+ fd := fieldDescs.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ name := fd.Name()
+ // Use type name for group field name.
+ if fd.Kind() == pref.GroupKind {
+ name = fd.Message().Name()
+ }
+ val := m.Get(fd)
+ if err := e.marshalField(string(name), val, fd); err != nil {
+ return err
+ }
+ }
+
+ // Marshal extensions.
+ if err := e.marshalExtensions(m); err != nil {
+ return err
+ }
+
+ // Marshal unknown fields.
+ if e.opts.EmitUnknown {
+ e.marshalUnknown(m.GetUnknown())
+ }
+
+ return nil
+}
+
+// marshalField marshals the given field with protoreflect.Value.
+func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error {
+ switch {
+ case fd.IsList():
+ return e.marshalList(name, val.List(), fd)
+ case fd.IsMap():
+ return e.marshalMap(name, val.Map(), fd)
+ default:
+ e.WriteName(name)
+ return e.marshalSingular(val, fd)
+ }
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+ kind := fd.Kind()
+ switch kind {
+ case pref.BoolKind:
+ e.WriteBool(val.Bool())
+
+ case pref.StringKind:
+ s := val.String()
+ if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
+ return errors.InvalidUTF8(string(fd.FullName()))
+ }
+ e.WriteString(s)
+
+ case pref.Int32Kind, pref.Int64Kind,
+ pref.Sint32Kind, pref.Sint64Kind,
+ pref.Sfixed32Kind, pref.Sfixed64Kind:
+ e.WriteInt(val.Int())
+
+ case pref.Uint32Kind, pref.Uint64Kind,
+ pref.Fixed32Kind, pref.Fixed64Kind:
+ e.WriteUint(val.Uint())
+
+ case pref.FloatKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinites.
+ e.WriteFloat(val.Float(), 32)
+
+ case pref.DoubleKind:
+ // Encoder.WriteFloat handles the special numbers NaN and infinites.
+ e.WriteFloat(val.Float(), 64)
+
+ case pref.BytesKind:
+ e.WriteString(string(val.Bytes()))
+
+ case pref.EnumKind:
+ num := val.Enum()
+ if desc := fd.Enum().Values().ByNumber(num); desc != nil {
+ e.WriteLiteral(string(desc.Name()))
+ } else {
+ // Use numeric value if there is no enum description.
+ e.WriteInt(int64(num))
+ }
+
+ case pref.MessageKind, pref.GroupKind:
+ return e.marshalMessage(val.Message(), true)
+
+ default:
+ panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+ }
+ return nil
+}
+
+// marshalList marshals the given protoreflect.List as multiple name-value fields.
+func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error {
+ size := list.Len()
+ for i := 0; i < size; i++ {
+ e.WriteName(name)
+ if err := e.marshalSingular(list.Get(i), fd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalMap marshals the given protoreflect.Map as multiple name-value fields.
+func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
+ var err error
+ mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool {
+ e.WriteName(name)
+ e.StartMessage()
+ defer e.EndMessage()
+
+ e.WriteName("key")
+ err = e.marshalSingular(key.Value(), fd.MapKey())
+ if err != nil {
+ return false
+ }
+
+ e.WriteName("value")
+ err = e.marshalSingular(val, fd.MapValue())
+ if err != nil {
+ return false
+ }
+ return true
+ })
+ return err
+}
+
+// marshalExtensions marshals extension fields.
+func (e encoder) marshalExtensions(m pref.Message) error {
+ type entry struct {
+ key string
+ value pref.Value
+ desc pref.FieldDescriptor
+ }
+
+ // Get a sorted list based on field key first.
+ var entries []entry
+ m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+ if !fd.IsExtension() {
+ return true
+ }
+ // For MessageSet extensions, the name used is the parent message.
+ name := fd.FullName()
+ if messageset.IsMessageSetExtension(fd) {
+ name = name.Parent()
+ }
+ entries = append(entries, entry{
+ key: string(name),
+ value: v,
+ desc: fd,
+ })
+ return true
+ })
+ // Sort extensions lexicographically.
+ sort.Slice(entries, func(i, j int) bool {
+ return entries[i].key < entries[j].key
+ })
+
+ // Write out sorted list.
+ for _, entry := range entries {
+ // Extension field name is the proto field name enclosed in [].
+ name := "[" + entry.key + "]"
+ if err := e.marshalField(name, entry.value, entry.desc); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalUnknown parses the given []byte and marshals fields out.
+// This function assumes proper encoding in the given []byte.
+func (e encoder) marshalUnknown(b []byte) {
+ const dec = 10
+ const hex = 16
+ for len(b) > 0 {
+ num, wtype, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ e.WriteName(strconv.FormatInt(int64(num), dec))
+
+ switch wtype {
+ case protowire.VarintType:
+ var v uint64
+ v, n = protowire.ConsumeVarint(b)
+ e.WriteUint(v)
+ case protowire.Fixed32Type:
+ var v uint32
+ v, n = protowire.ConsumeFixed32(b)
+ e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex))
+ case protowire.Fixed64Type:
+ var v uint64
+ v, n = protowire.ConsumeFixed64(b)
+ e.WriteLiteral("0x" + strconv.FormatUint(v, hex))
+ case protowire.BytesType:
+ var v []byte
+ v, n = protowire.ConsumeBytes(b)
+ e.WriteString(string(v))
+ case protowire.StartGroupType:
+ e.StartMessage()
+ var v []byte
+ v, n = protowire.ConsumeGroup(num, b)
+ e.marshalUnknown(v)
+ e.EndMessage()
+ default:
+ panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype))
+ }
+
+ b = b[n:]
+ }
+}
+
+// marshalAny marshals the given google.protobuf.Any message in expanded form.
+// It returns true if it was able to marshal, else false.
+func (e encoder) marshalAny(any pref.Message) bool {
+ // Construct the embedded message.
+ fds := any.Descriptor().Fields()
+ fdType := fds.ByNumber(fieldnum.Any_TypeUrl)
+ typeURL := any.Get(fdType).String()
+ mt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+ if err != nil {
+ return false
+ }
+ m := mt.New().Interface()
+
+ // Unmarshal bytes into embedded message.
+ fdValue := fds.ByNumber(fieldnum.Any_Value)
+ value := any.Get(fdValue)
+ err = proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: e.opts.Resolver,
+ }.Unmarshal(value.Bytes(), m)
+ if err != nil {
+ return false
+ }
+
+ // Get current encoder position. If marshaling fails, reset encoder output
+ // back to this position.
+ pos := e.Snapshot()
+
+ // Field name is the proto field name enclosed in [].
+ e.WriteName("[" + typeURL + "]")
+ err = e.marshalMessage(m.ProtoReflect(), true)
+ if err != nil {
+ e.Reset(pos)
+ return false
+ }
+ return true
+}
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
new file mode 100644
index 00000000..a427f8b7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -0,0 +1,538 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protowire parses and formats the raw wire encoding.
+// See https://developers.google.com/protocol-buffers/docs/encoding.
+//
+// For marshaling and unmarshaling entire protobuf messages,
+// use the "google.golang.org/protobuf/proto" package instead.
+package protowire
+
+import (
+ "io"
+ "math"
+ "math/bits"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// Number represents the field number.
+type Number int32
+
+const (
+ MinValidNumber Number = 1
+ FirstReservedNumber Number = 19000
+ LastReservedNumber Number = 19999
+ MaxValidNumber Number = 1<<29 - 1
+)
+
+// IsValid reports whether the field number is semantically valid.
+//
+// Note that while numbers within the reserved range are semantically invalid,
+// they are syntactically valid in the wire format.
+// Implementations may treat records with reserved field numbers as unknown.
+func (n Number) IsValid() bool {
+ return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
+}
+
+// Type represents the wire type.
+type Type int8
+
+const (
+ VarintType Type = 0
+ Fixed32Type Type = 5
+ Fixed64Type Type = 1
+ BytesType Type = 2
+ StartGroupType Type = 3
+ EndGroupType Type = 4
+)
+
+const (
+ _ = -iota
+ errCodeTruncated
+ errCodeFieldNumber
+ errCodeOverflow
+ errCodeReserved
+ errCodeEndGroup
+)
+
+var (
+ errFieldNumber = errors.New("invalid field number")
+ errOverflow = errors.New("variable length integer overflow")
+ errReserved = errors.New("cannot parse reserved wire type")
+ errEndGroup = errors.New("mismatching end group marker")
+ errParse = errors.New("parse error")
+)
+
+// ParseError converts an error code into an error value.
+// This returns nil if n is a non-negative number.
+func ParseError(n int) error {
+ if n >= 0 {
+ return nil
+ }
+ switch n {
+ case errCodeTruncated:
+ return io.ErrUnexpectedEOF
+ case errCodeFieldNumber:
+ return errFieldNumber
+ case errCodeOverflow:
+ return errOverflow
+ case errCodeReserved:
+ return errReserved
+ case errCodeEndGroup:
+ return errEndGroup
+ default:
+ return errParse
+ }
+}
+
+// ConsumeField parses an entire field record (both tag and value) and returns
+// the field number, the wire type, and the total length.
+// This returns a negative length upon an error (see ParseError).
+//
+// The total length includes the tag header and the end group marker (if the
+// field is a group).
+func ConsumeField(b []byte) (Number, Type, int) {
+ num, typ, n := ConsumeTag(b)
+ if n < 0 {
+ return 0, 0, n // forward error code
+ }
+ m := ConsumeFieldValue(num, typ, b[n:])
+ if m < 0 {
+ return 0, 0, m // forward error code
+ }
+ return num, typ, n + m
+}
+
+// ConsumeFieldValue parses a field value and returns its length.
+// This assumes that the field Number and wire Type have already been parsed.
+// This returns a negative length upon an error (see ParseError).
+//
+// When parsing a group, the length includes the end group marker and
+// the end group is verified to match the starting field number.
+func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) {
+ switch typ {
+ case VarintType:
+ _, n = ConsumeVarint(b)
+ return n
+ case Fixed32Type:
+ _, n = ConsumeFixed32(b)
+ return n
+ case Fixed64Type:
+ _, n = ConsumeFixed64(b)
+ return n
+ case BytesType:
+ _, n = ConsumeBytes(b)
+ return n
+ case StartGroupType:
+ n0 := len(b)
+ for {
+ num2, typ2, n := ConsumeTag(b)
+ if n < 0 {
+ return n // forward error code
+ }
+ b = b[n:]
+ if typ2 == EndGroupType {
+ if num != num2 {
+ return errCodeEndGroup
+ }
+ return n0 - len(b)
+ }
+
+ n = ConsumeFieldValue(num2, typ2, b)
+ if n < 0 {
+ return n // forward error code
+ }
+ b = b[n:]
+ }
+ case EndGroupType:
+ return errCodeEndGroup
+ default:
+ return errCodeReserved
+ }
+}
+
+// AppendTag encodes num and typ as a varint-encoded tag and appends it to b.
+func AppendTag(b []byte, num Number, typ Type) []byte {
+ return AppendVarint(b, EncodeTag(num, typ))
+}
+
+// ConsumeTag parses b as a varint-encoded tag, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeTag(b []byte) (Number, Type, int) {
+ v, n := ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0, n // forward error code
+ }
+ num, typ := DecodeTag(v)
+ if num < MinValidNumber {
+ return 0, 0, errCodeFieldNumber
+ }
+ return num, typ, n
+}
+
+func SizeTag(num Number) int {
+ return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size
+}
+
+// AppendVarint appends v to b as a varint-encoded uint64.
+func AppendVarint(b []byte, v uint64) []byte {
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte((v>>0)&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeVarint(b []byte) (v uint64, n int) {
+ var y uint64
+ if len(b) <= 0 {
+ return 0, errCodeTruncated
+ }
+ v = uint64(b[0])
+ if v < 0x80 {
+ return v, 1
+ }
+ v -= 0x80
+
+ if len(b) <= 1 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[1])
+ v += y << 7
+ if y < 0x80 {
+ return v, 2
+ }
+ v -= 0x80 << 7
+
+ if len(b) <= 2 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[2])
+ v += y << 14
+ if y < 0x80 {
+ return v, 3
+ }
+ v -= 0x80 << 14
+
+ if len(b) <= 3 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[3])
+ v += y << 21
+ if y < 0x80 {
+ return v, 4
+ }
+ v -= 0x80 << 21
+
+ if len(b) <= 4 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[4])
+ v += y << 28
+ if y < 0x80 {
+ return v, 5
+ }
+ v -= 0x80 << 28
+
+ if len(b) <= 5 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[5])
+ v += y << 35
+ if y < 0x80 {
+ return v, 6
+ }
+ v -= 0x80 << 35
+
+ if len(b) <= 6 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[6])
+ v += y << 42
+ if y < 0x80 {
+ return v, 7
+ }
+ v -= 0x80 << 42
+
+ if len(b) <= 7 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[7])
+ v += y << 49
+ if y < 0x80 {
+ return v, 8
+ }
+ v -= 0x80 << 49
+
+ if len(b) <= 8 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[8])
+ v += y << 56
+ if y < 0x80 {
+ return v, 9
+ }
+ v -= 0x80 << 56
+
+ if len(b) <= 9 {
+ return 0, errCodeTruncated
+ }
+ y = uint64(b[9])
+ v += y << 63
+ if y < 2 {
+ return v, 10
+ }
+ return 0, errCodeOverflow
+}
+
+// SizeVarint returns the encoded size of a varint.
+// The size is guaranteed to be within 1 and 10, inclusive.
+func SizeVarint(v uint64) int {
+ // This computes 1 + (bits.Len64(v)-1)/7.
+ // 9/64 is a good enough approximation of 1/7
+ return int(9*uint32(bits.Len64(v))+64) / 64
+}
+
+// AppendFixed32 appends v to b as a little-endian uint32.
+func AppendFixed32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>0),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+}
+
+// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeFixed32(b []byte) (v uint32, n int) {
+ if len(b) < 4 {
+ return 0, errCodeTruncated
+ }
+ v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ return v, 4
+}
+
+// SizeFixed32 returns the encoded size of a fixed32; which is always 4.
+func SizeFixed32() int {
+ return 4
+}
+
+// AppendFixed64 appends v to b as a little-endian uint64.
+func AppendFixed64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>0),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+}
+
+// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeFixed64(b []byte) (v uint64, n int) {
+ if len(b) < 8 {
+ return 0, errCodeTruncated
+ }
+ v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ return v, 8
+}
+
+// SizeFixed64 returns the encoded size of a fixed64; which is always 8.
+func SizeFixed64() int {
+ return 8
+}
+
+// AppendBytes appends v to b as a length-prefixed bytes value.
+func AppendBytes(b []byte, v []byte) []byte {
+ return append(AppendVarint(b, uint64(len(v))), v...)
+}
+
+// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeBytes(b []byte) (v []byte, n int) {
+ m, n := ConsumeVarint(b)
+ if n < 0 {
+ return nil, n // forward error code
+ }
+ if m > uint64(len(b[n:])) {
+ return nil, errCodeTruncated
+ }
+ return b[n:][:m], n + int(m)
+}
+
+// SizeBytes returns the encoded size of a length-prefixed bytes value,
+// given only the length.
+func SizeBytes(n int) int {
+ return SizeVarint(uint64(n)) + n
+}
+
+// AppendString appends v to b as a length-prefixed bytes value.
+func AppendString(b []byte, v string) []byte {
+ return append(AppendVarint(b, uint64(len(v))), v...)
+}
+
+// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeString(b []byte) (v string, n int) {
+ bb, n := ConsumeBytes(b)
+ return string(bb), n
+}
+
+// AppendGroup appends v to b as group value, with a trailing end group marker.
+// The value v must not contain the end marker.
+func AppendGroup(b []byte, num Number, v []byte) []byte {
+ return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType))
+}
+
+// ConsumeGroup parses b as a group value until the trailing end group marker,
+// and verifies that the end marker matches the provided num. The value v
+// does not contain the end marker, while the length does contain the end marker.
+// This returns a negative length upon an error (see ParseError).
+func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
+ n = ConsumeFieldValue(num, StartGroupType, b)
+ if n < 0 {
+ return nil, n // forward error code
+ }
+ b = b[:n]
+
+ // Truncate off end group marker, but need to handle denormalized varints.
+ // Assuming end marker is never 0 (which is always the case since
+ // EndGroupType is non-zero), we can truncate all trailing bytes where the
+ // lower 7 bits are all zero (implying that the varint is denormalized).
+ for len(b) > 0 && b[len(b)-1]&0x7f == 0 {
+ b = b[:len(b)-1]
+ }
+ b = b[:len(b)-SizeTag(num)]
+ return b, n
+}
+
+// SizeGroup returns the encoded size of a group, given only the length.
+func SizeGroup(num Number, n int) int {
+ return n + SizeTag(num)
+}
+
+// DecodeTag decodes the field Number and wire Type from its unified form.
+// The Number is -1 if the decoded field number overflows int32.
+// Other than overflow, this does not check for field number validity.
+func DecodeTag(x uint64) (Number, Type) {
+ // NOTE: MessageSet allows for larger field numbers than normal.
+ if x>>3 > uint64(math.MaxInt32) {
+ return -1, 0
+ }
+ return Number(x >> 3), Type(x & 7)
+}
+
+// EncodeTag encodes the field Number and wire Type into its unified form.
+func EncodeTag(num Number, typ Type) uint64 {
+ return uint64(num)<<3 | uint64(typ&7)
+}
+
+// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
+// Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
+// Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
+func DecodeZigZag(x uint64) int64 {
+ return int64(x>>1) ^ int64(x)<<63>>63
+}
+
+// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
+// Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
+// Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
+func EncodeZigZag(x int64) uint64 {
+ return uint64(x<<1) ^ uint64(x>>63)
+}
+
+// DecodeBool decodes a uint64 as a bool.
+// Input: { 0, 1, 2, …}
+// Output: {false, true, true, …}
+func DecodeBool(x uint64) bool {
+ return x != 0
+}
+
+// EncodeBool encodes a bool as a uint64.
+// Input: {false, true}
+// Output: { 0, 1}
+func EncodeBool(x bool) uint64 {
+ if x {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
new file mode 100644
index 00000000..e7af0fe0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -0,0 +1,316 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package descfmt provides functionality to format descriptors.
+package descfmt
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type list interface {
+ Len() int
+ pragma.DoNotImplement
+}
+
+func FormatList(s fmt.State, r rune, vs list) {
+ io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+}
+func formatListOpt(vs list, isRoot, allowMulti bool) string {
+ start, end := "[", "]"
+ if isRoot {
+ var name string
+ switch vs.(type) {
+ case pref.Names:
+ name = "Names"
+ case pref.FieldNumbers:
+ name = "FieldNumbers"
+ case pref.FieldRanges:
+ name = "FieldRanges"
+ case pref.EnumRanges:
+ name = "EnumRanges"
+ case pref.FileImports:
+ name = "FileImports"
+ case pref.Descriptor:
+ name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
+ }
+ start, end = name+"{", "}"
+ }
+
+ var ss []string
+ switch vs := vs.(type) {
+ case pref.Names:
+ for i := 0; i < vs.Len(); i++ {
+ ss = append(ss, fmt.Sprint(vs.Get(i)))
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FieldNumbers:
+ for i := 0; i < vs.Len(); i++ {
+ ss = append(ss, fmt.Sprint(vs.Get(i)))
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FieldRanges:
+ for i := 0; i < vs.Len(); i++ {
+ r := vs.Get(i)
+ if r[0]+1 == r[1] {
+ ss = append(ss, fmt.Sprintf("%d", r[0]))
+ } else {
+ ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive
+ }
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.EnumRanges:
+ for i := 0; i < vs.Len(); i++ {
+ r := vs.Get(i)
+ if r[0] == r[1] {
+ ss = append(ss, fmt.Sprintf("%d", r[0]))
+ } else {
+ ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive
+ }
+ }
+ return start + joinStrings(ss, false) + end
+ case pref.FileImports:
+ for i := 0; i < vs.Len(); i++ {
+ var rs records
+ rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
+ ss = append(ss, "{"+rs.Join()+"}")
+ }
+ return start + joinStrings(ss, allowMulti) + end
+ default:
+ _, isEnumValue := vs.(pref.EnumValueDescriptors)
+ for i := 0; i < vs.Len(); i++ {
+ m := reflect.ValueOf(vs).MethodByName("Get")
+ v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
+ ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue))
+ }
+ return start + joinStrings(ss, allowMulti && isEnumValue) + end
+ }
+}
+
+// descriptorAccessors is a list of accessors to print for each descriptor.
+//
+// Do not print all accessors since some contain redundant information,
+// while others are pointers that we do not want to follow since the descriptor
+// is actually a cyclic graph.
+//
+// Using a list allows us to print the accessors in a sensible order.
+var descriptorAccessors = map[reflect.Type][]string{
+ reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
+ reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
+ reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+ reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
+ reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
+ reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
+ reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"},
+ reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+}
+
+func FormatDesc(s fmt.State, r rune, t pref.Descriptor) {
+ io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+}
+func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
+ rv := reflect.ValueOf(t)
+ rt := rv.MethodByName("ProtoType").Type().In(0)
+
+ start, end := "{", "}"
+ if isRoot {
+ start = rt.Name() + "{"
+ }
+
+ _, isFile := t.(pref.FileDescriptor)
+ rs := records{allowMulti: allowMulti}
+ if t.IsPlaceholder() {
+ if isFile {
+ rs.Append(rv, "Path", "Package", "IsPlaceholder")
+ } else {
+ rs.Append(rv, "FullName", "IsPlaceholder")
+ }
+ } else {
+ switch {
+ case isFile:
+ rs.Append(rv, "Syntax")
+ case isRoot:
+ rs.Append(rv, "Syntax", "FullName")
+ default:
+ rs.Append(rv, "Name")
+ }
+ switch t := t.(type) {
+ case pref.FieldDescriptor:
+ for _, s := range descriptorAccessors[rt] {
+ switch s {
+ case "MapKey":
+ if k := t.MapKey(); k != nil {
+ rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
+ }
+ case "MapValue":
+ if v := t.MapValue(); v != nil {
+ switch v.Kind() {
+ case pref.EnumKind:
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
+ case pref.MessageKind, pref.GroupKind:
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
+ default:
+ rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
+ }
+ }
+ case "ContainingOneof":
+ if od := t.ContainingOneof(); od != nil {
+ rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
+ }
+ case "ContainingMessage":
+ if t.IsExtension() {
+ rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
+ }
+ case "Message":
+ if !t.IsMap() {
+ rs.Append(rv, s)
+ }
+ default:
+ rs.Append(rv, s)
+ }
+ }
+ case pref.OneofDescriptor:
+ var ss []string
+ fs := t.Fields()
+ for i := 0; i < fs.Len(); i++ {
+ ss = append(ss, string(fs.Get(i).Name()))
+ }
+ if len(ss) > 0 {
+ rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
+ }
+ default:
+ rs.Append(rv, descriptorAccessors[rt]...)
+ }
+ if rv.MethodByName("GoType").IsValid() {
+ rs.Append(rv, "GoType")
+ }
+ }
+ return start + rs.Join() + end
+}
+
+type records struct {
+ recs [][2]string
+ allowMulti bool
+}
+
+func (rs *records) Append(v reflect.Value, accessors ...string) {
+ for _, a := range accessors {
+ var rv reflect.Value
+ if m := v.MethodByName(a); m.IsValid() {
+ rv = m.Call(nil)[0]
+ }
+ if v.Kind() == reflect.Struct && !rv.IsValid() {
+ rv = v.FieldByName(a)
+ }
+ if !rv.IsValid() {
+ panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
+ }
+ if _, ok := rv.Interface().(pref.Value); ok {
+ rv = rv.MethodByName("Interface").Call(nil)[0]
+ if !rv.IsNil() {
+ rv = rv.Elem()
+ }
+ }
+
+ // Ignore zero values.
+ var isZero bool
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Slice:
+ isZero = rv.IsNil()
+ case reflect.Bool:
+ isZero = rv.Bool() == false
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ isZero = rv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ isZero = rv.Uint() == 0
+ case reflect.String:
+ isZero = rv.String() == ""
+ }
+ if n, ok := rv.Interface().(list); ok {
+ isZero = n.Len() == 0
+ }
+ if isZero {
+ continue
+ }
+
+ // Format the value.
+ var s string
+ v := rv.Interface()
+ switch v := v.(type) {
+ case list:
+ s = formatListOpt(v, false, rs.allowMulti)
+ case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
+ s = string(v.(pref.Descriptor).Name())
+ case pref.Descriptor:
+ s = string(v.FullName())
+ case string:
+ s = strconv.Quote(v)
+ case []byte:
+ s = fmt.Sprintf("%q", v)
+ default:
+ s = fmt.Sprint(v)
+ }
+ rs.recs = append(rs.recs, [2]string{a, s})
+ }
+}
+
+func (rs *records) Join() string {
+ var ss []string
+
+ // In single line mode, simply join all records with commas.
+ if !rs.allowMulti {
+ for _, r := range rs.recs {
+ ss = append(ss, r[0]+formatColon(0)+r[1])
+ }
+ return joinStrings(ss, false)
+ }
+
+ // In allowMulti line mode, align single line records for more readable output.
+ var maxLen int
+ flush := func(i int) {
+ for _, r := range rs.recs[len(ss):i] {
+ ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1])
+ }
+ maxLen = 0
+ }
+ for i, r := range rs.recs {
+ if isMulti := strings.Contains(r[1], "\n"); isMulti {
+ flush(i)
+ ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t"))
+ } else if maxLen < len(r[0]) {
+ maxLen = len(r[0])
+ }
+ }
+ flush(len(rs.recs))
+ return joinStrings(ss, true)
+}
+
+func formatColon(padding int) string {
+ // Deliberately introduce instability into the debug output to
+ // discourage users from performing string comparisons.
+ // This provides us flexibility to change the output in the future.
+ if detrand.Bool() {
+ return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0)
+ } else {
+ return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020)
+ }
+}
+
+func joinStrings(ss []string, isMulti bool) string {
+ if len(ss) == 0 {
+ return ""
+ }
+ if isMulti {
+ return "\n\t" + strings.Join(ss, "\n\t") + "\n"
+ }
+ return strings.Join(ss, ", ")
+}
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
new file mode 100644
index 00000000..8401be8c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package descopts contains the nil pointers to concrete descriptor options.
+//
+// This package exists as a form of reverse dependency injection so that certain
+// packages (e.g., internal/filedesc and internal/filetype can avoid a direct
+// dependency on the descriptor proto package).
+package descopts
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+// These variables are set by the init function in descriptor.pb.go via logic
+// in internal/filetype. In other words, so long as the descriptor proto package
+// is linked in, these variables will be populated.
+//
+// Each variable is populated with a nil pointer to the options struct.
+var (
+ File pref.ProtoMessage
+ Enum pref.ProtoMessage
+ EnumValue pref.ProtoMessage
+ Message pref.ProtoMessage
+ Field pref.ProtoMessage
+ Oneof pref.ProtoMessage
+ ExtensionRange pref.ProtoMessage
+ Service pref.ProtoMessage
+ Method pref.ProtoMessage
+)
diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
new file mode 100644
index 00000000..a904dd1f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package detrand provides deterministically random functionality.
+//
+// The pseudo-randomness of these functions is seeded by the program binary
+// itself and guarantees that the output does not change within a program,
+// while ensuring that the output is unstable across different builds.
+package detrand
+
+import (
+ "encoding/binary"
+ "hash/fnv"
+ "os"
+)
+
+// Disable disables detrand such that all functions returns the zero value.
+// This function is not concurrent-safe and must be called during program init.
+func Disable() {
+ randSeed = 0
+}
+
+// Bool returns a deterministically random boolean.
+func Bool() bool {
+ return randSeed%2 == 1
+}
+
+// randSeed is a best-effort at an approximate hash of the Go binary.
+var randSeed = binaryHash()
+
+func binaryHash() uint64 {
+ // Open the Go binary.
+ s, err := os.Executable()
+ if err != nil {
+ return 0
+ }
+ f, err := os.Open(s)
+ if err != nil {
+ return 0
+ }
+ defer f.Close()
+
+ // Hash the size and several samples of the Go binary.
+ const numSamples = 8
+ var buf [64]byte
+ h := fnv.New64()
+ fi, err := f.Stat()
+ if err != nil {
+ return 0
+ }
+ binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size()))
+ h.Write(buf[:8])
+ for i := int64(0); i < numSamples; i++ {
+ if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil {
+ return 0
+ }
+ h.Write(buf[:])
+ }
+ return h.Sum64()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
new file mode 100644
index 00000000..fdd9b13f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go
@@ -0,0 +1,213 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package defval marshals and unmarshals textual forms of default values.
+//
+// This package handles both the form historically used in Go struct field tags
+// and also the form used by google.protobuf.FieldDescriptorProto.default_value
+// since they differ in superficial ways.
+package defval
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+
+ ptext "google.golang.org/protobuf/internal/encoding/text"
+ errors "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Format is the serialization format used to represent the default value.
+type Format int
+
+const (
+ _ Format = iota
+
+ // Descriptor uses the serialization format that protoc uses with the
+ // google.protobuf.FieldDescriptorProto.default_value field.
+ Descriptor
+
+ // GoTag uses the historical serialization format in Go struct field tags.
+ GoTag
+)
+
+// Unmarshal deserializes the default string s according to the given kind k.
+// When k is an enum, a list of enum value descriptors must be provided.
+func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) {
+ switch k {
+ case pref.BoolKind:
+ if f == GoTag {
+ switch s {
+ case "1":
+ return pref.ValueOfBool(true), nil, nil
+ case "0":
+ return pref.ValueOfBool(false), nil, nil
+ }
+ } else {
+ switch s {
+ case "true":
+ return pref.ValueOfBool(true), nil, nil
+ case "false":
+ return pref.ValueOfBool(false), nil, nil
+ }
+ }
+ case pref.EnumKind:
+ if f == GoTag {
+ // Go tags use the numeric form of the enum value.
+ if n, err := strconv.ParseInt(s, 10, 32); err == nil {
+ if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil {
+ return pref.ValueOfEnum(ev.Number()), ev, nil
+ }
+ }
+ } else {
+ // Descriptor default_value use the enum identifier.
+ ev := evs.ByName(pref.Name(s))
+ if ev != nil {
+ return pref.ValueOfEnum(ev.Number()), ev, nil
+ }
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if v, err := strconv.ParseInt(s, 10, 32); err == nil {
+ return pref.ValueOfInt32(int32(v)), nil, nil
+ }
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return pref.ValueOfInt64(int64(v)), nil, nil
+ }
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if v, err := strconv.ParseUint(s, 10, 32); err == nil {
+ return pref.ValueOfUint32(uint32(v)), nil, nil
+ }
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if v, err := strconv.ParseUint(s, 10, 64); err == nil {
+ return pref.ValueOfUint64(uint64(v)), nil, nil
+ }
+ case pref.FloatKind, pref.DoubleKind:
+ var v float64
+ var err error
+ switch s {
+ case "-inf":
+ v = math.Inf(-1)
+ case "inf":
+ v = math.Inf(+1)
+ case "nan":
+ v = math.NaN()
+ default:
+ v, err = strconv.ParseFloat(s, 64)
+ }
+ if err == nil {
+ if k == pref.FloatKind {
+ return pref.ValueOfFloat32(float32(v)), nil, nil
+ } else {
+ return pref.ValueOfFloat64(float64(v)), nil, nil
+ }
+ }
+ case pref.StringKind:
+ // String values are already unescaped and can be used as is.
+ return pref.ValueOfString(s), nil, nil
+ case pref.BytesKind:
+ if b, ok := unmarshalBytes(s); ok {
+ return pref.ValueOfBytes(b), nil, nil
+ }
+ }
+ return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
+}
+
+// Marshal serializes v as the default string according to the given kind k.
+// When specifying the Descriptor format for an enum kind, the associated
+// enum value descriptor must be provided.
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) {
+ switch k {
+ case pref.BoolKind:
+ if f == GoTag {
+ if v.Bool() {
+ return "1", nil
+ } else {
+ return "0", nil
+ }
+ } else {
+ if v.Bool() {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ }
+ case pref.EnumKind:
+ if f == GoTag {
+ return strconv.FormatInt(int64(v.Enum()), 10), nil
+ } else {
+ return string(ev.Name()), nil
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ return strconv.FormatInt(v.Int(), 10), nil
+ case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind:
+ return strconv.FormatUint(v.Uint(), 10), nil
+ case pref.FloatKind, pref.DoubleKind:
+ f := v.Float()
+ switch {
+ case math.IsInf(f, -1):
+ return "-inf", nil
+ case math.IsInf(f, +1):
+ return "inf", nil
+ case math.IsNaN(f):
+ return "nan", nil
+ default:
+ if k == pref.FloatKind {
+ return strconv.FormatFloat(f, 'g', -1, 32), nil
+ } else {
+ return strconv.FormatFloat(f, 'g', -1, 64), nil
+ }
+ }
+ case pref.StringKind:
+ // String values are serialized as is without any escaping.
+ return v.String(), nil
+ case pref.BytesKind:
+ if s, ok := marshalBytes(v.Bytes()); ok {
+ return s, nil
+ }
+ }
+ return "", errors.New("could not format value for %v: %v", k, v)
+}
+
+// unmarshalBytes deserializes bytes by applying C unescaping.
+func unmarshalBytes(s string) ([]byte, bool) {
+ // Bytes values use the same escaping as the text format,
+ // however they lack the surrounding double quotes.
+ v, err := ptext.UnmarshalString(`"` + s + `"`)
+ if err != nil {
+ return nil, false
+ }
+ return []byte(v), true
+}
+
+// marshalBytes serializes bytes by using C escaping.
+// To match the exact output of protoc, this is identical to the
+// CEscape function in strutil.cc of the protoc source code.
+func marshalBytes(b []byte) (string, bool) {
+ var s []byte
+ for _, c := range b {
+ switch c {
+ case '\n':
+ s = append(s, `\n`...)
+ case '\r':
+ s = append(s, `\r`...)
+ case '\t':
+ s = append(s, `\t`...)
+ case '"':
+ s = append(s, `\"`...)
+ case '\'':
+ s = append(s, `\'`...)
+ case '\\':
+ s = append(s, `\\`...)
+ default:
+ if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII {
+ s = append(s, c)
+ } else {
+ s = append(s, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ return string(s), true
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
new file mode 100644
index 00000000..b1eeea50
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go
@@ -0,0 +1,258 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package messageset encodes and decodes the obsolete MessageSet wire format.
+package messageset
+
+import (
+ "math"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// The MessageSet wire format is equivalent to a message defined as follows,
+// where each Item defines an extension field with a field number of 'type_id'
+// and content of 'message'. MessageSet extensions must be non-repeated message
+// fields.
+//
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// }
+// }
+const (
+ FieldItem = protowire.Number(1)
+ FieldTypeID = protowire.Number(2)
+ FieldMessage = protowire.Number(3)
+)
+
+// ExtensionName is the field name for extensions of MessageSet.
+//
+// A valid MessageSet extension must be of the form:
+// message MyMessage {
+// extend proto2.bridge.MessageSet {
+// optional MyMessage message_set_extension = 1234;
+// }
+// ...
+// }
+const ExtensionName = "message_set_extension"
+
+// IsMessageSet returns whether the message uses the MessageSet wire format.
+func IsMessageSet(md pref.MessageDescriptor) bool {
+ xmd, ok := md.(interface{ IsMessageSet() bool })
+ return ok && xmd.IsMessageSet()
+}
+
+// IsMessageSetExtension reports whether this field extends a MessageSet.
+func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
+ if fd.Name() != ExtensionName {
+ return false
+ }
+ if fd.FullName().Parent() != fd.Message().FullName() {
+ return false
+ }
+ return IsMessageSet(fd.ContainingMessage())
+}
+
+// FindMessageSetExtension locates a MessageSet extension field by name.
+// In text and JSON formats, the extension name used is the message itself.
+// The extension field name is derived by appending ExtensionName.
+func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) {
+ name := s.Append(ExtensionName)
+ xt, err := r.FindExtensionByName(name)
+ if err != nil {
+ if err == preg.NotFound {
+ return nil, err
+ }
+ return nil, errors.Wrap(err, "%q", name)
+ }
+ if !IsMessageSetExtension(xt.TypeDescriptor()) {
+ return nil, preg.NotFound
+ }
+ return xt, nil
+}
+
+// SizeField returns the size of a MessageSet item field containing an extension
+// with the given field number, not counting the contents of the message subfield.
+func SizeField(num protowire.Number) int {
+ return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num))
+}
+
+// Unmarshal parses a MessageSet.
+//
+// It calls fn with the type ID and value of each item in the MessageSet.
+// Unknown fields are discarded.
+//
+// If wantLen is true, the item values include the varint length prefix.
+// This is ugly, but simplifies the fast-path decoder in internal/impl.
+func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error {
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+ b = b[n:]
+ if num != FieldItem || wtyp != protowire.StartGroupType {
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+ b = b[n:]
+ continue
+ }
+ typeID, value, n, err := ConsumeFieldValue(b, wantLen)
+ if err != nil {
+ return err
+ }
+ b = b[n:]
+ if typeID == 0 {
+ continue
+ }
+ if err := fn(typeID, value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConsumeFieldValue parses b as a MessageSet item field value until and including
+// the trailing end group marker. It assumes the start group tag has already been parsed.
+// It returns the contents of the type_id and message subfields and the total
+// item length.
+//
+// If wantLen is true, the returned message value includes the length prefix.
+func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) {
+ ilen := len(b)
+ for {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ switch {
+ case num == FieldItem && wtyp == protowire.EndGroupType:
+ if wantLen && len(message) == 0 {
+ // The message field was missing, which should never happen.
+ // Be prepared for this case anyway.
+ message = protowire.AppendVarint(message, 0)
+ }
+ return typeid, message, ilen - len(b), nil
+ case num == FieldTypeID && wtyp == protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ if v < 1 || v > math.MaxInt32 {
+ return 0, nil, 0, errors.New("invalid type_id in message set")
+ }
+ typeid = protowire.Number(v)
+ case num == FieldMessage && wtyp == protowire.BytesType:
+ m, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ if message == nil {
+ if wantLen {
+ message = b[:n:n]
+ } else {
+ message = m[:len(m):len(m)]
+ }
+ } else {
+ // This case should never happen in practice, but handle it for
+ // correctness: The MessageSet item contains multiple message
+ // fields, which need to be merged.
+ //
+ // In the case where we're returning the length, this becomes
+ // quite inefficient since we need to strip the length off
+ // the existing data and reconstruct it with the combined length.
+ if wantLen {
+ _, nn := protowire.ConsumeVarint(message)
+ m0 := message[nn:]
+ message = nil
+ message = protowire.AppendVarint(message, uint64(len(m0)+len(m)))
+ message = append(message, m0...)
+ message = append(message, m...)
+ } else {
+ message = append(message, m...)
+ }
+ }
+ b = b[n:]
+ default:
+ // We have no place to put it, so we just ignore unknown fields.
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return 0, nil, 0, protowire.ParseError(n)
+ }
+ b = b[n:]
+ }
+ }
+}
+
+// AppendFieldStart appends the start of a MessageSet item field containing
+// an extension with the given number. The caller must add the message
+// subfield (including the tag).
+func AppendFieldStart(b []byte, num protowire.Number) []byte {
+ b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType)
+ b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType)
+ b = protowire.AppendVarint(b, uint64(num))
+ return b
+}
+
+// AppendFieldEnd appends the trailing end group marker for a MessageSet item field.
+func AppendFieldEnd(b []byte) []byte {
+ return protowire.AppendTag(b, FieldItem, protowire.EndGroupType)
+}
+
+// SizeUnknown returns the size of an unknown fields section in MessageSet format.
+//
+// See AppendUnknown.
+func SizeUnknown(unknown []byte) (size int) {
+ for len(unknown) > 0 {
+ num, typ, n := protowire.ConsumeTag(unknown)
+ if n < 0 || typ != protowire.BytesType {
+ return 0
+ }
+ unknown = unknown[n:]
+ _, n = protowire.ConsumeBytes(unknown)
+ if n < 0 {
+ return 0
+ }
+ unknown = unknown[n:]
+ size += SizeField(num) + protowire.SizeTag(FieldMessage) + n
+ }
+ return size
+}
+
+// AppendUnknown appends unknown fields to b in MessageSet format.
+//
+// For historic reasons, unresolved items in a MessageSet are stored in a
+// message's unknown fields section in non-MessageSet format. That is, an
+// unknown item with typeID T and value V appears in the unknown fields as
+// a field with number T and value V.
+//
+// This function converts the unknown fields back into MessageSet form.
+func AppendUnknown(b, unknown []byte) ([]byte, error) {
+ for len(unknown) > 0 {
+ num, typ, n := protowire.ConsumeTag(unknown)
+ if n < 0 || typ != protowire.BytesType {
+ return nil, errors.New("invalid data in message set unknown fields")
+ }
+ unknown = unknown[n:]
+ _, n = protowire.ConsumeBytes(unknown)
+ if n < 0 {
+ return nil, errors.New("invalid data in message set unknown fields")
+ }
+ b = AppendFieldStart(b, num)
+ b = protowire.AppendTag(b, FieldMessage, protowire.BytesType)
+ b = append(b, unknown[:n]...)
+ b = AppendFieldEnd(b)
+ unknown = unknown[n:]
+ }
+ return b, nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
new file mode 100644
index 00000000..16c02d7b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag marshals and unmarshals the legacy struct tags as generated
+// by historical versions of protoc-gen-go.
+package tag
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+
+ defval "google.golang.org/protobuf/internal/encoding/defval"
+ fdesc "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var byteType = reflect.TypeOf(byte(0))
+
+// Unmarshal decodes the tag into a prototype.Field.
+//
+// The goType is needed to determine the original protoreflect.Kind since the
+// tag does not record sufficient information to determine that.
+// The type is the underlying field type (e.g., a repeated field may be
+// represented by []T, but the Go type passed in is just T).
+// A list of enum value descriptors must be provided for enum fields.
+// This does not populate the Enum or Message (except for weak message).
+//
+// This function is a best effort attempt; parsing errors are ignored.
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor {
+ f := new(fdesc.Field)
+ f.L0.ParentFile = fdesc.SurrogateProto2
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ f.L0.FullName = pref.FullName(s[len("name="):])
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ f.L1.Number = pref.FieldNumber(n)
+ case s == "opt":
+ f.L1.Cardinality = pref.Optional
+ case s == "req":
+ f.L1.Cardinality = pref.Required
+ case s == "rep":
+ f.L1.Cardinality = pref.Repeated
+ case s == "varint":
+ switch goType.Kind() {
+ case reflect.Bool:
+ f.L1.Kind = pref.BoolKind
+ case reflect.Int32:
+ f.L1.Kind = pref.Int32Kind
+ case reflect.Int64:
+ f.L1.Kind = pref.Int64Kind
+ case reflect.Uint32:
+ f.L1.Kind = pref.Uint32Kind
+ case reflect.Uint64:
+ f.L1.Kind = pref.Uint64Kind
+ }
+ case s == "zigzag32":
+ if goType.Kind() == reflect.Int32 {
+ f.L1.Kind = pref.Sint32Kind
+ }
+ case s == "zigzag64":
+ if goType.Kind() == reflect.Int64 {
+ f.L1.Kind = pref.Sint64Kind
+ }
+ case s == "fixed32":
+ switch goType.Kind() {
+ case reflect.Int32:
+ f.L1.Kind = pref.Sfixed32Kind
+ case reflect.Uint32:
+ f.L1.Kind = pref.Fixed32Kind
+ case reflect.Float32:
+ f.L1.Kind = pref.FloatKind
+ }
+ case s == "fixed64":
+ switch goType.Kind() {
+ case reflect.Int64:
+ f.L1.Kind = pref.Sfixed64Kind
+ case reflect.Uint64:
+ f.L1.Kind = pref.Fixed64Kind
+ case reflect.Float64:
+ f.L1.Kind = pref.DoubleKind
+ }
+ case s == "bytes":
+ switch {
+ case goType.Kind() == reflect.String:
+ f.L1.Kind = pref.StringKind
+ case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
+ f.L1.Kind = pref.BytesKind
+ default:
+ f.L1.Kind = pref.MessageKind
+ }
+ case s == "group":
+ f.L1.Kind = pref.GroupKind
+ case strings.HasPrefix(s, "enum="):
+ f.L1.Kind = pref.EnumKind
+ case strings.HasPrefix(s, "json="):
+ jsonName := s[len("json="):]
+ if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
+ f.L1.JSONName.Init(jsonName)
+ }
+ case s == "packed":
+ f.L1.HasPacked = true
+ f.L1.IsPacked = true
+ case strings.HasPrefix(s, "weak="):
+ f.L1.IsWeak = true
+ f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):]))
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ s, i = tag[len("def="):], len(tag)
+ v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
+ f.L1.Default = fdesc.DefaultValue(v, ev)
+ case s == "proto3":
+ f.L0.ParentFile = fdesc.SurrogateProto3
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+
+ // The generator uses the group message name instead of the field name.
+ // We obtain the real field name by lowercasing the group name.
+ if f.L1.Kind == pref.GroupKind {
+ f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName)))
+ }
+ return f
+}
+
+// Marshal encodes the protoreflect.FieldDescriptor as a tag.
+//
+// The enumName must be provided if the kind is an enum.
+// Historically, the formulation of the enum "name" was the proto package
+// dot-concatenated with the generated Go identifier for the enum type.
+// Depending on the context on how Marshal is called, there are different ways
+// through which that information is determined. As such it is the caller's
+// responsibility to provide a function to obtain that information.
+func Marshal(fd pref.FieldDescriptor, enumName string) string {
+ var tag []string
+ switch fd.Kind() {
+ case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind:
+ tag = append(tag, "varint")
+ case pref.Sint32Kind:
+ tag = append(tag, "zigzag32")
+ case pref.Sint64Kind:
+ tag = append(tag, "zigzag64")
+ case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind:
+ tag = append(tag, "fixed32")
+ case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind:
+ tag = append(tag, "fixed64")
+ case pref.StringKind, pref.BytesKind, pref.MessageKind:
+ tag = append(tag, "bytes")
+ case pref.GroupKind:
+ tag = append(tag, "group")
+ }
+ tag = append(tag, strconv.Itoa(int(fd.Number())))
+ switch fd.Cardinality() {
+ case pref.Optional:
+ tag = append(tag, "opt")
+ case pref.Required:
+ tag = append(tag, "req")
+ case pref.Repeated:
+ tag = append(tag, "rep")
+ }
+ if fd.IsPacked() {
+ tag = append(tag, "packed")
+ }
+ name := string(fd.Name())
+ if fd.Kind() == pref.GroupKind {
+ // The name of the FieldDescriptor for a group field is
+ // lowercased. To find the original capitalization, we
+ // look in the field's MessageType.
+ name = string(fd.Message().Name())
+ }
+ tag = append(tag, "name="+name)
+ if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() {
+	// NOTE: The jsonName != name condition is suspect, but it preserves
+ // the exact same semantics from the previous generator.
+ tag = append(tag, "json="+jsonName)
+ }
+ if fd.IsWeak() {
+ tag = append(tag, "weak="+string(fd.Message().FullName()))
+ }
+ // The previous implementation does not tag extension fields as proto3,
+ // even when the field is defined in a proto3 file. Match that behavior
+ // for consistency.
+ if fd.Syntax() == pref.Proto3 && !fd.IsExtension() {
+ tag = append(tag, "proto3")
+ }
+ if fd.Kind() == pref.EnumKind && enumName != "" {
+ tag = append(tag, "enum="+enumName)
+ }
+ if fd.ContainingOneof() != nil {
+ tag = append(tag, "oneof")
+ }
+ // This must appear last in the tag, since commas in strings aren't escaped.
+ if fd.HasDefault() {
+ def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag)
+ tag = append(tag, "def="+def)
+ }
+ return strings.Join(tag, ",")
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
new file mode 100644
index 00000000..eb10ea10
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -0,0 +1,665 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// Decoder is a token-based textproto decoder.
+type Decoder struct {
+ // lastCall is last method called, either readCall or peekCall.
+ // Initial value is readCall.
+ lastCall call
+
+ // lastToken contains the last read token.
+ lastToken Token
+
+ // lastErr contains the last read error.
+ lastErr error
+
+ // openStack is a stack containing the byte characters for MessageOpen and
+ // ListOpen kinds. The top of stack represents the message or the list that
+ // the current token is nested in. An empty stack means the current token is
+ // at the top level message. The characters '{' and '<' both represent the
+ // MessageOpen kind.
+ openStack []byte
+
+ // orig is used in reporting line and column.
+ orig []byte
+ // in contains the unconsumed input.
+ in []byte
+}
+
+// NewDecoder returns a Decoder to read the given []byte.
+func NewDecoder(b []byte) *Decoder {
+ return &Decoder{orig: b, in: b}
+}
+
+// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
+var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
+
+// call specifies which Decoder method was invoked.
+type call uint8
+
+const (
+ readCall call = iota
+ peekCall
+)
+
+// Peek looks ahead and returns the next token and error without advancing a read.
+func (d *Decoder) Peek() (Token, error) {
+ defer func() { d.lastCall = peekCall }()
+ if d.lastCall == readCall {
+ d.lastToken, d.lastErr = d.Read()
+ }
+ return d.lastToken, d.lastErr
+}
+
+// Read returns the next token.
+// It will return an error if there is no valid token.
+func (d *Decoder) Read() (Token, error) {
+ defer func() { d.lastCall = readCall }()
+ if d.lastCall == peekCall {
+ return d.lastToken, d.lastErr
+ }
+
+ tok, err := d.parseNext(d.lastToken.kind)
+ if err != nil {
+ return Token{}, err
+ }
+
+ switch tok.kind {
+ case comma, semicolon:
+ tok, err = d.parseNext(tok.kind)
+ if err != nil {
+ return Token{}, err
+ }
+ }
+ d.lastToken = tok
+ return tok, nil
+}
+
+const (
+ mismatchedFmt = "mismatched close character %q"
+ unexpectedFmt = "unexpected character %q"
+)
+
+// parseNext parses the next Token based on given last kind.
+func (d *Decoder) parseNext(lastKind Kind) (Token, error) {
+ // Trim leading spaces.
+ d.consume(0)
+ isEOF := false
+ if len(d.in) == 0 {
+ isEOF = true
+ }
+
+ switch lastKind {
+ case EOF:
+ return d.consumeToken(EOF, 0, 0), nil
+
+ case bof:
+ // Start of top level message. Next token can be EOF or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ return d.parseFieldName()
+
+ case Name:
+ // Next token can be MessageOpen, ListOpen or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ case '[':
+ d.pushOpenStack(ch)
+ return d.consumeToken(ListOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+
+ case Scalar:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch d.in[0] {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ // Next token can be ListClose or comma.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case ']':
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ default:
+ return Token{}, d.newSyntaxError(unexpectedFmt, ch)
+ }
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ _, closeCh := d.currentOpenKind()
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageClose:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch ch := d.in[0]; ch {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ // Next token can be ListClose or comma
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ default:
+ return Token{}, d.newSyntaxError(unexpectedFmt, ch)
+ }
+ }
+
+ case ListOpen:
+ // Next token can be ListClose, MessageStart or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case ']':
+ d.popOpenStack()
+ return d.consumeToken(ListClose, 1, 0), nil
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+
+ case ListClose:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message.
+ // Next token can be EOF, comma, semicolon or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ switch ch := d.in[0]; ch {
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ case MessageOpen:
+ // Next token can be MessageClose, comma, semicolon or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ case ',':
+ return d.consumeToken(comma, 1, 0), nil
+ case ';':
+ return d.consumeToken(semicolon, 1, 0), nil
+ default:
+ return d.parseFieldName()
+ }
+
+ default:
+ // It is not possible to have this case. Let it panic below.
+ }
+
+ case comma, semicolon:
+ openKind, closeCh := d.currentOpenKind()
+ switch openKind {
+ case bof:
+ // Top level message. Next token can be EOF or Name.
+ if isEOF {
+ return d.consumeToken(EOF, 0, 0), nil
+ }
+ return d.parseFieldName()
+
+ case MessageOpen:
+ // Next token can be MessageClose or Name.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case closeCh:
+ d.popOpenStack()
+ return d.consumeToken(MessageClose, 1, 0), nil
+ case otherCloseChar[closeCh]:
+ return Token{}, d.newSyntaxError(mismatchedFmt, ch)
+ default:
+ return d.parseFieldName()
+ }
+
+ case ListOpen:
+ if lastKind == semicolon {
+ // It is not be possible to have this case as logic here
+ // should not have produced a semicolon Token when inside a
+ // list. Let it panic below.
+ break
+ }
+ // Next token can be MessageOpen or Scalar.
+ if isEOF {
+ return Token{}, ErrUnexpectedEOF
+ }
+ switch ch := d.in[0]; ch {
+ case '{', '<':
+ d.pushOpenStack(ch)
+ return d.consumeToken(MessageOpen, 1, 0), nil
+ default:
+ return d.parseScalar()
+ }
+ }
+ }
+
+ line, column := d.Position(len(d.orig) - len(d.in))
+ panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind))
+}
+
+var otherCloseChar = map[byte]byte{
+ '}': '>',
+ '>': '}',
+}
+
+// currentOpenKind indicates whether current position is inside a message, list
+// or top-level message by returning MessageOpen, ListOpen or bof respectively.
+// If the returned kind is either a MessageOpen or ListOpen, it also returns the
+// corresponding closing character.
+func (d *Decoder) currentOpenKind() (Kind, byte) {
+ if len(d.openStack) == 0 {
+ return bof, 0
+ }
+ openCh := d.openStack[len(d.openStack)-1]
+ switch openCh {
+ case '{':
+ return MessageOpen, '}'
+ case '<':
+ return MessageOpen, '>'
+ case '[':
+ return ListOpen, ']'
+ }
+ panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh)))
+}
+
+func (d *Decoder) pushOpenStack(ch byte) {
+ d.openStack = append(d.openStack, ch)
+}
+
+func (d *Decoder) popOpenStack() {
+ d.openStack = d.openStack[:len(d.openStack)-1]
+}
+
+// parseFieldName parses field name and separator.
+func (d *Decoder) parseFieldName() (tok Token, err error) {
+ defer func() {
+ if err == nil && d.tryConsumeChar(':') {
+ tok.attrs |= hasSeparator
+ }
+ }()
+
+ // Extension or Any type URL.
+ if d.in[0] == '[' {
+ return d.parseTypeName()
+ }
+
+ // Identifier.
+ if size := parseIdent(d.in, false); size > 0 {
+ return d.consumeToken(Name, size, uint8(IdentName)), nil
+ }
+
+ // Field number. Identify if input is a valid number that is not negative
+ // and is decimal integer within 32-bit range.
+ if num := parseNumber(d.in); num.size > 0 {
+ if !num.neg && num.kind == numDec {
+ if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
+ return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
+ }
+ }
+ return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
+ }
+
+ return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in))
+}
+
+// parseTypeName parses Any type URL or extension field name. The name is
+// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
+// strings. This implementation is more liberal and allows for the pattern
+// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed
+// in between [ ], '.', '/' and the sub names.
+func (d *Decoder) parseTypeName() (Token, error) {
+ startPos := len(d.orig) - len(d.in)
+ // Use alias s to advance first in order to use d.in for error handling.
+ // Caller already checks for [ as first character.
+ s := consume(d.in[1:], 0)
+ if len(s) == 0 {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ var name []byte
+ for len(s) > 0 && isTypeNameChar(s[0]) {
+ name = append(name, s[0])
+ s = s[1:]
+ }
+ s = consume(s, 0)
+
+ var closed bool
+ for len(s) > 0 && !closed {
+ switch {
+ case s[0] == ']':
+ s = s[1:]
+ closed = true
+
+ case s[0] == '/', s[0] == '.':
+ if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
+ return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
+ d.orig[startPos:len(d.orig)-len(s)+1])
+ }
+ name = append(name, s[0])
+ s = s[1:]
+ s = consume(s, 0)
+ for len(s) > 0 && isTypeNameChar(s[0]) {
+ name = append(name, s[0])
+ s = s[1:]
+ }
+ s = consume(s, 0)
+
+ default:
+ return Token{}, d.newSyntaxError(
+ "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
+ }
+ }
+
+ if !closed {
+ return Token{}, ErrUnexpectedEOF
+ }
+
+ // First character cannot be '.'. Last character cannot be '.' or '/'.
+ size := len(name)
+ if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
+ return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
+ d.orig[startPos:len(d.orig)-len(s)])
+ }
+
+ d.in = s
+ endPos := len(d.orig) - len(d.in)
+ d.consume(0)
+
+ return Token{
+ kind: Name,
+ attrs: uint8(TypeName),
+ pos: startPos,
+ raw: d.orig[startPos:endPos],
+ str: string(name),
+ }, nil
+}
+
+func isTypeNameChar(b byte) bool {
+ return (b == '-' || b == '_' ||
+ ('0' <= b && b <= '9') ||
+ ('a' <= b && b <= 'z') ||
+ ('A' <= b && b <= 'Z'))
+}
+
+func isWhiteSpace(b byte) bool {
+ switch b {
+ case ' ', '\n', '\r', '\t':
+ return true
+ default:
+ return false
+ }
+}
+
+// parseIdent parses an unquoted proto identifier and returns size.
+// If allowNeg is true, it allows '-' to be the first character in the
+// identifier. This is used when parsing literal values like -infinity, etc.
+// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*`
+func parseIdent(input []byte, allowNeg bool) int {
+ var size int
+
+ s := input
+ if len(s) == 0 {
+ return 0
+ }
+
+ if allowNeg && s[0] == '-' {
+ s = s[1:]
+ size++
+ if len(s) == 0 {
+ return 0
+ }
+ }
+
+ switch {
+ case s[0] == '_',
+ 'a' <= s[0] && s[0] <= 'z',
+ 'A' <= s[0] && s[0] <= 'Z':
+ s = s[1:]
+ size++
+ default:
+ return 0
+ }
+
+ for len(s) > 0 && (s[0] == '_' ||
+ 'a' <= s[0] && s[0] <= 'z' ||
+ 'A' <= s[0] && s[0] <= 'Z' ||
+ '0' <= s[0] && s[0] <= '9') {
+ s = s[1:]
+ size++
+ }
+
+ if len(s) > 0 && !isDelim(s[0]) {
+ return 0
+ }
+
+ return size
+}
+
+// parseScalar parses for a string, literal or number value.
+func (d *Decoder) parseScalar() (Token, error) {
+ if d.in[0] == '"' || d.in[0] == '\'' {
+ return d.parseStringValue()
+ }
+
+ if tok, ok := d.parseLiteralValue(); ok {
+ return tok, nil
+ }
+
+ if tok, ok := d.parseNumberValue(); ok {
+ return tok, nil
+ }
+
+ return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in))
+}
+
+// parseLiteralValue parses a literal value. A literal value is used for
+// bools, special floats and enums. This function simply identifies that the
+// field value is a literal.
+func (d *Decoder) parseLiteralValue() (Token, bool) {
+ size := parseIdent(d.in, true)
+ if size == 0 {
+ return Token{}, false
+ }
+ return d.consumeToken(Scalar, size, literalValue), true
+}
+
+// consumeToken constructs a Token for given Kind from d.in and consumes given
+// size-length from it.
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
+ // Important to compute raw and pos before consuming.
+ tok := Token{
+ kind: kind,
+ attrs: attrs,
+ pos: len(d.orig) - len(d.in),
+ raw: d.in[:size],
+ }
+ d.consume(size)
+ return tok
+}
+
+// newSyntaxError returns a syntax error with line and column information for
+// current position.
+func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
+ e := errors.New(f, x...)
+ line, column := d.Position(len(d.orig) - len(d.in))
+ return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns line and column number of given index of the original input.
+// It will panic if index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+ b := d.orig[:idx]
+ line = bytes.Count(b, []byte("\n")) + 1
+ if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+ b = b[i+1:]
+ }
+ column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+ return line, column
+}
+
+func (d *Decoder) tryConsumeChar(c byte) bool {
+ if len(d.in) > 0 && d.in[0] == c {
+ d.consume(1)
+ return true
+ }
+ return false
+}
+
+// consume consumes n bytes of input and any subsequent whitespace or comments.
+func (d *Decoder) consume(n int) {
+ d.in = consume(d.in, n)
+ return
+}
+
+// consume consumes n bytes of input and any subsequent whitespace or comments.
+func consume(b []byte, n int) []byte {
+ b = b[n:]
+ for len(b) > 0 {
+ switch b[0] {
+ case ' ', '\n', '\r', '\t':
+ b = b[1:]
+ case '#':
+ if i := bytes.IndexByte(b, '\n'); i >= 0 {
+ b = b[i+len("\n"):]
+ } else {
+ b = nil
+ }
+ default:
+ return b
+ }
+ }
+ return b
+}
+
+// Any sequence that looks like a non-delimiter (for error reporting).
+var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`)
+
+// isDelim returns true if given byte is a delimiter character.
+func isDelim(c byte) bool {
+ return !(c == '-' || c == '+' || c == '.' || c == '_' ||
+ ('a' <= c && c <= 'z') ||
+ ('A' <= c && c <= 'Z') ||
+ ('0' <= c && c <= '9'))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
new file mode 100644
index 00000000..f2d90b78
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -0,0 +1,190 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+// parseNumberValue parses a number from the input and returns a Token object.
+func (d *Decoder) parseNumberValue() (Token, bool) {
+ in := d.in
+ num := parseNumber(in)
+ if num.size == 0 {
+ return Token{}, false
+ }
+ numAttrs := num.kind
+ if num.neg {
+ numAttrs |= isNegative
+ }
+ strSize := num.size
+ last := num.size - 1
+ if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
+ strSize = last
+ }
+ tok := Token{
+ kind: Scalar,
+ attrs: numberValue,
+ pos: len(d.orig) - len(d.in),
+ raw: d.in[:num.size],
+ str: string(d.in[:strSize]),
+ numAttrs: numAttrs,
+ }
+ d.consume(num.size)
+ return tok, true
+}
+
+const (
+ numDec uint8 = (1 << iota) / 2
+ numHex
+ numOct
+ numFloat
+)
+
+// number is the result of parsing out a valid number from parseNumber. It
+// contains data for doing float or integer conversion via the strconv package
+// in conjunction with the input bytes.
+type number struct {
+ kind uint8
+ neg bool
+ size int
+}
+
+// parseNumber constructs a number object from given input. It allows for the
+// following patterns:
+// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*)
+// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?)
+// It also returns the number of parsed bytes for the given number, 0 if it is
+// not a number.
+func parseNumber(input []byte) number {
+ kind := numDec
+ var size int
+ var neg bool
+
+ s := input
+ if len(s) == 0 {
+ return number{}
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ size++
+ if len(s) == 0 {
+ return number{}
+ }
+ }
+
+ // C++ allows for whitespace and comments in between the negative sign and
+ // the rest of the number. This logic currently does not but is consistent
+ // with v1.
+
+ switch {
+ case s[0] == '0':
+ if len(s) > 1 {
+ switch {
+ case s[1] == 'x' || s[1] == 'X':
+ // Parse as hex number.
+ kind = numHex
+ n := 2
+ s = s[2:]
+ for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') ||
+ ('a' <= s[0] && s[0] <= 'f') ||
+ ('A' <= s[0] && s[0] <= 'F')) {
+ s = s[1:]
+ n++
+ }
+ if n == 2 {
+ return number{}
+ }
+ size += n
+
+ case '0' <= s[1] && s[1] <= '7':
+ // Parse as octal number.
+ kind = numOct
+ n := 2
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '7' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ }
+
+ if kind&(numHex|numOct) > 0 {
+ if len(s) > 0 && !isDelim(s[0]) {
+ return number{}
+ }
+ return number{kind: kind, neg: neg, size: size}
+ }
+ }
+ s = s[1:]
+ size++
+
+ case '1' <= s[0] && s[0] <= '9':
+ n := 1
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+
+ case s[0] == '.':
+ // Set kind to numFloat to signify the intent to parse as float. And
+ // that it needs to have other digits after '.'.
+ kind = numFloat
+
+ default:
+ return number{}
+ }
+
+ // . followed by 0 or more digits.
+ if len(s) > 0 && s[0] == '.' {
+ n := 1
+ s = s[1:]
+ // If decimal point was before any digits, it should be followed by
+ // other digits.
+ if len(s) == 0 && kind == numFloat {
+ return number{}
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ kind = numFloat
+ }
+
+ // e or E followed by an optional - or + and 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ kind = numFloat
+ s = s[1:]
+ n := 1
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ n++
+ if len(s) == 0 {
+ return number{}
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ n++
+ }
+ size += n
+ }
+
+ // Optional suffix f or F for floats.
+ if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') {
+ kind = numFloat
+ s = s[1:]
+ size++
+ }
+
+ // Check that next byte is a delimiter or it is at the end.
+ if len(s) > 0 && !isDelim(s[0]) {
+ return number{}
+ }
+
+ return number{kind: kind, neg: neg, size: size}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
new file mode 100644
index 00000000..d4d34902
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go
@@ -0,0 +1,161 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/strs"
+)
+
+// parseStringValue parses string field token.
+// This differs from parseString since the text format allows
+// multiple back-to-back string literals where they are semantically treated
+// as a single large string with all values concatenated.
+//
+// E.g., `"foo" "bar" "baz"` => "foobarbaz"
+func (d *Decoder) parseStringValue() (Token, error) {
+ // Note that the ending quote is sufficient to unambiguously mark the end
+ // of a string. Thus, the text grammar does not require intervening
+ // whitespace or control characters in-between strings.
+ // Thus, the following is valid:
+ // `"foo"'bar'"baz"` => "foobarbaz"
+ in0 := d.in
+ var ss []string
+ for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') {
+ s, err := d.parseString()
+ if err != nil {
+ return Token{}, err
+ }
+ ss = append(ss, s)
+ }
+ // d.in already points to the end of the value at this point.
+ return Token{
+ kind: Scalar,
+ attrs: stringValue,
+ pos: len(d.orig) - len(in0),
+ raw: in0[:len(in0)-len(d.in)],
+ str: strings.Join(ss, ""),
+ }, nil
+}
+
+// parseString parses a string value enclosed in " or '.
+func (d *Decoder) parseString() (string, error) {
+ in := d.in
+ if len(in) == 0 {
+ return "", ErrUnexpectedEOF
+ }
+ quote := in[0]
+ in = in[1:]
+ i := indexNeedEscapeInBytes(in)
+ in, out := in[i:], in[:i:i] // set cap to prevent mutations
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRune(in); {
+ case r == utf8.RuneError && n == 1:
+ return "", d.newSyntaxError("invalid UTF-8 detected")
+ case r == 0 || r == '\n':
+ return "", d.newSyntaxError("invalid character %q in string", r)
+ case r == rune(quote):
+ in = in[1:]
+ d.consume(len(d.in) - len(in))
+ return string(out), nil
+ case r == '\\':
+ if len(in) < 2 {
+ return "", ErrUnexpectedEOF
+ }
+ switch r := in[1]; r {
+ case '"', '\'', '\\', '?':
+ in, out = in[2:], append(out, r)
+ case 'a':
+ in, out = in[2:], append(out, '\a')
+ case 'b':
+ in, out = in[2:], append(out, '\b')
+ case 'n':
+ in, out = in[2:], append(out, '\n')
+ case 'r':
+ in, out = in[2:], append(out, '\r')
+ case 't':
+ in, out = in[2:], append(out, '\t')
+ case 'v':
+ in, out = in[2:], append(out, '\v')
+ case 'f':
+ in, out = in[2:], append(out, '\f')
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // One, two, or three octal characters.
+ n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n])
+ }
+ in, out = in[1+n:], append(out, byte(v))
+ case 'x':
+ // One or two hexadecimal characters.
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF"))
+ if n > 2 {
+ n = 2
+ }
+ v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8)
+ if err != nil {
+ return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n])
+ }
+ in, out = in[2+n:], append(out, byte(v))
+ case 'u', 'U':
+ // Four or eight hexadecimal characters
+ n := 6
+ if r == 'U' {
+ n = 10
+ }
+ if len(in) < n {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:n]), 16, 32)
+ if utf8.MaxRune < v || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n])
+ }
+ in = in[n:]
+
+ r := rune(v)
+ if utf16.IsSurrogate(r) {
+ if len(in) < 6 {
+ return "", ErrUnexpectedEOF
+ }
+ v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+ r = utf16.DecodeRune(r, rune(v))
+ if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil {
+ return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6])
+ }
+ in = in[6:]
+ }
+ out = append(out, string(r)...)
+ default:
+ return "", d.newSyntaxError("invalid escape code %q in string", in[:2])
+ }
+ default:
+ i := indexNeedEscapeInBytes(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ return "", ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
+
+// UnmarshalString returns an unescaped string given a textproto string value.
+// String value needs to contain single or double quotes. This is only used by
+// internal/encoding/defval package for unmarshaling bytes.
+func UnmarshalString(s string) (string, error) {
+ d := NewDecoder([]byte(s))
+ return d.parseString()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
new file mode 100644
index 00000000..83d2b0d5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go
@@ -0,0 +1,373 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/flags"
+)
+
+// Kind represents a token kind expressible in the textproto format.
+type Kind uint8
+
+// Kind values.
+const (
+ Invalid Kind = iota
+ EOF
+ Name // Name indicates the field name.
+ Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true.
+ MessageOpen
+ MessageClose
+ ListOpen
+ ListClose
+
+ // comma and semi-colon are only for parsing in between values and should not be exposed.
+ comma
+ semicolon
+
+ // bof indicates beginning of file, which is the default token
+ // kind at the beginning of parsing.
+ bof = Invalid
+)
+
+func (t Kind) String() string {
+ switch t {
+ case Invalid:
+ return ""
+ case EOF:
+ return "eof"
+ case Scalar:
+ return "scalar"
+ case Name:
+ return "name"
+ case MessageOpen:
+ return "{"
+ case MessageClose:
+ return "}"
+ case ListOpen:
+ return "["
+ case ListClose:
+ return "]"
+ case comma:
+ return ","
+ case semicolon:
+ return ";"
+ default:
+ return fmt.Sprintf("", uint8(t))
+ }
+}
+
+// NameKind represents different types of field names.
+type NameKind uint8
+
+// NameKind values.
+const (
+ IdentName NameKind = iota + 1
+ TypeName
+ FieldNumber
+)
+
+func (t NameKind) String() string {
+ switch t {
+ case IdentName:
+ return "IdentName"
+ case TypeName:
+ return "TypeName"
+ case FieldNumber:
+ return "FieldNumber"
+ default:
+ return fmt.Sprintf("", uint8(t))
+ }
+}
+
+// Bit mask in Token.attrs to indicate if a Name token is followed by the
+// separator char ':'. The field name separator char is optional for message
+// field or repeated message field, but required for all other types. Decoder
+// simply indicates whether a Name token is followed by separator or not. It is
+// up to the prototext package to validate.
+const hasSeparator = 1 << 7
+
+// Scalar value types.
+const (
+ numberValue = iota + 1
+ stringValue
+ literalValue
+)
+
+// Bit mask in Token.numAttrs to indicate that the number is a negative.
+const isNegative = 1 << 7
+
+// Token provides a parsed token kind and value. Values are provided by the
+// different accessor methods.
+type Token struct {
+ // Kind of the Token object.
+ kind Kind
+ // attrs contains metadata for the following Kinds:
+ // Name: hasSeparator bit and one of NameKind.
+ // Scalar: one of numberValue, stringValue, literalValue.
+ attrs uint8
+ // numAttrs contains metadata for numberValue:
+ // - highest bit is whether negative or positive.
+ // - lower bits indicate one of numDec, numHex, numOct, numFloat.
+ numAttrs uint8
+ // pos provides the position of the token in the original input.
+ pos int
+ // raw bytes of the serialized token.
+ // This is a subslice into the original input.
+ raw []byte
+ // str contains parsed string for the following:
+ // - stringValue of Scalar kind
+ // - numberValue of Scalar kind
+ // - TypeName of Name kind
+ str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+ return t.kind
+}
+
+// RawString returns the read value in string.
+func (t Token) RawString() string {
+ return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+ return t.pos
+}
+
+// NameKind returns IdentName, TypeName or FieldNumber.
+// It panics if type is not Name.
+func (t Token) NameKind() NameKind {
+ if t.kind == Name {
+ return NameKind(t.attrs &^ hasSeparator)
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// HasSeparator returns true if the field name is followed by the separator char
+// ':', else false. It panics if type is not Name.
+func (t Token) HasSeparator() bool {
+ if t.kind == Name {
+ return t.attrs&hasSeparator != 0
+ }
+ panic(fmt.Sprintf("Token is not a Name type: %s", t.kind))
+}
+
+// IdentName returns the value for IdentName type.
+func (t Token) IdentName() string {
+ if t.kind == Name && t.attrs&uint8(IdentName) != 0 {
+ return string(t.raw)
+ }
+ panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// TypeName returns the value for TypeName type.
+func (t Token) TypeName() string {
+ if t.kind == Name && t.attrs&uint8(TypeName) != 0 {
+ return t.str
+ }
+ panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+}
+
+// FieldNumber returns the value for FieldNumber type. It returns a
+// non-negative int32 value. Caller will still need to validate for the correct
+// field number range.
+func (t Token) FieldNumber() int32 {
+ if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 {
+ panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator)))
+ }
+ // Following should not return an error as it had already been called right
+ // before this Token was constructed.
+ num, _ := strconv.ParseInt(string(t.raw), 10, 32)
+ return int32(num)
+}
+
+// String returns the string value for a Scalar type.
+func (t Token) String() (string, bool) {
+ if t.kind != Scalar || t.attrs != stringValue {
+ return "", false
+ }
+ return t.str, true
+}
+
+// Enum returns the literal value for a Scalar type for use as enum literals.
+func (t Token) Enum() (string, bool) {
+ if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') {
+ return "", false
+ }
+ return string(t.raw), true
+}
+
+// Bool returns the bool value for a Scalar type.
+func (t Token) Bool() (bool, bool) {
+ if t.kind != Scalar {
+ return false, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if b, ok := boolLits[string(t.raw)]; ok {
+ return b, true
+ }
+ case numberValue:
+ // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01,
+ // 0x1, etc.
+ n, err := strconv.ParseUint(t.str, 0, 64)
+ if err == nil {
+ switch n {
+ case 0:
+ return false, true
+ case 1:
+ return true, true
+ }
+ }
+ }
+ return false, false
+}
+
+// These exact boolean literals are the ones supported in C++.
+var boolLits = map[string]bool{
+ "t": true,
+ "true": true,
+ "True": true,
+ "f": false,
+ "false": false,
+ "False": false,
+}
+
+// Uint64 returns the uint64 value for a Scalar type.
+func (t Token) Uint64() (uint64, bool) {
+ if t.kind != Scalar || t.attrs != numberValue ||
+ t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(t.str, 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return n, true
+}
+
+// Uint32 returns the uint32 value for a Scalar type.
+func (t Token) Uint32() (uint32, bool) {
+ if t.kind != Scalar || t.attrs != numberValue ||
+ t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ n, err := strconv.ParseUint(t.str, 0, 32)
+ if err != nil {
+ return 0, false
+ }
+ return uint32(n), true
+}
+
+// Int64 returns the int64 value for a Scalar type.
+func (t Token) Int64() (int64, bool) {
+ if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ if n, err := strconv.ParseInt(t.str, 0, 64); err == nil {
+ return n, true
+ }
+ // C++ accepts large positive hex numbers as negative values.
+ // This feature is here for proto1 backwards compatibility purposes.
+ if flags.ProtoLegacy && (t.numAttrs == numHex) {
+ if n, err := strconv.ParseUint(t.str, 0, 64); err == nil {
+ return int64(n), true
+ }
+ }
+ return 0, false
+}
+
+// Int32 returns the int32 value for a Scalar type.
+func (t Token) Int32() (int32, bool) {
+ if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 {
+ return 0, false
+ }
+ if n, err := strconv.ParseInt(t.str, 0, 32); err == nil {
+ return int32(n), true
+ }
+ // C++ accepts large positive hex numbers as negative values.
+ // This feature is here for proto1 backwards compatibility purposes.
+ if flags.ProtoLegacy && (t.numAttrs == numHex) {
+ if n, err := strconv.ParseUint(t.str, 0, 32); err == nil {
+ return int32(n), true
+ }
+ }
+ return 0, false
+}
+
+// Float64 returns the float64 value for a Scalar type.
+func (t Token) Float64() (float64, bool) {
+ if t.kind != Scalar {
+ return 0, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
+ return f, true
+ }
+ case numberValue:
+ n, err := strconv.ParseFloat(t.str, 64)
+ if err == nil {
+ return n, true
+ }
+ nerr := err.(*strconv.NumError)
+ if nerr.Err == strconv.ErrRange {
+ return n, true
+ }
+ }
+ return 0, false
+}
+
+// Float32 returns the float32 value for a Scalar type.
+func (t Token) Float32() (float32, bool) {
+ if t.kind != Scalar {
+ return 0, false
+ }
+ switch t.attrs {
+ case literalValue:
+ if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok {
+ return float32(f), true
+ }
+ case numberValue:
+ n, err := strconv.ParseFloat(t.str, 64)
+ if err == nil {
+ // Overflows are treated as (-)infinity.
+ return float32(n), true
+ }
+ nerr := err.(*strconv.NumError)
+ if nerr.Err == strconv.ErrRange {
+ return float32(n), true
+ }
+ }
+ return 0, false
+}
+
+// These are the supported float literals which C++ permits case-insensitive
+// variants of these.
+var floatLits = map[string]float64{
+ "nan": math.NaN(),
+ "inf": math.Inf(1),
+ "infinity": math.Inf(1),
+ "-inf": math.Inf(-1),
+ "-infinity": math.Inf(-1),
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+ return x.kind == y.kind &&
+ x.attrs == y.attrs &&
+ x.numAttrs == y.numAttrs &&
+ x.pos == y.pos &&
+ bytes.Equal(x.raw, y.raw) &&
+ x.str == y.str
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
new file mode 100644
index 00000000..0ce8d6fb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package text implements the text format for protocol buffers.
+// This package has no semantic understanding for protocol buffers and is only
+// a parser and composer for the format.
+//
+// There is no formal specification for the protobuf text format, as such the
+// C++ implementation (see google::protobuf::TextFormat) is the reference
+// implementation of the text format.
+//
+// This package is neither a superset nor a subset of the C++ implementation.
+// This implementation permits a more liberal grammar in some cases to be
+// backwards compatible with the historical Go implementation.
+// Future parsings unique to Go should not be added.
+// Some grammars allowed by the C++ implementation are deliberately
+// not implemented here because they are considered a bug by the protobuf team
+// and should not be replicated.
+//
+// The Go implementation should implement a sufficient amount of the C++
+// grammar such that the default text serialization by C++ can be parsed by Go.
+// However, just because the C++ parser accepts some input does not mean that
+// the Go implementation should as well.
+//
+// The text format is almost a superset of JSON except:
+// * message keys are not quoted strings, but identifiers
+// * the top-level value must be a message without the delimiters
+package text
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
new file mode 100644
index 00000000..c4ba1c59
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go
@@ -0,0 +1,267 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package text
+
+import (
+ "math"
+ "math/bits"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/errors"
+)
+
+// encType represents an encoding type.
+type encType uint8
+
+const (
+ _ encType = (1 << iota) / 2
+ name
+ scalar
+ messageOpen
+ messageClose
+)
+
+// Encoder provides methods to write out textproto constructs and values. The user is
+// responsible for producing valid sequences of constructs and values.
+type Encoder struct {
+ encoderState
+
+ indent string
+ newline string // set to "\n" if len(indent) > 0
+ delims [2]byte
+ outputASCII bool
+}
+
+type encoderState struct {
+ lastType encType
+ indents []byte
+ out []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry in a List or Message
+// to be preceded by the indent and trailed by a newline.
+//
+// If delims is not the zero value, it controls the delimiter characters used
+// for messages (e.g., "{}" vs "<>").
+//
+// If outputASCII is true, strings will be serialized in such a way that
+// multi-byte UTF-8 sequences are escaped. This property ensures that the
+// overall output is ASCII (as opposed to UTF-8).
+func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) {
+ e := &Encoder{}
+ if len(indent) > 0 {
+ if strings.Trim(indent, " \t") != "" {
+ return nil, errors.New("indent may only be composed of space and tab characters")
+ }
+ e.indent = indent
+ e.newline = "\n"
+ }
+ switch delims {
+ case [2]byte{0, 0}:
+ e.delims = [2]byte{'{', '}'}
+ case [2]byte{'{', '}'}, [2]byte{'<', '>'}:
+ e.delims = delims
+ default:
+ return nil, errors.New("delimiters may only be \"{}\" or \"<>\"")
+ }
+ e.outputASCII = outputASCII
+
+ return e, nil
+}
+
+// Bytes returns the content of the written bytes.
+func (e *Encoder) Bytes() []byte {
+ return e.out
+}
+
+// StartMessage writes out the '{' or '<' symbol.
+func (e *Encoder) StartMessage() {
+ e.prepareNext(messageOpen)
+ e.out = append(e.out, e.delims[0])
+}
+
+// EndMessage writes out the '}' or '>' symbol.
+func (e *Encoder) EndMessage() {
+ e.prepareNext(messageClose)
+ e.out = append(e.out, e.delims[1])
+}
+
+// WriteName writes out the field name and the separator ':'.
+func (e *Encoder) WriteName(s string) {
+ e.prepareNext(name)
+ e.out = append(e.out, s...)
+ e.out = append(e.out, ':')
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) {
+ if b {
+ e.WriteLiteral("true")
+ } else {
+ e.WriteLiteral("false")
+ }
+}
+
+// WriteString writes out the given string value.
+func (e *Encoder) WriteString(s string) {
+ e.prepareNext(scalar)
+ e.out = appendString(e.out, s, e.outputASCII)
+}
+
+func appendString(out []byte, in string, outputASCII bool) []byte {
+ out = append(out, '"')
+ i := indexNeedEscapeInString(in)
+ in, out = in[i:], append(out, in[:i]...)
+ for len(in) > 0 {
+ switch r, n := utf8.DecodeRuneInString(in); {
+ case r == utf8.RuneError && n == 1:
+ // We do not report invalid UTF-8 because strings in the text format
+ // are used to represent both the proto string and bytes type.
+ r = rune(in[0])
+ fallthrough
+ case r < ' ' || r == '"' || r == '\\':
+ out = append(out, '\\')
+ switch r {
+ case '"', '\\':
+ out = append(out, byte(r))
+ case '\n':
+ out = append(out, 'n')
+ case '\r':
+ out = append(out, 'r')
+ case '\t':
+ out = append(out, 't')
+ default:
+ out = append(out, 'x')
+ out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ case outputASCII && r >= utf8.RuneSelf:
+ out = append(out, '\\')
+ if r <= math.MaxUint16 {
+ out = append(out, 'u')
+ out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ } else {
+ out = append(out, 'U')
+ out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+ out = strconv.AppendUint(out, uint64(r), 16)
+ }
+ in = in[n:]
+ default:
+ i := indexNeedEscapeInString(in[n:])
+ in, out = in[n+i:], append(out, in[:n+i]...)
+ }
+ }
+ out = append(out, '"')
+ return out
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInString(s string) int {
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf {
+ return i
+ }
+ }
+ return len(s)
+}
+
+// WriteFloat writes out the given float value for given bitSize.
+func (e *Encoder) WriteFloat(n float64, bitSize int) {
+ e.prepareNext(scalar)
+ e.out = appendFloat(e.out, n, bitSize)
+}
+
+func appendFloat(out []byte, n float64, bitSize int) []byte {
+ switch {
+ case math.IsNaN(n):
+ return append(out, "nan"...)
+ case math.IsInf(n, +1):
+ return append(out, "inf"...)
+ case math.IsInf(n, -1):
+ return append(out, "-inf"...)
+ default:
+ return strconv.AppendFloat(out, n, 'g', -1, bitSize)
+ }
+}
+
+// WriteInt writes out the given signed integer value.
+func (e *Encoder) WriteInt(n int64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatInt(n, 10)...)
+}
+
+// WriteUint writes out the given unsigned integer value.
+func (e *Encoder) WriteUint(n uint64) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, strconv.FormatUint(n, 10)...)
+}
+
+// WriteLiteral writes out the given string as a literal value without quotes.
+// This is used for writing enum literal strings.
+func (e *Encoder) WriteLiteral(s string) {
+ e.prepareNext(scalar)
+ e.out = append(e.out, s...)
+}
+
+// prepareNext adds possible space and indentation for the next value based
+// on last encType and indent option. It also updates e.lastType to next.
+func (e *Encoder) prepareNext(next encType) {
+ defer func() {
+ e.lastType = next
+ }()
+
+ // Single line.
+ if len(e.indent) == 0 {
+ // Add space after each field before the next one.
+ if e.lastType&(scalar|messageClose) != 0 && next == name {
+ e.out = append(e.out, ' ')
+ // Add a random extra space to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+ }
+ return
+ }
+
+ // Multi-line.
+ switch {
+ case e.lastType == name:
+ e.out = append(e.out, ' ')
+ // Add a random extra space after name: to make output unstable.
+ if detrand.Bool() {
+ e.out = append(e.out, ' ')
+ }
+
+ case e.lastType == messageOpen && next != messageClose:
+ e.indents = append(e.indents, e.indent...)
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+
+ case e.lastType&(scalar|messageClose) != 0:
+ if next == messageClose {
+ e.indents = e.indents[:len(e.indents)-len(e.indent)]
+ }
+ e.out = append(e.out, '\n')
+ e.out = append(e.out, e.indents...)
+ }
+}
+
+// Snapshot returns the current snapshot for use in Reset.
+func (e *Encoder) Snapshot() encoderState {
+ return e.encoderState
+}
+
+// Reset resets the Encoder to the given encoderState from a Snapshot.
+func (e *Encoder) Reset(es encoderState) {
+ e.encoderState = es
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
new file mode 100644
index 00000000..20c17b35
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors implements functions to manipulate errors.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/internal/detrand"
+)
+
+// Error is a sentinel matching all errors produced by this package.
+var Error = errors.New("protobuf error")
+
+// New formats a string according to the format specifier and arguments and
+// returns an error that has a "proto" prefix.
+func New(f string, x ...interface{}) error {
+ return &prefixError{s: format(f, x...)}
+}
+
+type prefixError struct{ s string }
+
+var prefix = func() string {
+ // Deliberately introduce instability into the error message string to
+ // discourage users from performing error string comparisons.
+ if detrand.Bool() {
+ return "proto: " // use non-breaking spaces (U+00a0)
+ } else {
+ return "proto: " // use regular spaces (U+0020)
+ }
+}()
+
+func (e *prefixError) Error() string {
+ return prefix + e.s
+}
+
+func (e *prefixError) Unwrap() error {
+ return Error
+}
+
+// Wrap returns an error that has a "proto" prefix, the formatted string described
+// by the format specifier and arguments, and a suffix of err. The error wraps err.
+func Wrap(err error, f string, x ...interface{}) error {
+ return &wrapError{
+ s: format(f, x...),
+ err: err,
+ }
+}
+
+type wrapError struct {
+ s string
+ err error
+}
+
+func (e *wrapError) Error() string {
+ return format("%v%v: %v", prefix, e.s, e.err)
+}
+
+func (e *wrapError) Unwrap() error {
+ return e.err
+}
+
+func (e *wrapError) Is(target error) bool {
+ return target == Error
+}
+
+func format(f string, x ...interface{}) string {
+ // avoid "proto: " prefix when chaining
+ for i := 0; i < len(x); i++ {
+ switch e := x[i].(type) {
+ case *prefixError:
+ x[i] = e.s
+ case *wrapError:
+ x[i] = format("%v: %v", e.s, e.err)
+ }
+ }
+ return fmt.Sprintf(f, x...)
+}
+
+func InvalidUTF8(name string) error {
+ return New("field %v contains invalid UTF-8", name)
+}
+
+func RequiredNotSet(name string) error {
+ return New("required field %v not set", name)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
new file mode 100644
index 00000000..f90e909b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.13
+
+package errors
+
+import "reflect"
+
+// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
+func Is(err, target error) bool {
+ if target == nil {
+ return err == target
+ }
+
+ isComparable := reflect.TypeOf(target).Comparable()
+ for {
+ if isComparable && err == target {
+ return true
+ }
+ if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
+ return true
+ }
+ if err = unwrap(err); err == nil {
+ return false
+ }
+ }
+}
+
+func unwrap(err error) error {
+ u, ok := err.(interface {
+ Unwrap() error
+ })
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
new file mode 100644
index 00000000..dc05f419
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+
+package errors
+
+import "errors"
+
+// Is is errors.Is.
+func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
new file mode 100644
index 00000000..74c5fef2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Any.
+const (
+ Any_TypeUrl = 1 // optional string
+ Any_Value = 2 // optional bytes
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
new file mode 100644
index 00000000..9a6b5f29
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Api.
+const (
+ Api_Name = 1 // optional string
+ Api_Methods = 2 // repeated google.protobuf.Method
+ Api_Options = 3 // repeated google.protobuf.Option
+ Api_Version = 4 // optional string
+ Api_SourceContext = 5 // optional google.protobuf.SourceContext
+ Api_Mixins = 6 // repeated google.protobuf.Mixin
+ Api_Syntax = 7 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.Method.
+const (
+ Method_Name = 1 // optional string
+ Method_RequestTypeUrl = 2 // optional string
+ Method_RequestStreaming = 3 // optional bool
+ Method_ResponseTypeUrl = 4 // optional string
+ Method_ResponseStreaming = 5 // optional bool
+ Method_Options = 6 // repeated google.protobuf.Option
+ Method_Syntax = 7 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.Mixin.
+const (
+ Mixin_Name = 1 // optional string
+ Mixin_Root = 2 // optional string
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
new file mode 100644
index 00000000..6e37b59e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go
@@ -0,0 +1,240 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.FileDescriptorSet.
+const (
+ FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto
+)
+
+// Field numbers for google.protobuf.FileDescriptorProto.
+const (
+ FileDescriptorProto_Name = 1 // optional string
+ FileDescriptorProto_Package = 2 // optional string
+ FileDescriptorProto_Dependency = 3 // repeated string
+ FileDescriptorProto_PublicDependency = 10 // repeated int32
+ FileDescriptorProto_WeakDependency = 11 // repeated int32
+ FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto
+ FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto
+ FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto
+ FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto
+ FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions
+ FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo
+ FileDescriptorProto_Syntax = 12 // optional string
+)
+
+// Field numbers for google.protobuf.DescriptorProto.
+const (
+ DescriptorProto_Name = 1 // optional string
+ DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto
+ DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto
+ DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto
+ DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto
+ DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange
+ DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto
+ DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions
+ DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange
+ DescriptorProto_ReservedName = 10 // repeated string
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ExtensionRange.
+const (
+ DescriptorProto_ExtensionRange_Start = 1 // optional int32
+ DescriptorProto_ExtensionRange_End = 2 // optional int32
+ DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions
+)
+
+// Field numbers for google.protobuf.DescriptorProto.ReservedRange.
+const (
+ DescriptorProto_ReservedRange_Start = 1 // optional int32
+ DescriptorProto_ReservedRange_End = 2 // optional int32
+)
+
+// Field numbers for google.protobuf.ExtensionRangeOptions.
+const (
+ ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.FieldDescriptorProto.
+const (
+ FieldDescriptorProto_Name = 1 // optional string
+ FieldDescriptorProto_Number = 3 // optional int32
+ FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label
+ FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type
+ FieldDescriptorProto_TypeName = 6 // optional string
+ FieldDescriptorProto_Extendee = 2 // optional string
+ FieldDescriptorProto_DefaultValue = 7 // optional string
+ FieldDescriptorProto_OneofIndex = 9 // optional int32
+ FieldDescriptorProto_JsonName = 10 // optional string
+ FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions
+ FieldDescriptorProto_Proto3Optional = 17 // optional bool
+)
+
+// Field numbers for google.protobuf.OneofDescriptorProto.
+const (
+ OneofDescriptorProto_Name = 1 // optional string
+ OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.
+const (
+ EnumDescriptorProto_Name = 1 // optional string
+ EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto
+ EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions
+ EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange
+ EnumDescriptorProto_ReservedName = 5 // repeated string
+)
+
+// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange.
+const (
+ EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32
+ EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32
+)
+
+// Field numbers for google.protobuf.EnumValueDescriptorProto.
+const (
+ EnumValueDescriptorProto_Name = 1 // optional string
+ EnumValueDescriptorProto_Number = 2 // optional int32
+ EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions
+)
+
+// Field numbers for google.protobuf.ServiceDescriptorProto.
+const (
+ ServiceDescriptorProto_Name = 1 // optional string
+ ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto
+ ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions
+)
+
+// Field numbers for google.protobuf.MethodDescriptorProto.
+const (
+ MethodDescriptorProto_Name = 1 // optional string
+ MethodDescriptorProto_InputType = 2 // optional string
+ MethodDescriptorProto_OutputType = 3 // optional string
+ MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions
+ MethodDescriptorProto_ClientStreaming = 5 // optional bool
+ MethodDescriptorProto_ServerStreaming = 6 // optional bool
+)
+
+// Field numbers for google.protobuf.FileOptions.
+const (
+ FileOptions_JavaPackage = 1 // optional string
+ FileOptions_JavaOuterClassname = 8 // optional string
+ FileOptions_JavaMultipleFiles = 10 // optional bool
+ FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool
+ FileOptions_JavaStringCheckUtf8 = 27 // optional bool
+ FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode
+ FileOptions_GoPackage = 11 // optional string
+ FileOptions_CcGenericServices = 16 // optional bool
+ FileOptions_JavaGenericServices = 17 // optional bool
+ FileOptions_PyGenericServices = 18 // optional bool
+ FileOptions_PhpGenericServices = 42 // optional bool
+ FileOptions_Deprecated = 23 // optional bool
+ FileOptions_CcEnableArenas = 31 // optional bool
+ FileOptions_ObjcClassPrefix = 36 // optional string
+ FileOptions_CsharpNamespace = 37 // optional string
+ FileOptions_SwiftPrefix = 39 // optional string
+ FileOptions_PhpClassPrefix = 40 // optional string
+ FileOptions_PhpNamespace = 41 // optional string
+ FileOptions_PhpMetadataNamespace = 44 // optional string
+ FileOptions_RubyPackage = 45 // optional string
+ FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.MessageOptions.
+const (
+ MessageOptions_MessageSetWireFormat = 1 // optional bool
+ MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool
+ MessageOptions_Deprecated = 3 // optional bool
+ MessageOptions_MapEntry = 7 // optional bool
+ MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.FieldOptions.
+const (
+ FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType
+ FieldOptions_Packed = 2 // optional bool
+ FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType
+ FieldOptions_Lazy = 5 // optional bool
+ FieldOptions_Deprecated = 3 // optional bool
+ FieldOptions_Weak = 10 // optional bool
+ FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.OneofOptions.
+const (
+ OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.EnumOptions.
+const (
+ EnumOptions_AllowAlias = 2 // optional bool
+ EnumOptions_Deprecated = 3 // optional bool
+ EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.EnumValueOptions.
+const (
+ EnumValueOptions_Deprecated = 1 // optional bool
+ EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.ServiceOptions.
+const (
+ ServiceOptions_Deprecated = 33 // optional bool
+ ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.MethodOptions.
+const (
+ MethodOptions_Deprecated = 33 // optional bool
+ MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel
+ MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.
+const (
+ UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart
+ UninterpretedOption_IdentifierValue = 3 // optional string
+ UninterpretedOption_PositiveIntValue = 4 // optional uint64
+ UninterpretedOption_NegativeIntValue = 5 // optional int64
+ UninterpretedOption_DoubleValue = 6 // optional double
+ UninterpretedOption_StringValue = 7 // optional bytes
+ UninterpretedOption_AggregateValue = 8 // optional string
+)
+
+// Field numbers for google.protobuf.UninterpretedOption.NamePart.
+const (
+ UninterpretedOption_NamePart_NamePart = 1 // required string
+ UninterpretedOption_NamePart_IsExtension = 2 // required bool
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.
+const (
+ SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location
+)
+
+// Field numbers for google.protobuf.SourceCodeInfo.Location.
+const (
+ SourceCodeInfo_Location_Path = 1 // repeated int32
+ SourceCodeInfo_Location_Span = 2 // repeated int32
+ SourceCodeInfo_Location_LeadingComments = 3 // optional string
+ SourceCodeInfo_Location_TrailingComments = 4 // optional string
+ SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.
+const (
+ GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation
+)
+
+// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
+const (
+ GeneratedCodeInfo_Annotation_Path = 1 // repeated int32
+ GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string
+ GeneratedCodeInfo_Annotation_Begin = 3 // optional int32
+ GeneratedCodeInfo_Annotation_End = 4 // optional int32
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
new file mode 100644
index 00000000..e5978859
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fieldnum contains constants for field numbers of fields in messages
+// declared in descriptor.proto and any of the well-known types.
+package fieldnum
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
new file mode 100644
index 00000000..8816c735
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Duration.
+const (
+ Duration_Seconds = 1 // optional int64
+ Duration_Nanos = 2 // optional int32
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
new file mode 100644
index 00000000..b5130a6d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go
@@ -0,0 +1,10 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Empty.
+const ()
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
new file mode 100644
index 00000000..7e3bfa27
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.FieldMask.
+const (
+ FieldMask_Paths = 1 // repeated string
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
new file mode 100644
index 00000000..241972b1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go
@@ -0,0 +1,12 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.SourceContext.
+const (
+ SourceContext_FileName = 1 // optional string
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
new file mode 100644
index 00000000..c460aab4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go
@@ -0,0 +1,33 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Struct.
+const (
+ Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry
+)
+
+// Field numbers for google.protobuf.Struct.FieldsEntry.
+const (
+ Struct_FieldsEntry_Key = 1 // optional string
+ Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value
+)
+
+// Field numbers for google.protobuf.Value.
+const (
+ Value_NullValue = 1 // optional google.protobuf.NullValue
+ Value_NumberValue = 2 // optional double
+ Value_StringValue = 3 // optional string
+ Value_BoolValue = 4 // optional bool
+ Value_StructValue = 5 // optional google.protobuf.Struct
+ Value_ListValue = 6 // optional google.protobuf.ListValue
+)
+
+// Field numbers for google.protobuf.ListValue.
+const (
+ ListValue_Values = 1 // repeated google.protobuf.Value
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
new file mode 100644
index 00000000..b4346fba
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Timestamp.
+const (
+ Timestamp_Seconds = 1 // optional int64
+ Timestamp_Nanos = 2 // optional int32
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
new file mode 100644
index 00000000..b392e959
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go
@@ -0,0 +1,53 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.Type.
+const (
+ Type_Name = 1 // optional string
+ Type_Fields = 2 // repeated google.protobuf.Field
+ Type_Oneofs = 3 // repeated string
+ Type_Options = 4 // repeated google.protobuf.Option
+ Type_SourceContext = 5 // optional google.protobuf.SourceContext
+ Type_Syntax = 6 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.Field.
+const (
+ Field_Kind = 1 // optional google.protobuf.Field.Kind
+ Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality
+ Field_Number = 3 // optional int32
+ Field_Name = 4 // optional string
+ Field_TypeUrl = 6 // optional string
+ Field_OneofIndex = 7 // optional int32
+ Field_Packed = 8 // optional bool
+ Field_Options = 9 // repeated google.protobuf.Option
+ Field_JsonName = 10 // optional string
+ Field_DefaultValue = 11 // optional string
+)
+
+// Field numbers for google.protobuf.Enum.
+const (
+ Enum_Name = 1 // optional string
+ Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue
+ Enum_Options = 3 // repeated google.protobuf.Option
+ Enum_SourceContext = 4 // optional google.protobuf.SourceContext
+ Enum_Syntax = 5 // optional google.protobuf.Syntax
+)
+
+// Field numbers for google.protobuf.EnumValue.
+const (
+ EnumValue_Name = 1 // optional string
+ EnumValue_Number = 2 // optional int32
+ EnumValue_Options = 3 // repeated google.protobuf.Option
+)
+
+// Field numbers for google.protobuf.Option.
+const (
+ Option_Name = 1 // optional string
+ Option_Value = 2 // optional google.protobuf.Any
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
new file mode 100644
index 00000000..42f846a9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-protos. DO NOT EDIT.
+
+package fieldnum
+
+// Field numbers for google.protobuf.DoubleValue.
+const (
+ DoubleValue_Value = 1 // optional double
+)
+
+// Field numbers for google.protobuf.FloatValue.
+const (
+ FloatValue_Value = 1 // optional float
+)
+
+// Field numbers for google.protobuf.Int64Value.
+const (
+ Int64Value_Value = 1 // optional int64
+)
+
+// Field numbers for google.protobuf.UInt64Value.
+const (
+ UInt64Value_Value = 1 // optional uint64
+)
+
+// Field numbers for google.protobuf.Int32Value.
+const (
+ Int32Value_Value = 1 // optional int32
+)
+
+// Field numbers for google.protobuf.UInt32Value.
+const (
+ UInt32Value_Value = 1 // optional uint32
+)
+
+// Field numbers for google.protobuf.BoolValue.
+const (
+ BoolValue_Value = 1 // optional bool
+)
+
+// Field numbers for google.protobuf.StringValue.
+const (
+ StringValue_Value = 1 // optional string
+)
+
+// Field numbers for google.protobuf.BytesValue.
+const (
+ BytesValue_Value = 1 // optional bytes
+)
diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
new file mode 100644
index 00000000..517c4e2a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fieldsort defines an ordering of fields.
+//
+// The ordering defined by this package matches the historic behavior of the proto
+// package, placing extensions first and oneofs last.
+//
+// There is no guarantee about stability of the wire encoding, and users should not
+// depend on the order defined in this package as it is subject to change without
+// notice.
+package fieldsort
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Less returns true if field a comes before field j in ordered wire marshal output.
+func Less(a, b protoreflect.FieldDescriptor) bool {
+ ea := a.IsExtension()
+ eb := b.IsExtension()
+ oa := a.ContainingOneof()
+ ob := b.ContainingOneof()
+ switch {
+ case ea != eb:
+ return ea
+ case oa != nil && ob != nil:
+ if oa == ob {
+ return a.Number() < b.Number()
+ }
+ return oa.Index() < ob.Index()
+ case oa != nil && !oa.IsSynthetic():
+ return false
+ case ob != nil && !ob.IsSynthetic():
+ return true
+ default:
+ return a.Number() < b.Number()
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
new file mode 100644
index 00000000..462d384e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filedesc provides functionality for constructing descriptors.
+package filedesc
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Builder construct a protoreflect.FileDescriptor from the raw descriptor.
+type Builder struct {
+ // GoPackagePath is the Go package path that is invoking this builder.
+ GoPackagePath string
+
+ // RawDescriptor is the wire-encoded bytes of FileDescriptorProto
+ // and must be populated.
+ RawDescriptor []byte
+
+ // NumEnums is the total number of enums declared in the file.
+ NumEnums int32
+ // NumMessages is the total number of messages declared in the file.
+ // It includes the implicit message declarations for map entries.
+ NumMessages int32
+ // NumExtensions is the total number of extensions declared in the file.
+ NumExtensions int32
+ // NumServices is the total number of services declared in the file.
+ NumServices int32
+
+ // TypeResolver resolves extension field types for descriptor options.
+ // If nil, it uses protoregistry.GlobalTypes.
+ TypeResolver interface {
+ preg.ExtensionTypeResolver
+ }
+
+ // FileRegistry is use to lookup file, enum, and message dependencies.
+ // Once constructed, the file descriptor is registered here.
+ // If nil, it uses protoregistry.GlobalFiles.
+ FileRegistry interface {
+ FindFileByPath(string) (protoreflect.FileDescriptor, error)
+ FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+ RegisterFile(pref.FileDescriptor) error
+ }
+}
+
+// resolverByIndex is an interface Builder.FileRegistry may implement.
+// If so, it permits looking up an enum or message dependency based on the
+// sub-list and element index into filetype.Builder.DependencyIndexes.
+type resolverByIndex interface {
+ FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor
+ FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor
+}
+
+// Indexes of each sub-list in filetype.Builder.DependencyIndexes.
+const (
+ listFieldDeps int32 = iota
+ listExtTargets
+ listExtDeps
+ listMethInDeps
+ listMethOutDeps
+)
+
+// Out is the output of the Builder.
+type Out struct {
+ File pref.FileDescriptor
+
+ // Enums is all enum descriptors in "flattened ordering".
+ Enums []Enum
+ // Messages is all message descriptors in "flattened ordering".
+ // It includes the implicit message declarations for map entries.
+ Messages []Message
+ // Extensions is all extension descriptors in "flattened ordering".
+ Extensions []Extension
+ // Service is all service descriptors in "flattened ordering".
+ Services []Service
+}
+
+// Build constructs a FileDescriptor given the parameters set in Builder.
+// It assumes that the inputs are well-formed and panics if any inconsistencies
+// are encountered.
+//
+// If NumEnums+NumMessages+NumExtensions+NumServices is zero,
+// then Build automatically derives them from the raw descriptor.
+func (db Builder) Build() (out Out) {
+ // Populate the counts if uninitialized.
+ if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 {
+ db.unmarshalCounts(db.RawDescriptor, true)
+ }
+
+ // Initialize resolvers and registries if unpopulated.
+ if db.TypeResolver == nil {
+ db.TypeResolver = preg.GlobalTypes
+ }
+ if db.FileRegistry == nil {
+ db.FileRegistry = preg.GlobalFiles
+ }
+
+ fd := newRawFile(db)
+ out.File = fd
+ out.Enums = fd.allEnums
+ out.Messages = fd.allMessages
+ out.Extensions = fd.allExtensions
+ out.Services = fd.allServices
+
+ if err := db.FileRegistry.RegisterFile(fd); err != nil {
+ panic(err)
+ }
+ return out
+}
+
+// unmarshalCounts counts the number of enum, message, extension, and service
+// declarations in the raw message, which is either a FileDescriptorProto
+// or a MessageDescriptorProto depending on whether isFile is set.
+func (db *Builder) unmarshalCounts(b []byte, isFile bool) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ if isFile {
+ switch num {
+ case fieldnum.FileDescriptorProto_EnumType:
+ db.NumEnums++
+ case fieldnum.FileDescriptorProto_MessageType:
+ db.unmarshalCounts(v, false)
+ db.NumMessages++
+ case fieldnum.FileDescriptorProto_Extension:
+ db.NumExtensions++
+ case fieldnum.FileDescriptorProto_Service:
+ db.NumServices++
+ }
+ } else {
+ switch num {
+ case fieldnum.DescriptorProto_EnumType:
+ db.NumEnums++
+ case fieldnum.DescriptorProto_NestedType:
+ db.unmarshalCounts(v, false)
+ db.NumMessages++
+ case fieldnum.DescriptorProto_Extension:
+ db.NumExtensions++
+ }
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
new file mode 100644
index 00000000..2540befd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -0,0 +1,613 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/encoding/defval"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// The types in this file may have a suffix:
+// • L0: Contains fields common to all descriptors (except File) and
+// must be initialized up front.
+// • L1: Contains fields specific to a descriptor and
+// must be initialized up front.
+// • L2: Contains fields that are lazily initialized when constructing
+// from the raw file descriptor. When constructing as a literal, the L2
+// fields must be initialized up front.
+//
+// The types are exported so that packages like reflect/protodesc can
+// directly construct descriptors.
+
+type (
+ File struct {
+ fileRaw
+ L1 FileL1
+
+ once uint32 // atomically set if L2 is valid
+ mu sync.Mutex // protects L2
+ L2 *FileL2
+ }
+ FileL1 struct {
+ Syntax pref.Syntax
+ Path string
+ Package pref.FullName
+
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
+ Services Services
+ }
+ FileL2 struct {
+ Options func() pref.ProtoMessage
+ Imports FileImports
+ Locations SourceLocations
+ }
+)
+
+func (fd *File) ParentFile() pref.FileDescriptor { return fd }
+func (fd *File) Parent() pref.Descriptor { return nil }
+func (fd *File) Index() int { return 0 }
+func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax }
+func (fd *File) Name() pref.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() pref.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
+func (fd *File) Options() pref.ProtoMessage {
+ if f := fd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.File
+}
+func (fd *File) Path() string { return fd.L1.Path }
+func (fd *File) Package() pref.FullName { return fd.L1.Package }
+func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports }
+func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums }
+func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages }
+func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions }
+func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services }
+func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations }
+func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *File) ProtoType(pref.FileDescriptor) {}
+func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+
+func (fd *File) lazyInit() *FileL2 {
+ if atomic.LoadUint32(&fd.once) == 0 {
+ fd.lazyInitOnce()
+ }
+ return fd.L2
+}
+
+func (fd *File) lazyInitOnce() {
+ fd.mu.Lock()
+ if fd.L2 == nil {
+ fd.lazyRawInit() // recursively initializes all L2 structures
+ }
+ atomic.StoreUint32(&fd.once, 1)
+ fd.mu.Unlock()
+}
+
+// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code
+// to be able to retrieve the raw descriptor.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *File) ProtoLegacyRawDesc() []byte {
+ return fd.builder.RawDescriptor
+}
+
+// GoPackagePath is a pseudo-internal API for determining the Go package path
+// that this file descriptor is declared in.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *File) GoPackagePath() string {
+ return fd.builder.GoPackagePath
+}
+
+type (
+ Enum struct {
+ Base
+ L1 EnumL1
+ L2 *EnumL2 // protected by fileDesc.once
+ }
+ EnumL1 struct {
+ eagerValues bool // controls whether EnumL2.Values is already populated
+ }
+ EnumL2 struct {
+ Options func() pref.ProtoMessage
+ Values EnumValues
+ ReservedNames Names
+ ReservedRanges EnumRanges
+ }
+
+ EnumValue struct {
+ Base
+ L1 EnumValueL1
+ }
+ EnumValueL1 struct {
+ Options func() pref.ProtoMessage
+ Number pref.EnumNumber
+ }
+)
+
+func (ed *Enum) Options() pref.ProtoMessage {
+ if f := ed.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Enum
+}
+func (ed *Enum) Values() pref.EnumValueDescriptors {
+ if ed.L1.eagerValues {
+ return &ed.L2.Values
+ }
+ return &ed.lazyInit().Values
+}
+func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames }
+func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges }
+func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *Enum) ProtoType(pref.EnumDescriptor) {}
+func (ed *Enum) lazyInit() *EnumL2 {
+ ed.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return ed.L2
+}
+
+func (ed *EnumValue) Options() pref.ProtoMessage {
+ if f := ed.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.EnumValue
+}
+func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number }
+func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
+func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {}
+
+type (
+ Message struct {
+ Base
+ L1 MessageL1
+ L2 *MessageL2 // protected by fileDesc.once
+ }
+ MessageL1 struct {
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
+ IsMapEntry bool // promoted from google.protobuf.MessageOptions
+ IsMessageSet bool // promoted from google.protobuf.MessageOptions
+ }
+ MessageL2 struct {
+ Options func() pref.ProtoMessage
+ Fields Fields
+ Oneofs Oneofs
+ ReservedNames Names
+ ReservedRanges FieldRanges
+ RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality
+ ExtensionRanges FieldRanges
+ ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges
+ }
+
+ Field struct {
+ Base
+ L1 FieldL1
+ }
+ FieldL1 struct {
+ Options func() pref.ProtoMessage
+ Number pref.FieldNumber
+ Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers
+ Kind pref.Kind
+ JSONName jsonName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsWeak bool // promoted from google.protobuf.FieldOptions
+ HasPacked bool // promoted from google.protobuf.FieldOptions
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
+ }
+
+ Oneof struct {
+ Base
+ L1 OneofL1
+ }
+ OneofL1 struct {
+ Options func() pref.ProtoMessage
+ Fields OneofFields // must be consistent with Message.Fields.ContainingOneof
+ }
+)
+
+func (md *Message) Options() pref.ProtoMessage {
+ if f := md.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Message
+}
+func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry }
+func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields }
+func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs }
+func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames }
+func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges }
+func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers }
+func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges }
+func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage {
+ if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil {
+ return f()
+ }
+ return descopts.ExtensionRange
+}
+func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums }
+func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages }
+func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions }
+func (md *Message) ProtoType(pref.MessageDescriptor) {}
+func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Message) lazyInit() *MessageL2 {
+ md.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return md.L2
+}
+
+// IsMessageSet is a pseudo-internal API for checking whether a message
+// should serialize in the proto1 message format.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (md *Message) IsMessageSet() bool {
+ return md.L1.IsMessageSet
+}
+
+func (fd *Field) Options() pref.ProtoMessage {
+ if f := fd.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Field
+}
+func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number }
+func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality }
+func (fd *Field) Kind() pref.Kind { return fd.L1.Kind }
+func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has }
+func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) }
+func (fd *Field) HasPresence() bool {
+ return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
+}
+func (fd *Field) HasOptionalKeyword() bool {
+ return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
+}
+func (fd *Field) IsPacked() bool {
+ if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated {
+ switch fd.L1.Kind {
+ case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind:
+ default:
+ return true
+ }
+ }
+ return fd.L1.IsPacked
+}
+func (fd *Field) IsExtension() bool { return false }
+func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() }
+func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
+func (fd *Field) MapKey() pref.FieldDescriptor {
+ if !fd.IsMap() {
+ return nil
+ }
+ return fd.Message().Fields().ByNumber(1)
+}
+func (fd *Field) MapValue() pref.FieldDescriptor {
+ if !fd.IsMap() {
+ return nil
+ }
+ return fd.Message().Fields().ByNumber(2)
+}
+func (fd *Field) HasDefault() bool { return fd.L1.Default.has }
+func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) }
+func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum }
+func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof }
+func (fd *Field) ContainingMessage() pref.MessageDescriptor {
+ return fd.L0.Parent.(pref.MessageDescriptor)
+}
+func (fd *Field) Enum() pref.EnumDescriptor {
+ return fd.L1.Enum
+}
+func (fd *Field) Message() pref.MessageDescriptor {
+ if fd.L1.IsWeak {
+ if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
+ return d.(pref.MessageDescriptor)
+ }
+ }
+ return fd.L1.Message
+}
+func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
+func (fd *Field) ProtoType(pref.FieldDescriptor) {}
+
+// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8
+// validation for the string field. This exists for Google-internal use only
+// since proto3 did not enforce UTF-8 validity prior to the open-source release.
+// If this method does not exist, the default is to enforce valid UTF-8.
+//
+// WARNING: This method is exempt from the compatibility promise and may be
+// removed in the future without warning.
+func (fd *Field) EnforceUTF8() bool {
+ if fd.L1.HasEnforceUTF8 {
+ return fd.L1.EnforceUTF8
+ }
+ return fd.L0.ParentFile.L1.Syntax == pref.Proto3
+}
+
+func (od *Oneof) IsSynthetic() bool {
+ return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword()
+}
+func (od *Oneof) Options() pref.ProtoMessage {
+ if f := od.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Oneof
+}
+func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields }
+func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) }
+func (od *Oneof) ProtoType(pref.OneofDescriptor) {}
+
+type (
+ Extension struct {
+ Base
+ L1 ExtensionL1
+ L2 *ExtensionL2 // protected by fileDesc.once
+ }
+ ExtensionL1 struct {
+ Number pref.FieldNumber
+ Extendee pref.MessageDescriptor
+ Cardinality pref.Cardinality
+ Kind pref.Kind
+ }
+ ExtensionL2 struct {
+ Options func() pref.ProtoMessage
+ JSONName jsonName
+ IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
+ IsPacked bool // promoted from google.protobuf.FieldOptions
+ Default defaultValue
+ Enum pref.EnumDescriptor
+ Message pref.MessageDescriptor
+ }
+)
+
+func (xd *Extension) Options() pref.ProtoMessage {
+ if f := xd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Field
+}
+func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number }
+func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality }
+func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind }
+func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has }
+func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) }
+func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated }
+func (xd *Extension) HasOptionalKeyword() bool {
+ return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional
+}
+func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsExtension() bool { return true }
+func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated }
+func (xd *Extension) IsMap() bool { return false }
+func (xd *Extension) MapKey() pref.FieldDescriptor { return nil }
+func (xd *Extension) MapValue() pref.FieldDescriptor { return nil }
+func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has }
+func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) }
+func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum }
+func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil }
+func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee }
+func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum }
+func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message }
+func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) }
+func (xd *Extension) ProtoType(pref.FieldDescriptor) {}
+func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {}
+func (xd *Extension) lazyInit() *ExtensionL2 {
+ xd.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return xd.L2
+}
+
+type (
+ Service struct {
+ Base
+ L1 ServiceL1
+ L2 *ServiceL2 // protected by fileDesc.once
+ }
+ ServiceL1 struct{}
+ ServiceL2 struct {
+ Options func() pref.ProtoMessage
+ Methods Methods
+ }
+
+ Method struct {
+ Base
+ L1 MethodL1
+ }
+ MethodL1 struct {
+ Options func() pref.ProtoMessage
+ Input pref.MessageDescriptor
+ Output pref.MessageDescriptor
+ IsStreamingClient bool
+ IsStreamingServer bool
+ }
+)
+
+func (sd *Service) Options() pref.ProtoMessage {
+ if f := sd.lazyInit().Options; f != nil {
+ return f()
+ }
+ return descopts.Service
+}
+func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods }
+func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) }
+func (sd *Service) ProtoType(pref.ServiceDescriptor) {}
+func (sd *Service) ProtoInternal(pragma.DoNotImplement) {}
+func (sd *Service) lazyInit() *ServiceL2 {
+ sd.L0.ParentFile.lazyInit() // implicitly initializes L2
+ return sd.L2
+}
+
+func (md *Method) Options() pref.ProtoMessage {
+ if f := md.L1.Options; f != nil {
+ return f()
+ }
+ return descopts.Method
+}
+func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input }
+func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output }
+func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient }
+func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer }
+func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+func (md *Method) ProtoType(pref.MethodDescriptor) {}
+func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
+
+// Surrogate files are can be used to create standalone descriptors
+// where the syntax is only information derived from the parent file.
+var (
+ SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}}
+ SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}}
+)
+
+type (
+ Base struct {
+ L0 BaseL0
+ }
+ BaseL0 struct {
+ FullName pref.FullName // must be populated
+ ParentFile *File // must be populated
+ Parent pref.Descriptor
+ Index int
+ }
+)
+
+func (d *Base) Name() pref.Name { return d.L0.FullName.Name() }
+func (d *Base) FullName() pref.FullName { return d.L0.FullName }
+func (d *Base) ParentFile() pref.FileDescriptor {
+ if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 {
+ return nil // surrogate files are not real parents
+ }
+ return d.L0.ParentFile
+}
+func (d *Base) Parent() pref.Descriptor { return d.L0.Parent }
+func (d *Base) Index() int { return d.L0.Index }
+func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() }
+func (d *Base) IsPlaceholder() bool { return false }
+func (d *Base) ProtoInternal(pragma.DoNotImplement) {}
+
+type jsonName struct {
+ has bool
+ once sync.Once
+ name string
+}
+
+// Init initializes the name. It is exported for use by other internal packages.
+func (js *jsonName) Init(s string) {
+ js.has = true
+ js.name = s
+}
+
+func (js *jsonName) get(fd pref.FieldDescriptor) string {
+ if !js.has {
+ js.once.Do(func() {
+ js.name = strs.JSONCamelCase(string(fd.Name()))
+ })
+ }
+ return js.name
+}
+
+func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue {
+ dv := defaultValue{has: v.IsValid(), val: v, enum: ev}
+ if b, ok := v.Interface().([]byte); ok {
+ // Store a copy of the default bytes, so that we can detect
+ // accidental mutations of the original value.
+ dv.bytes = append([]byte(nil), b...)
+ }
+ return dv
+}
+
+func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue {
+ var evs pref.EnumValueDescriptors
+ if k == pref.EnumKind {
+ // If the enum is declared within the same file, be careful not to
+ // blindly call the Values method, lest we bind ourselves in a deadlock.
+ if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf {
+ evs = &e.L2.Values
+ } else {
+ evs = ed.Values()
+ }
+
+ // If we are unable to resolve the enum dependency, use a placeholder
+ // enum value since we will not be able to parse the default value.
+ if ed.IsPlaceholder() && pref.Name(b).IsValid() {
+ v := pref.ValueOfEnum(0)
+ ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b)))
+ return DefaultValue(v, ev)
+ }
+ }
+
+ v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor)
+ if err != nil {
+ panic(err)
+ }
+ return DefaultValue(v, ev)
+}
+
+type defaultValue struct {
+ has bool
+ val pref.Value
+ enum pref.EnumValueDescriptor
+ bytes []byte
+}
+
+func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value {
+ // Return the zero value as the default if unpopulated.
+ if !dv.has {
+ if fd.Cardinality() == pref.Repeated {
+ return pref.Value{}
+ }
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return pref.ValueOfBool(false)
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ return pref.ValueOfInt32(0)
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ return pref.ValueOfInt64(0)
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ return pref.ValueOfUint32(0)
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ return pref.ValueOfUint64(0)
+ case pref.FloatKind:
+ return pref.ValueOfFloat32(0)
+ case pref.DoubleKind:
+ return pref.ValueOfFloat64(0)
+ case pref.StringKind:
+ return pref.ValueOfString("")
+ case pref.BytesKind:
+ return pref.ValueOfBytes(nil)
+ case pref.EnumKind:
+ if evs := fd.Enum().Values(); evs.Len() > 0 {
+ return pref.ValueOfEnum(evs.Get(0).Number())
+ }
+ return pref.ValueOfEnum(0)
+ }
+ }
+
+ if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) {
+ // TODO: Avoid panic if we're running with the race detector
+ // and instead spawn a goroutine that periodically resets
+ // this value back to the original to induce a race.
+ panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName()))
+ }
+ return dv.val
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
new file mode 100644
index 00000000..c0cddf86
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -0,0 +1,471 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// fileRaw is a data struct used when initializing a file descriptor from
+// a raw FileDescriptorProto.
+type fileRaw struct {
+ builder Builder
+ allEnums []Enum
+ allMessages []Message
+ allExtensions []Extension
+ allServices []Service
+}
+
+func newRawFile(db Builder) *File {
+ fd := &File{fileRaw: fileRaw{builder: db}}
+ fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices)
+ fd.unmarshalSeed(db.RawDescriptor)
+
+ // Extended message targets are eagerly resolved since registration
+ // needs this information at program init time.
+ for i := range fd.allExtensions {
+ xd := &fd.allExtensions[i]
+ xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i))
+ }
+
+ fd.checkDecls()
+ return fd
+}
+
+// initDecls pre-allocates slices for the exact number of enums, messages
+// (including map entries), extensions, and services declared in the proto file.
+// This is done to avoid regrowing the slice, which would change the address
+// for any previously seen declaration.
+//
+// The alloc methods "allocates" slices by pulling from the capacity.
+func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) {
+ fd.allEnums = make([]Enum, 0, numEnums)
+ fd.allMessages = make([]Message, 0, numMessages)
+ fd.allExtensions = make([]Extension, 0, numExtensions)
+ fd.allServices = make([]Service, 0, numServices)
+}
+
+func (fd *File) allocEnums(n int) []Enum {
+ total := len(fd.allEnums)
+ es := fd.allEnums[total : total+n]
+ fd.allEnums = fd.allEnums[:total+n]
+ return es
+}
+func (fd *File) allocMessages(n int) []Message {
+ total := len(fd.allMessages)
+ ms := fd.allMessages[total : total+n]
+ fd.allMessages = fd.allMessages[:total+n]
+ return ms
+}
+func (fd *File) allocExtensions(n int) []Extension {
+ total := len(fd.allExtensions)
+ xs := fd.allExtensions[total : total+n]
+ fd.allExtensions = fd.allExtensions[:total+n]
+ return xs
+}
+func (fd *File) allocServices(n int) []Service {
+ total := len(fd.allServices)
+ xs := fd.allServices[total : total+n]
+ fd.allServices = fd.allServices[:total+n]
+ return xs
+}
+
+// checkDecls performs a sanity check that the expected number of expected
+// declarations matches the number that were found in the descriptor proto.
+func (fd *File) checkDecls() {
+ switch {
+ case len(fd.allEnums) != cap(fd.allEnums):
+ case len(fd.allMessages) != cap(fd.allMessages):
+ case len(fd.allExtensions) != cap(fd.allExtensions):
+ case len(fd.allServices) != cap(fd.allServices):
+ default:
+ return
+ }
+ panic("mismatching cardinality")
+}
+
+func (fd *File) unmarshalSeed(b []byte) {
+ sb := getBuilder()
+ defer putBuilder(sb)
+
+ var prevField pref.FieldNumber
+ var numEnums, numMessages, numExtensions, numServices int
+ var posEnums, posMessages, posExtensions, posServices int
+ b0 := b
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FileDescriptorProto_Syntax:
+ switch string(v) {
+ case "proto2":
+ fd.L1.Syntax = pref.Proto2
+ case "proto3":
+ fd.L1.Syntax = pref.Proto3
+ default:
+ panic("invalid syntax")
+ }
+ case fieldnum.FileDescriptorProto_Name:
+ fd.L1.Path = sb.MakeString(v)
+ case fieldnum.FileDescriptorProto_Package:
+ fd.L1.Package = pref.FullName(sb.MakeString(v))
+ case fieldnum.FileDescriptorProto_EnumType:
+ if prevField != fieldnum.FileDescriptorProto_EnumType {
+ if numEnums > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posEnums = len(b0) - len(b) - n - m
+ }
+ numEnums++
+ case fieldnum.FileDescriptorProto_MessageType:
+ if prevField != fieldnum.FileDescriptorProto_MessageType {
+ if numMessages > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posMessages = len(b0) - len(b) - n - m
+ }
+ numMessages++
+ case fieldnum.FileDescriptorProto_Extension:
+ if prevField != fieldnum.FileDescriptorProto_Extension {
+ if numExtensions > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posExtensions = len(b0) - len(b) - n - m
+ }
+ numExtensions++
+ case fieldnum.FileDescriptorProto_Service:
+ if prevField != fieldnum.FileDescriptorProto_Service {
+ if numServices > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posServices = len(b0) - len(b) - n - m
+ }
+ numServices++
+ }
+ prevField = num
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ prevField = -1 // ignore known field numbers of unknown wire type
+ }
+ }
+
+ // If syntax is missing, it is assumed to be proto2.
+ if fd.L1.Syntax == 0 {
+ fd.L1.Syntax = pref.Proto2
+ }
+
+ // Must allocate all declarations before parsing each descriptor type
+ // to ensure we handled all descriptors in "flattened ordering".
+ if numEnums > 0 {
+ fd.L1.Enums.List = fd.allocEnums(numEnums)
+ }
+ if numMessages > 0 {
+ fd.L1.Messages.List = fd.allocMessages(numMessages)
+ }
+ if numExtensions > 0 {
+ fd.L1.Extensions.List = fd.allocExtensions(numExtensions)
+ }
+ if numServices > 0 {
+ fd.L1.Services.List = fd.allocServices(numServices)
+ }
+
+ if numEnums > 0 {
+ b := b0[posEnums:]
+ for i := range fd.L1.Enums.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numMessages > 0 {
+ b := b0[posMessages:]
+ for i := range fd.L1.Messages.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numExtensions > 0 {
+ b := b0[posExtensions:]
+ for i := range fd.L1.Extensions.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+ if numServices > 0 {
+ b := b0[posServices:]
+ for i := range fd.L1.Services.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i)
+ b = b[n+m:]
+ }
+ }
+}
+
+func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ ed.L0.ParentFile = pf
+ ed.L0.Parent = pd
+ ed.L0.Index = i
+
+ var numValues int
+ for b := b; len(b) > 0; {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_Name:
+ ed.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.EnumDescriptorProto_Value:
+ numValues++
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+
+ // Only construct enum value descriptors for top-level enums since
+ // they are needed for registration.
+ if pd != pf {
+ return
+ }
+ ed.L1.eagerValues = true
+ ed.L2 = new(EnumL2)
+ ed.L2.Values.List = make([]EnumValue, numValues)
+ for i := 0; len(b) > 0; {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_Value:
+ ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i)
+ i++
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ md.L0.ParentFile = pf
+ md.L0.Parent = pd
+ md.L0.Index = i
+
+ var prevField pref.FieldNumber
+ var numEnums, numMessages, numExtensions int
+ var posEnums, posMessages, posExtensions int
+ b0 := b
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_Name:
+ md.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.DescriptorProto_EnumType:
+ if prevField != fieldnum.DescriptorProto_EnumType {
+ if numEnums > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posEnums = len(b0) - len(b) - n - m
+ }
+ numEnums++
+ case fieldnum.DescriptorProto_NestedType:
+ if prevField != fieldnum.DescriptorProto_NestedType {
+ if numMessages > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posMessages = len(b0) - len(b) - n - m
+ }
+ numMessages++
+ case fieldnum.DescriptorProto_Extension:
+ if prevField != fieldnum.DescriptorProto_Extension {
+ if numExtensions > 0 {
+ panic("non-contiguous repeated field")
+ }
+ posExtensions = len(b0) - len(b) - n - m
+ }
+ numExtensions++
+ case fieldnum.DescriptorProto_Options:
+ md.unmarshalSeedOptions(v)
+ }
+ prevField = num
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ prevField = -1 // ignore known field numbers of unknown wire type
+ }
+ }
+
+ // Must allocate all declarations before parsing each descriptor type
+ // to ensure we handled all descriptors in "flattened ordering".
+ if numEnums > 0 {
+ md.L1.Enums.List = pf.allocEnums(numEnums)
+ }
+ if numMessages > 0 {
+ md.L1.Messages.List = pf.allocMessages(numMessages)
+ }
+ if numExtensions > 0 {
+ md.L1.Extensions.List = pf.allocExtensions(numExtensions)
+ }
+
+ if numEnums > 0 {
+ b := b0[posEnums:]
+ for i := range md.L1.Enums.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+ if numMessages > 0 {
+ b := b0[posMessages:]
+ for i := range md.L1.Messages.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+ if numExtensions > 0 {
+ b := b0[posExtensions:]
+ for i := range md.L1.Extensions.List {
+ _, n := protowire.ConsumeVarint(b)
+ v, m := protowire.ConsumeBytes(b[n:])
+ md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i)
+ b = b[n+m:]
+ }
+ }
+}
+
+func (md *Message) unmarshalSeedOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MessageOptions_MapEntry:
+ md.L1.IsMapEntry = protowire.DecodeBool(v)
+ case fieldnum.MessageOptions_MessageSetWireFormat:
+ md.L1.IsMessageSet = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ xd.L0.ParentFile = pf
+ xd.L0.Parent = pd
+ xd.L0.Index = i
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Number:
+ xd.L1.Number = pref.FieldNumber(v)
+ case fieldnum.FieldDescriptorProto_Label:
+ xd.L1.Cardinality = pref.Cardinality(v)
+ case fieldnum.FieldDescriptorProto_Type:
+ xd.L1.Kind = pref.Kind(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Name:
+ xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.FieldDescriptorProto_Extendee:
+ xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ sd.L0.ParentFile = pf
+ sd.L0.Parent = pd
+ sd.L0.Index = i
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.ServiceDescriptorProto_Name:
+ sd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+var nameBuilderPool = sync.Pool{
+ New: func() interface{} { return new(strs.Builder) },
+}
+
+func getBuilder() *strs.Builder {
+ return nameBuilderPool.Get().(*strs.Builder)
+}
+func putBuilder(b *strs.Builder) {
+ nameBuilderPool.Put(b)
+}
+
+// makeFullName converts b to a protoreflect.FullName,
+// where b must start with a leading dot.
+func makeFullName(sb *strs.Builder, b []byte) pref.FullName {
+ if len(b) == 0 || b[0] != '.' {
+ panic("name reference must be fully qualified")
+ }
+ return pref.FullName(sb.MakeString(b[1:]))
+}
+
+func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName {
+ return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix)))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
new file mode 100644
index 00000000..bc215944
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -0,0 +1,704 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/fieldnum"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (fd *File) lazyRawInit() {
+ fd.unmarshalFull(fd.builder.RawDescriptor)
+ fd.resolveMessages()
+ fd.resolveExtensions()
+ fd.resolveServices()
+}
+
+func (file *File) resolveMessages() {
+ var depIdx int32
+ for i := range file.allMessages {
+ md := &file.allMessages[i]
+
+ // Resolve message field dependencies.
+ for j := range md.L2.Fields.List {
+ fd := &md.L2.Fields.List[j]
+
+ // Weak fields are resolved upon actual use.
+ if fd.L1.IsWeak {
+ continue
+ }
+
+ // Resolve message field dependency.
+ switch fd.L1.Kind {
+ case pref.EnumKind:
+ fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx)
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
+ depIdx++
+ }
+
+ // Default is resolved here since it depends on Enum being resolved.
+ if v := fd.L1.Default.val; v.IsValid() {
+ fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum)
+ }
+ }
+ }
+}
+
+func (file *File) resolveExtensions() {
+ var depIdx int32
+ for i := range file.allExtensions {
+ xd := &file.allExtensions[i]
+
+ // Resolve extension field dependency.
+ switch xd.L1.Kind {
+ case pref.EnumKind:
+ xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx)
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx)
+ depIdx++
+ }
+
+ // Default is resolved here since it depends on Enum being resolved.
+ if v := xd.L2.Default.val; v.IsValid() {
+ xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum)
+ }
+ }
+}
+
+func (file *File) resolveServices() {
+ var depIdx int32
+ for i := range file.allServices {
+ sd := &file.allServices[i]
+
+ // Resolve method dependencies.
+ for j := range sd.L2.Methods.List {
+ md := &sd.L2.Methods.List[j]
+ md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx)
+ md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx)
+ depIdx++
+ }
+ }
+}
+
+func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor {
+ r := file.builder.FileRegistry
+ if r, ok := r.(resolverByIndex); ok {
+ if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil {
+ return ed2
+ }
+ }
+ for i := range file.allEnums {
+ if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() {
+ return ed2
+ }
+ }
+ if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil {
+ return d.(pref.EnumDescriptor)
+ }
+ return ed
+}
+
+func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor {
+ r := file.builder.FileRegistry
+ if r, ok := r.(resolverByIndex); ok {
+ if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil {
+ return md2
+ }
+ }
+ for i := range file.allMessages {
+ if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() {
+ return md2
+ }
+ }
+ if d, _ := r.FindDescriptorByName(md.FullName()); d != nil {
+ return d.(pref.MessageDescriptor)
+ }
+ return md
+}
+
+func (fd *File) unmarshalFull(b []byte) {
+ sb := getBuilder()
+ defer putBuilder(sb)
+
+ var enumIdx, messageIdx, extensionIdx, serviceIdx int
+ var rawOptions []byte
+ fd.L2 = new(FileL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FileDescriptorProto_PublicDependency:
+ fd.L2.Imports[v].IsPublic = true
+ case fieldnum.FileDescriptorProto_WeakDependency:
+ fd.L2.Imports[v].IsWeak = true
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FileDescriptorProto_Dependency:
+ path := sb.MakeString(v)
+ imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+ if imp == nil {
+ imp = PlaceholderFile(path)
+ }
+ fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp})
+ case fieldnum.FileDescriptorProto_EnumType:
+ fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
+ enumIdx++
+ case fieldnum.FileDescriptorProto_MessageType:
+ fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
+ messageIdx++
+ case fieldnum.FileDescriptorProto_Extension:
+ fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
+ extensionIdx++
+ case fieldnum.FileDescriptorProto_Service:
+ fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb)
+ serviceIdx++
+ case fieldnum.FileDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+}
+
+func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawValues [][]byte
+ var rawOptions []byte
+ if !ed.L1.eagerValues {
+ ed.L2 = new(EnumL2)
+ }
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_Value:
+ rawValues = append(rawValues, v)
+ case fieldnum.EnumDescriptorProto_ReservedName:
+ ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ case fieldnum.EnumDescriptorProto_ReservedRange:
+ ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v))
+ case fieldnum.EnumDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if !ed.L1.eagerValues && len(rawValues) > 0 {
+ ed.L2.Values.List = make([]EnumValue, len(rawValues))
+ for i, b := range rawValues {
+ ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i)
+ }
+ }
+ ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions)
+}
+
+func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumDescriptorProto_EnumReservedRange_Start:
+ r[0] = pref.EnumNumber(v)
+ case fieldnum.EnumDescriptorProto_EnumReservedRange_End:
+ r[1] = pref.EnumNumber(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r
+}
+
+func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ vd.L0.ParentFile = pf
+ vd.L0.Parent = pd
+ vd.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumValueDescriptorProto_Number:
+ vd.L1.Number = pref.EnumNumber(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.EnumValueDescriptorProto_Name:
+ // NOTE: Enum values are in the same scope as the enum parent.
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v)
+ case fieldnum.EnumValueDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions)
+}
+
+func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawFields, rawOneofs [][]byte
+ var enumIdx, messageIdx, extensionIdx int
+ var rawOptions []byte
+ md.L2 = new(MessageL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_Field:
+ rawFields = append(rawFields, v)
+ case fieldnum.DescriptorProto_OneofDecl:
+ rawOneofs = append(rawOneofs, v)
+ case fieldnum.DescriptorProto_ReservedName:
+ md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v)))
+ case fieldnum.DescriptorProto_ReservedRange:
+ md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v))
+ case fieldnum.DescriptorProto_ExtensionRange:
+ r, rawOptions := unmarshalMessageExtensionRange(v)
+ opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions)
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r)
+ md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts)
+ case fieldnum.DescriptorProto_EnumType:
+ md.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
+ enumIdx++
+ case fieldnum.DescriptorProto_NestedType:
+ md.L1.Messages.List[messageIdx].unmarshalFull(v, sb)
+ messageIdx++
+ case fieldnum.DescriptorProto_Extension:
+ md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
+ extensionIdx++
+ case fieldnum.DescriptorProto_Options:
+ md.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if len(rawFields) > 0 || len(rawOneofs) > 0 {
+ md.L2.Fields.List = make([]Field, len(rawFields))
+ md.L2.Oneofs.List = make([]Oneof, len(rawOneofs))
+ for i, b := range rawFields {
+ fd := &md.L2.Fields.List[i]
+ fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
+ if fd.L1.Cardinality == pref.Required {
+ md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number)
+ }
+ }
+ for i, b := range rawOneofs {
+ od := &md.L2.Oneofs.List[i]
+ od.unmarshalFull(b, sb, md.L0.ParentFile, md, i)
+ }
+ }
+ md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
+}
+
+func (md *Message) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MessageOptions_MapEntry:
+ md.L1.IsMapEntry = protowire.DecodeBool(v)
+ case fieldnum.MessageOptions_MessageSetWireFormat:
+ md.L1.IsMessageSet = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_ReservedRange_Start:
+ r[0] = pref.FieldNumber(v)
+ case fieldnum.DescriptorProto_ReservedRange_End:
+ r[1] = pref.FieldNumber(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r
+}
+
+func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_ExtensionRange_Start:
+ r[0] = pref.FieldNumber(v)
+ case fieldnum.DescriptorProto_ExtensionRange_End:
+ r[1] = pref.FieldNumber(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.DescriptorProto_ExtensionRange_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ return r, rawOptions
+}
+
+func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ fd.L0.ParentFile = pf
+ fd.L0.Parent = pd
+ fd.L0.Index = i
+
+ var rawTypeName []byte
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Number:
+ fd.L1.Number = pref.FieldNumber(v)
+ case fieldnum.FieldDescriptorProto_Label:
+ fd.L1.Cardinality = pref.Cardinality(v)
+ case fieldnum.FieldDescriptorProto_Type:
+ fd.L1.Kind = pref.Kind(v)
+ case fieldnum.FieldDescriptorProto_OneofIndex:
+ // In Message.unmarshalFull, we allocate slices for both
+ // the field and oneof descriptors before unmarshaling either
+ // of them. This ensures pointers to slice elements are stable.
+ od := &pd.(*Message).L2.Oneofs.List[v]
+ od.L1.Fields.List = append(od.L1.Fields.List, fd)
+ if fd.L1.ContainingOneof != nil {
+ panic("oneof type already set")
+ }
+ fd.L1.ContainingOneof = od
+ case fieldnum.FieldDescriptorProto_Proto3Optional:
+ fd.L1.IsProto3Optional = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Name:
+ fd.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.FieldDescriptorProto_JsonName:
+ fd.L1.JSONName.Init(sb.MakeString(v))
+ case fieldnum.FieldDescriptorProto_DefaultValue:
+ fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages
+ case fieldnum.FieldDescriptorProto_TypeName:
+ rawTypeName = v
+ case fieldnum.FieldDescriptorProto_Options:
+ fd.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if rawTypeName != nil {
+ name := makeFullName(sb, rawTypeName)
+ switch fd.L1.Kind {
+ case pref.EnumKind:
+ fd.L1.Enum = PlaceholderEnum(name)
+ case pref.MessageKind, pref.GroupKind:
+ fd.L1.Message = PlaceholderMessage(name)
+ }
+ }
+ fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
+}
+
+func (fd *Field) unmarshalOptions(b []byte) {
+ const FieldOptions_EnforceUTF8 = 13
+
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldOptions_Packed:
+ fd.L1.HasPacked = true
+ fd.L1.IsPacked = protowire.DecodeBool(v)
+ case fieldnum.FieldOptions_Weak:
+ fd.L1.IsWeak = protowire.DecodeBool(v)
+ case FieldOptions_EnforceUTF8:
+ fd.L1.HasEnforceUTF8 = true
+ fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ od.L0.ParentFile = pf
+ od.L0.Parent = pd
+ od.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.OneofDescriptorProto_Name:
+ od.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.OneofDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions)
+}
+
+func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawTypeName []byte
+ var rawOptions []byte
+ xd.L2 = new(ExtensionL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_Proto3Optional:
+ xd.L2.IsProto3Optional = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldDescriptorProto_JsonName:
+ xd.L2.JSONName.Init(sb.MakeString(v))
+ case fieldnum.FieldDescriptorProto_DefaultValue:
+ xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions
+ case fieldnum.FieldDescriptorProto_TypeName:
+ rawTypeName = v
+ case fieldnum.FieldDescriptorProto_Options:
+ xd.unmarshalOptions(v)
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if rawTypeName != nil {
+ name := makeFullName(sb, rawTypeName)
+ switch xd.L1.Kind {
+ case pref.EnumKind:
+ xd.L2.Enum = PlaceholderEnum(name)
+ case pref.MessageKind, pref.GroupKind:
+ xd.L2.Message = PlaceholderMessage(name)
+ }
+ }
+ xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
+}
+
+func (xd *Extension) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.FieldOptions_Packed:
+ xd.L2.IsPacked = protowire.DecodeBool(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+}
+
+func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
+ var rawMethods [][]byte
+ var rawOptions []byte
+ sd.L2 = new(ServiceL2)
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.ServiceDescriptorProto_Method:
+ rawMethods = append(rawMethods, v)
+ case fieldnum.ServiceDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ if len(rawMethods) > 0 {
+ sd.L2.Methods.List = make([]Method, len(rawMethods))
+ for i, b := range rawMethods {
+ sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i)
+ }
+ }
+ sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions)
+}
+
+func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) {
+ md.L0.ParentFile = pf
+ md.L0.Parent = pd
+ md.L0.Index = i
+
+ var rawOptions []byte
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MethodDescriptorProto_ClientStreaming:
+ md.L1.IsStreamingClient = protowire.DecodeBool(v)
+ case fieldnum.MethodDescriptorProto_ServerStreaming:
+ md.L1.IsStreamingServer = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case fieldnum.MethodDescriptorProto_Name:
+ md.L0.FullName = appendFullName(sb, pd.FullName(), v)
+ case fieldnum.MethodDescriptorProto_InputType:
+ md.L1.Input = PlaceholderMessage(makeFullName(sb, v))
+ case fieldnum.MethodDescriptorProto_OutputType:
+ md.L1.Output = PlaceholderMessage(makeFullName(sb, v))
+ case fieldnum.MethodDescriptorProto_Options:
+ rawOptions = appendOptions(rawOptions, v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+ md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions)
+}
+
+// appendOptions appends src to dst, where the returned slice is never nil.
+// This is necessary to distinguish between empty and unpopulated options.
+func appendOptions(dst, src []byte) []byte {
+ if dst == nil {
+ dst = []byte{}
+ }
+ return append(dst, src...)
+}
+
+// optionsUnmarshaler constructs a lazy unmarshal function for an options message.
+//
+// The type of message to unmarshal to is passed as a pointer since the
+// vars in descopts may not yet be populated at the time this function is called.
+func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage {
+ if b == nil {
+ return nil
+ }
+ var opts pref.ProtoMessage
+ var once sync.Once
+ return func() pref.ProtoMessage {
+ once.Do(func() {
+ if *p == nil {
+ panic("Descriptor.Options called without importing the descriptor package")
+ }
+ opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage)
+ if err := (proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: db.TypeResolver,
+ }).Unmarshal(b, opts); err != nil {
+ panic(err)
+ }
+ })
+ return opts
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
new file mode 100644
index 00000000..1b7089b6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go
@@ -0,0 +1,286 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type FileImports []pref.FileImport
+
+func (p *FileImports) Len() int { return len(*p) }
+func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] }
+func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {}
+
+type Names struct {
+ List []pref.Name
+ once sync.Once
+ has map[pref.Name]int // protected by once
+}
+
+func (p *Names) Len() int { return len(p.List) }
+func (p *Names) Get(i int) pref.Name { return p.List[i] }
+func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 }
+func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *Names) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Names) lazyInit() *Names {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.has = make(map[pref.Name]int, len(p.List))
+ for _, s := range p.List {
+ p.has[s] = p.has[s] + 1
+ }
+ }
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of names with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *Names) CheckValid() error {
+ for s, n := range p.lazyInit().has {
+ switch {
+ case n > 1:
+ return errors.New("duplicate name: %q", s)
+ case false && !s.IsValid():
+ // NOTE: The C++ implementation does not validate the identifier.
+ // See https://github.com/protocolbuffers/protobuf/issues/6335.
+ return errors.New("invalid name: %q", s)
+ }
+ }
+ return nil
+}
+
+type EnumRanges struct {
+ List [][2]pref.EnumNumber // start inclusive; end inclusive
+ once sync.Once
+ sorted [][2]pref.EnumNumber // protected by once
+}
+
+func (p *EnumRanges) Len() int { return len(p.List) }
+func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] }
+func (p *EnumRanges) Has(n pref.EnumNumber) bool {
+ for ls := p.lazyInit().sorted; len(ls) > 0; {
+ i := len(ls) / 2
+ switch r := enumRange(ls[i]); {
+ case n < r.Start():
+ ls = ls[:i] // search lower
+ case n > r.End():
+ ls = ls[i+1:] // search upper
+ default:
+ return true
+ }
+ }
+ return false
+}
+func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {}
+func (p *EnumRanges) lazyInit() *EnumRanges {
+ p.once.Do(func() {
+ p.sorted = append(p.sorted, p.List...)
+ sort.Slice(p.sorted, func(i, j int) bool {
+ return p.sorted[i][0] < p.sorted[j][0]
+ })
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of ranges with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *EnumRanges) CheckValid() error {
+ var rp enumRange
+ for i, r := range p.lazyInit().sorted {
+ r := enumRange(r)
+ switch {
+ case !(r.Start() <= r.End()):
+ return errors.New("invalid range: %v", r)
+ case !(rp.End() < r.Start()) && i > 0:
+ return errors.New("overlapping ranges: %v with %v", rp, r)
+ }
+ rp = r
+ }
+ return nil
+}
+
+type enumRange [2]protoreflect.EnumNumber
+
+func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive
+func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive
+func (r enumRange) String() string {
+ if r.Start() == r.End() {
+ return fmt.Sprintf("%d", r.Start())
+ }
+ return fmt.Sprintf("%d to %d", r.Start(), r.End())
+}
+
+type FieldRanges struct {
+ List [][2]pref.FieldNumber // start inclusive; end exclusive
+ once sync.Once
+ sorted [][2]pref.FieldNumber // protected by once
+}
+
+func (p *FieldRanges) Len() int { return len(p.List) }
+func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] }
+func (p *FieldRanges) Has(n pref.FieldNumber) bool {
+ for ls := p.lazyInit().sorted; len(ls) > 0; {
+ i := len(ls) / 2
+ switch r := fieldRange(ls[i]); {
+ case n < r.Start():
+ ls = ls[:i] // search lower
+ case n > r.End():
+ ls = ls[i+1:] // search upper
+ default:
+ return true
+ }
+ }
+ return false
+}
+func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {}
+func (p *FieldRanges) lazyInit() *FieldRanges {
+ p.once.Do(func() {
+ p.sorted = append(p.sorted, p.List...)
+ sort.Slice(p.sorted, func(i, j int) bool {
+ return p.sorted[i][0] < p.sorted[j][0]
+ })
+ })
+ return p
+}
+
+// CheckValid reports any errors with the set of ranges with an error message
+// that completes the sentence: "ranges is invalid because it has ..."
+func (p *FieldRanges) CheckValid(isMessageSet bool) error {
+ var rp fieldRange
+ for i, r := range p.lazyInit().sorted {
+ r := fieldRange(r)
+ switch {
+ case !isValidFieldNumber(r.Start(), isMessageSet):
+ return errors.New("invalid field number: %d", r.Start())
+ case !isValidFieldNumber(r.End(), isMessageSet):
+ return errors.New("invalid field number: %d", r.End())
+ case !(r.Start() <= r.End()):
+ return errors.New("invalid range: %v", r)
+ case !(rp.End() < r.Start()) && i > 0:
+ return errors.New("overlapping ranges: %v with %v", rp, r)
+ }
+ rp = r
+ }
+ return nil
+}
+
+// isValidFieldNumber reports whether the field number is valid.
+// Unlike the FieldNumber.IsValid method, it allows ranges that cover the
+// reserved number range.
+func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool {
+ if isMessageSet {
+ return protowire.MinValidNumber <= n && n <= math.MaxInt32
+ }
+ return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber
+}
+
+// CheckOverlap reports an error if p and q overlap.
+func (p *FieldRanges) CheckOverlap(q *FieldRanges) error {
+ rps := p.lazyInit().sorted
+ rqs := q.lazyInit().sorted
+ for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); {
+ rp := fieldRange(rps[pi])
+ rq := fieldRange(rqs[qi])
+ if !(rp.End() < rq.Start() || rq.End() < rp.Start()) {
+ return errors.New("overlapping ranges: %v with %v", rp, rq)
+ }
+ if rp.Start() < rq.Start() {
+ pi++
+ } else {
+ qi++
+ }
+ }
+ return nil
+}
+
+type fieldRange [2]protoreflect.FieldNumber
+
+func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive
+func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive
+func (r fieldRange) String() string {
+ if r.Start() == r.End() {
+ return fmt.Sprintf("%d", r.Start())
+ }
+ return fmt.Sprintf("%d to %d", r.Start(), r.End())
+}
+
+type FieldNumbers struct {
+ List []pref.FieldNumber
+ once sync.Once
+ has map[pref.FieldNumber]struct{} // protected by once
+}
+
+func (p *FieldNumbers) Len() int { return len(p.List) }
+func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] }
+func (p *FieldNumbers) Has(n pref.FieldNumber) bool {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.has = make(map[pref.FieldNumber]struct{}, len(p.List))
+ for _, n := range p.List {
+ p.has[n] = struct{}{}
+ }
+ }
+ })
+ _, ok := p.has[n]
+ return ok
+}
+func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {}
+
+type OneofFields struct {
+ List []pref.FieldDescriptor
+ once sync.Once
+ byName map[pref.Name]pref.FieldDescriptor // protected by once
+ byJSON map[string]pref.FieldDescriptor // protected by once
+ byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once
+}
+
+func (p *OneofFields) Len() int { return len(p.List) }
+func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] }
+func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] }
+func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] }
+func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] }
+func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) }
+func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {}
+
+func (p *OneofFields) lazyInit() *OneofFields {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List))
+ p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List))
+ p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List))
+ for _, f := range p.List {
+ // Field names and numbers are guaranteed to be unique.
+ p.byName[f.Name()] = f
+ p.byJSON[f.JSONName()] = f
+ p.byNum[f.Number()] = f
+ }
+ }
+ })
+ return p
+}
+
+type SourceLocations struct {
+ List []pref.SourceLocation
+}
+
+func (p *SourceLocations) Len() int { return len(p.List) }
+func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] }
+func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
new file mode 100644
index 00000000..6a8825e8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -0,0 +1,345 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package filedesc
+
+import (
+ "fmt"
+ "sync"
+
+ "google.golang.org/protobuf/internal/descfmt"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type Enums struct {
+ List []Enum
+ once sync.Once
+ byName map[protoreflect.Name]*Enum // protected by once
+}
+
+func (p *Enums) Len() int {
+ return len(p.List)
+}
+func (p *Enums) Get(i int) protoreflect.EnumDescriptor {
+ return &p.List[i]
+}
+func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Enums) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Enums) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Enums) lazyInit() *Enums {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Enum, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type EnumValues struct {
+ List []EnumValue
+ once sync.Once
+ byName map[protoreflect.Name]*EnumValue // protected by once
+ byNum map[protoreflect.EnumNumber]*EnumValue // protected by once
+}
+
+func (p *EnumValues) Len() int {
+ return len(p.List)
+}
+func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor {
+ return &p.List[i]
+}
+func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
+ if d := p.lazyInit().byNum[n]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *EnumValues) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {}
+func (p *EnumValues) lazyInit() *EnumValues {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List))
+ p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ if _, ok := p.byNum[d.Number()]; !ok {
+ p.byNum[d.Number()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Messages struct {
+ List []Message
+ once sync.Once
+ byName map[protoreflect.Name]*Message // protected by once
+}
+
+func (p *Messages) Len() int {
+ return len(p.List)
+}
+func (p *Messages) Get(i int) protoreflect.MessageDescriptor {
+ return &p.List[i]
+}
+func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Messages) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Messages) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Messages) lazyInit() *Messages {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Message, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Fields struct {
+ List []Field
+ once sync.Once
+ byName map[protoreflect.Name]*Field // protected by once
+ byJSON map[string]*Field // protected by once
+ byNum map[protoreflect.FieldNumber]*Field // protected by once
+}
+
+func (p *Fields) Len() int {
+ return len(p.List)
+}
+func (p *Fields) Get(i int) protoreflect.FieldDescriptor {
+ return &p.List[i]
+}
+func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byJSON[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
+ if d := p.lazyInit().byNum[n]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Fields) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Fields) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Fields) lazyInit() *Fields {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Field, len(p.List))
+ p.byJSON = make(map[string]*Field, len(p.List))
+ p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ if _, ok := p.byJSON[d.JSONName()]; !ok {
+ p.byJSON[d.JSONName()] = d
+ }
+ if _, ok := p.byNum[d.Number()]; !ok {
+ p.byNum[d.Number()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Oneofs struct {
+ List []Oneof
+ once sync.Once
+ byName map[protoreflect.Name]*Oneof // protected by once
+}
+
+func (p *Oneofs) Len() int {
+ return len(p.List)
+}
+func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor {
+ return &p.List[i]
+}
+func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Oneofs) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Oneofs) lazyInit() *Oneofs {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Oneof, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Extensions struct {
+ List []Extension
+ once sync.Once
+ byName map[protoreflect.Name]*Extension // protected by once
+}
+
+func (p *Extensions) Len() int {
+ return len(p.List)
+}
+func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor {
+ return &p.List[i]
+}
+func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Extensions) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Extensions) lazyInit() *Extensions {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Extension, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Services struct {
+ List []Service
+ once sync.Once
+ byName map[protoreflect.Name]*Service // protected by once
+}
+
+func (p *Services) Len() int {
+ return len(p.List)
+}
+func (p *Services) Get(i int) protoreflect.ServiceDescriptor {
+ return &p.List[i]
+}
+func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Services) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Services) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Services) lazyInit() *Services {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Service, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
+
+type Methods struct {
+ List []Method
+ once sync.Once
+ byName map[protoreflect.Name]*Method // protected by once
+}
+
+func (p *Methods) Len() int {
+ return len(p.List)
+}
+func (p *Methods) Get(i int) protoreflect.MethodDescriptor {
+ return &p.List[i]
+}
+func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor {
+ if d := p.lazyInit().byName[s]; d != nil {
+ return d
+ }
+ return nil
+}
+func (p *Methods) Format(s fmt.State, r rune) {
+ descfmt.FormatList(s, r, p)
+}
+func (p *Methods) ProtoInternal(pragma.DoNotImplement) {}
+func (p *Methods) lazyInit() *Methods {
+ p.once.Do(func() {
+ if len(p.List) > 0 {
+ p.byName = make(map[protoreflect.Name]*Method, len(p.List))
+ for i := range p.List {
+ d := &p.List[i]
+ if _, ok := p.byName[d.Name()]; !ok {
+ p.byName[d.Name()] = d
+ }
+ }
+ }
+ })
+ return p
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
new file mode 100644
index 00000000..dbf2c605
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -0,0 +1,107 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import (
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var (
+ emptyNames = new(Names)
+ emptyEnumRanges = new(EnumRanges)
+ emptyFieldRanges = new(FieldRanges)
+ emptyFieldNumbers = new(FieldNumbers)
+ emptySourceLocations = new(SourceLocations)
+
+ emptyFiles = new(FileImports)
+ emptyMessages = new(Messages)
+ emptyFields = new(Fields)
+ emptyOneofs = new(Oneofs)
+ emptyEnums = new(Enums)
+ emptyEnumValues = new(EnumValues)
+ emptyExtensions = new(Extensions)
+ emptyServices = new(Services)
+)
+
+// PlaceholderFile is a placeholder, representing only the file path.
+type PlaceholderFile string
+
+func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f }
+func (f PlaceholderFile) Parent() pref.Descriptor { return nil }
+func (f PlaceholderFile) Index() int { return 0 }
+func (f PlaceholderFile) Syntax() pref.Syntax { return 0 }
+func (f PlaceholderFile) Name() pref.Name { return "" }
+func (f PlaceholderFile) FullName() pref.FullName { return "" }
+func (f PlaceholderFile) IsPlaceholder() bool { return true }
+func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File }
+func (f PlaceholderFile) Path() string { return string(f) }
+func (f PlaceholderFile) Package() pref.FullName { return "" }
+func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles }
+func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages }
+func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums }
+func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
+func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices }
+func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations }
+func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return }
+func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderEnum is a placeholder, representing only the full name.
+type PlaceholderEnum pref.FullName
+
+func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil }
+func (e PlaceholderEnum) Parent() pref.Descriptor { return nil }
+func (e PlaceholderEnum) Index() int { return 0 }
+func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 }
+func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() }
+func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) }
+func (e PlaceholderEnum) IsPlaceholder() bool { return true }
+func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum }
+func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues }
+func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames }
+func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return }
+func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderEnumValue is a placeholder, representing only the full name.
+type PlaceholderEnumValue pref.FullName
+
+func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil }
+func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil }
+func (e PlaceholderEnumValue) Index() int { return 0 }
+func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 }
+func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() }
+func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) }
+func (e PlaceholderEnumValue) IsPlaceholder() bool { return true }
+func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue }
+func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 }
+func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return }
+func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return }
+
+// PlaceholderMessage is a placeholder, representing only the full name.
+type PlaceholderMessage pref.FullName
+
+func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil }
+func (m PlaceholderMessage) Parent() pref.Descriptor { return nil }
+func (m PlaceholderMessage) Index() int { return 0 }
+func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 }
+func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() }
+func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) }
+func (m PlaceholderMessage) IsPlaceholder() bool { return true }
+func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message }
+func (m PlaceholderMessage) IsMapEntry() bool { return false }
+func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields }
+func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs }
+func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames }
+func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers }
+func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges }
+func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") }
+func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages }
+func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums }
+func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions }
+func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return }
+func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
new file mode 100644
index 00000000..0a0dd35d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -0,0 +1,297 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package filetype provides functionality for wrapping descriptors
+// with Go type information.
+package filetype
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/internal/descopts"
+ fdesc "google.golang.org/protobuf/internal/filedesc"
+ pimpl "google.golang.org/protobuf/internal/impl"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Builder constructs type descriptors from a raw file descriptor
+// and associated Go types for each enum and message declaration.
+//
+//
+// Flattened Ordering
+//
+// The protobuf type system represents declarations as a tree. Certain nodes in
+// the tree require us to either associate it with a concrete Go type or to
+// resolve a dependency, which is information that must be provided separately
+// since it cannot be derived from the file descriptor alone.
+//
+// However, representing a tree as Go literals is difficult to simply do in a
+// space and time efficient way. Thus, we store them as a flattened list of
+// objects where the serialization order from the tree-based form is important.
+//
+// The "flattened ordering" is defined as a tree traversal of all enum, message,
+// extension, and service declarations using the following algorithm:
+//
+// def VisitFileDecls(fd):
+// for e in fd.Enums: yield e
+// for m in fd.Messages: yield m
+// for x in fd.Extensions: yield x
+// for s in fd.Services: yield s
+// for m in fd.Messages: yield from VisitMessageDecls(m)
+//
+// def VisitMessageDecls(md):
+// for e in md.Enums: yield e
+// for m in md.Messages: yield m
+// for x in md.Extensions: yield x
+// for m in md.Messages: yield from VisitMessageDecls(m)
+//
+// The traversal starts at the root file descriptor and yields each direct
+// declaration within each node before traversing into sub-declarations
+// that children themselves may have.
+type Builder struct {
+ // File is the underlying file descriptor builder.
+ File fdesc.Builder
+
+ // GoTypes is a unique set of the Go types for all declarations and
+ // dependencies. Each type is represented as a zero value of the Go type.
+ //
+ // Declarations are Go types generated for enums and messages directly
+ // declared (not publicly imported) in the proto source file.
+ // Messages for map entries are accounted for, but represented by nil.
+ // Enum declarations in "flattened ordering" come first, followed by
+ // message declarations in "flattened ordering".
+ //
+ // Dependencies are Go types for enums or messages referenced by
+ // message fields (excluding weak fields), for parent extended messages of
+ // extension fields, for enums or messages referenced by extension fields,
+ // and for input and output messages referenced by service methods.
+ // Dependencies must come after declarations, but the ordering of
+ // dependencies themselves is unspecified.
+ GoTypes []interface{}
+
+ // DependencyIndexes is an ordered list of indexes into GoTypes for the
+ // dependencies of messages, extensions, or services.
+ //
+ // There are 5 sub-lists in "flattened ordering" concatenated back-to-back:
+ // 0. Message field dependencies: list of the enum or message type
+ // referred to by every message field.
+ // 1. Extension field targets: list of the extended parent message of
+ // every extension.
+ // 2. Extension field dependencies: list of the enum or message type
+ // referred to by every extension field.
+ // 3. Service method inputs: list of the input message type
+ // referred to by every service method.
+ // 4. Service method outputs: list of the output message type
+ // referred to by every service method.
+ //
+ // The offset into DependencyIndexes for the start of each sub-list
+ // is appended to the end in reverse order.
+ DependencyIndexes []int32
+
+ // EnumInfos is a list of enum infos in "flattened ordering".
+ EnumInfos []pimpl.EnumInfo
+
+ // MessageInfos is a list of message infos in "flattened ordering".
+ // If provided, the GoType and PBType for each element is populated.
+ //
+ // Requirement: len(MessageInfos) == len(Build.Messages)
+ MessageInfos []pimpl.MessageInfo
+
+ // ExtensionInfos is a list of extension infos in "flattened ordering".
+ // Each element is initialized and registered with the protoregistry package.
+ //
+	// Requirement: len(ExtensionInfos) == len(Build.Extensions)
+ ExtensionInfos []pimpl.ExtensionInfo
+
+ // TypeRegistry is the registry to register each type descriptor.
+ // If nil, it uses protoregistry.GlobalTypes.
+ TypeRegistry interface {
+ RegisterMessage(pref.MessageType) error
+ RegisterEnum(pref.EnumType) error
+ RegisterExtension(pref.ExtensionType) error
+ }
+}
+
+// Out is the output of the builder.
+type Out struct {
+ File pref.FileDescriptor
+}
+
+func (tb Builder) Build() (out Out) {
+ // Replace the resolver with one that resolves dependencies by index,
+ // which is faster and more reliable than relying on the global registry.
+ if tb.File.FileRegistry == nil {
+ tb.File.FileRegistry = preg.GlobalFiles
+ }
+ tb.File.FileRegistry = &resolverByIndex{
+ goTypes: tb.GoTypes,
+ depIdxs: tb.DependencyIndexes,
+ fileRegistry: tb.File.FileRegistry,
+ }
+
+ // Initialize registry if unpopulated.
+ if tb.TypeRegistry == nil {
+ tb.TypeRegistry = preg.GlobalTypes
+ }
+
+ fbOut := tb.File.Build()
+ out.File = fbOut.File
+
+ // Process enums.
+ enumGoTypes := tb.GoTypes[:len(fbOut.Enums)]
+ if len(tb.EnumInfos) != len(fbOut.Enums) {
+ panic("mismatching enum lengths")
+ }
+ if len(fbOut.Enums) > 0 {
+ for i := range fbOut.Enums {
+ tb.EnumInfos[i] = pimpl.EnumInfo{
+ GoReflectType: reflect.TypeOf(enumGoTypes[i]),
+ Desc: &fbOut.Enums[i],
+ }
+ // Register enum types.
+ if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ // Process messages.
+ messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)]
+ if len(tb.MessageInfos) != len(fbOut.Messages) {
+ panic("mismatching message lengths")
+ }
+ if len(fbOut.Messages) > 0 {
+ for i := range fbOut.Messages {
+ if messageGoTypes[i] == nil {
+ continue // skip map entry
+ }
+
+ tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i])
+ tb.MessageInfos[i].Desc = &fbOut.Messages[i]
+
+ // Register message types.
+ if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+
+ // As a special-case for descriptor.proto,
+ // locally register concrete message type for the options.
+ if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" {
+ for i := range fbOut.Messages {
+ switch fbOut.Messages[i].Name() {
+ case "FileOptions":
+ descopts.File = messageGoTypes[i].(pref.ProtoMessage)
+ case "EnumOptions":
+ descopts.Enum = messageGoTypes[i].(pref.ProtoMessage)
+ case "EnumValueOptions":
+ descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage)
+ case "MessageOptions":
+ descopts.Message = messageGoTypes[i].(pref.ProtoMessage)
+ case "FieldOptions":
+ descopts.Field = messageGoTypes[i].(pref.ProtoMessage)
+ case "OneofOptions":
+ descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage)
+ case "ExtensionRangeOptions":
+ descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage)
+ case "ServiceOptions":
+ descopts.Service = messageGoTypes[i].(pref.ProtoMessage)
+ case "MethodOptions":
+ descopts.Method = messageGoTypes[i].(pref.ProtoMessage)
+ }
+ }
+ }
+ }
+
+ // Process extensions.
+ if len(tb.ExtensionInfos) != len(fbOut.Extensions) {
+ panic("mismatching extension lengths")
+ }
+ var depIdx int32
+ for i := range fbOut.Extensions {
+ // For enum and message kinds, determine the referent Go type so
+ // that we can construct their constructors.
+ const listExtDeps = 2
+ var goType reflect.Type
+ switch fbOut.Extensions[i].L1.Kind {
+ case pref.EnumKind:
+ j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+ goType = reflect.TypeOf(tb.GoTypes[j])
+ depIdx++
+ case pref.MessageKind, pref.GroupKind:
+ j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+ goType = reflect.TypeOf(tb.GoTypes[j])
+ depIdx++
+ default:
+ goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind]
+ }
+ if fbOut.Extensions[i].IsList() {
+ goType = reflect.SliceOf(goType)
+ }
+
+ pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType)
+
+ // Register extension types.
+ if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil {
+ panic(err)
+ }
+ }
+
+ return out
+}
+
+var goTypeForPBKind = map[pref.Kind]reflect.Type{
+ pref.BoolKind: reflect.TypeOf(bool(false)),
+ pref.Int32Kind: reflect.TypeOf(int32(0)),
+ pref.Sint32Kind: reflect.TypeOf(int32(0)),
+ pref.Sfixed32Kind: reflect.TypeOf(int32(0)),
+ pref.Int64Kind: reflect.TypeOf(int64(0)),
+ pref.Sint64Kind: reflect.TypeOf(int64(0)),
+ pref.Sfixed64Kind: reflect.TypeOf(int64(0)),
+ pref.Uint32Kind: reflect.TypeOf(uint32(0)),
+ pref.Fixed32Kind: reflect.TypeOf(uint32(0)),
+ pref.Uint64Kind: reflect.TypeOf(uint64(0)),
+ pref.Fixed64Kind: reflect.TypeOf(uint64(0)),
+ pref.FloatKind: reflect.TypeOf(float32(0)),
+ pref.DoubleKind: reflect.TypeOf(float64(0)),
+ pref.StringKind: reflect.TypeOf(string("")),
+ pref.BytesKind: reflect.TypeOf([]byte(nil)),
+}
+
+type depIdxs []int32
+
+// Get retrieves the jth element of the ith sub-list.
+func (x depIdxs) Get(i, j int32) int32 {
+ return x[x[int32(len(x))-i-1]+j]
+}
+
+type (
+ resolverByIndex struct {
+ goTypes []interface{}
+ depIdxs depIdxs
+ fileRegistry
+ }
+ fileRegistry interface {
+ FindFileByPath(string) (pref.FileDescriptor, error)
+ FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+ RegisterFile(pref.FileDescriptor) error
+ }
+)
+
+func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) {
+ return &es[depIdx]
+ } else {
+ return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx])
+ }
+}
+
+func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor {
+ if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+ return &ms[depIdx-len(es)]
+ } else {
+ return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx])
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
new file mode 100644
index 00000000..58372dd3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flags provides a set of flags controlled by build tags.
+package flags
+
+// ProtoLegacy specifies whether to enable support for legacy functionality
+// such as MessageSets, weak fields, and various other obscure behavior
+// that is necessary to maintain backwards compatibility with proto1 or
+// the pre-release variants of proto2 and proto3.
+//
+// This is disabled by default unless built with the "protolegacy" tag.
+//
+// WARNING: The compatibility agreement covers nothing provided by this flag.
+// As such, functionality may suddenly be removed or changed at our discretion.
+const ProtoLegacy = protoLegacy
+
+// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions.
+//
+// Lazy extension unmarshaling validates the contents of message-valued
+// extension fields at unmarshal time, but defers creating the message
+// structure until the extension is first accessed.
+const LazyUnmarshalExtensions = ProtoLegacy
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
new file mode 100644
index 00000000..a72995f0
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !protolegacy
+
+package flags
+
+const protoLegacy = false
diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
new file mode 100644
index 00000000..772e2f0e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build protolegacy
+
+package flags
+
+const protoLegacy = true
diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go
new file mode 100644
index 00000000..f45509fb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genname/name.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package genname contains constants for generated names.
+package genname
+
+const (
+ State = "state"
+
+ SizeCache = "sizeCache"
+ SizeCacheA = "XXX_sizecache"
+
+ WeakFields = "weakFields"
+ WeakFieldsA = "XXX_weak"
+
+ UnknownFields = "unknownFields"
+ UnknownFieldsA = "XXX_unrecognized"
+
+ ExtensionFields = "extensionFields"
+ ExtensionFieldsA = "XXX_InternalExtensions"
+ ExtensionFieldsB = "XXX_extensions"
+
+ WeakFieldPrefix = "XXX_weak_"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
new file mode 100644
index 00000000..4d22c960
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -0,0 +1,170 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Export is a zero-length named type that exists only to export a set of
+// functions that we do not want to appear in godoc.
+type Export struct{}
+
+// enum is any enum type generated by protoc-gen-go
+// and must be a named int32 type.
+type enum = interface{}
+
+// EnumOf returns the protoreflect.Enum interface over e.
+// It returns nil if e is nil.
+func (Export) EnumOf(e enum) pref.Enum {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e
+ default:
+ return legacyWrapEnum(reflect.ValueOf(e))
+ }
+}
+
+// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e.
+// It returns nil if e is nil.
+func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e.Descriptor()
+ default:
+ return LegacyLoadEnumDesc(reflect.TypeOf(e))
+ }
+}
+
+// EnumTypeOf returns the protoreflect.EnumType for e.
+// It returns nil if e is nil.
+func (Export) EnumTypeOf(e enum) pref.EnumType {
+ switch e := e.(type) {
+ case nil:
+ return nil
+ case pref.Enum:
+ return e.Type()
+ default:
+ return legacyLoadEnumType(reflect.TypeOf(e))
+ }
+}
+
+// EnumStringOf returns the enum value as a string, either as the name if
+// the number is resolvable, or the number formatted as a string.
+func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string {
+ ev := ed.Values().ByNumber(n)
+ if ev != nil {
+ return string(ev.Name())
+ }
+ return strconv.Itoa(int(n))
+}
+
+// message is any message type generated by protoc-gen-go
+// and must be a pointer to a named struct type.
+type message = interface{}
+
+// legacyMessageWrapper wraps a v2 message as a v1 message.
+type legacyMessageWrapper struct{ m pref.ProtoMessage }
+
+func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) }
+func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) }
+func (m legacyMessageWrapper) ProtoMessage() {}
+
+// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func (Export) ProtoMessageV1Of(m message) piface.MessageV1 {
+ switch mv := m.(type) {
+ case nil:
+ return nil
+ case piface.MessageV1:
+ return mv
+ case unwrapper:
+ return Export{}.ProtoMessageV1Of(mv.protoUnwrap())
+ case pref.ProtoMessage:
+ return legacyMessageWrapper{mv}
+ default:
+ panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
+ }
+}
+
+func (Export) protoMessageV2Of(m message) pref.ProtoMessage {
+ switch mv := m.(type) {
+ case nil:
+ return nil
+ case pref.ProtoMessage:
+ return mv
+ case legacyMessageWrapper:
+ return mv.m
+ case piface.MessageV1:
+ return nil
+ default:
+ panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m))
+ }
+}
+
+// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv
+ }
+ return legacyWrapMessage(reflect.ValueOf(m)).Interface()
+}
+
+// MessageOf returns the protoreflect.Message interface over m.
+// It returns nil if m is nil.
+func (Export) MessageOf(m message) pref.Message {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect()
+ }
+ return legacyWrapMessage(reflect.ValueOf(m))
+}
+
+// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m.
+// It returns nil if m is nil.
+func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Descriptor()
+ }
+ return LegacyLoadMessageDesc(reflect.TypeOf(m))
+}
+
+// MessageTypeOf returns the protoreflect.MessageType for m.
+// It returns nil if m is nil.
+func (Export) MessageTypeOf(m message) pref.MessageType {
+ if m == nil {
+ return nil
+ }
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Type()
+ }
+ return legacyLoadMessageInfo(reflect.TypeOf(m), "")
+}
+
+// MessageStringOf returns the message value as a string,
+// which is the message serialized in the protobuf text format.
+func (Export) MessageStringOf(m pref.ProtoMessage) string {
+ return prototext.MarshalOptions{Multiline: false}.Format(m)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
new file mode 100644
index 00000000..b82341e5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -0,0 +1,141 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) {
+	var p pointer
+	if ms, ok := in.Message.(*messageState); ok {
+		p = ms.pointer() // fast path: message embeds messageState directly
+	} else {
+		p = in.Message.(*messageReflectWrapper).pointer() // wrapper path for legacy/non-fast-path messages
+	}
+	return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p)
+}
+
+func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
+ mi.init()
+ if !mi.needsInitCheck {
+ return nil
+ }
+ if p.IsNil() {
+ for _, f := range mi.orderedCoderFields {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ }
+ return nil
+ }
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ if err := mi.isInitExtensions(e); err != nil {
+ return err
+ }
+ }
+ for _, f := range mi.orderedCoderFields {
+ if !f.isRequired && f.funcs.isInit == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ continue
+ }
+ if f.funcs.isInit == nil {
+ continue
+ }
+ if err := f.funcs.isInit(fptr, f); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
+ if ext == nil {
+ return nil
+ }
+ for _, x := range *ext {
+ ei := getExtensionFieldInfo(x.Type())
+ if ei.funcs.isInit == nil {
+ continue
+ }
+ v := x.Value()
+ if !v.IsValid() {
+ continue
+ }
+ if err := ei.funcs.isInit(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var (
+ needsInitCheckMu sync.Mutex
+ needsInitCheckMap sync.Map
+)
+
+// needsInitCheck reports whether a message needs to be checked for partial initialization.
+//
+// It returns true if the message transitively includes any required or extension fields.
+func needsInitCheck(md pref.MessageDescriptor) bool {
+	if v, ok := needsInitCheckMap.Load(md); ok {
+		if has, ok := v.(bool); ok {
+			return has // fast path: result already computed (non-bool entries are in-progress cycle markers)
+		}
+	}
+	needsInitCheckMu.Lock() // serialize traversals so cycle markers from one traversal are not misread by another
+	defer needsInitCheckMu.Unlock()
+	return needsInitCheckLocked(md)
+}
+
+func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) {
+ if v, ok := needsInitCheckMap.Load(md); ok {
+ // If has is true, we've previously determined that this message
+ // needs init checks.
+ //
+ // If has is false, we've previously determined that it can never
+ // be uninitialized.
+ //
+ // If has is not a bool, we've just encountered a cycle in the
+ // message graph. In this case, it is safe to return false: If
+ // the message does have required fields, we'll detect them later
+ // in the graph traversal.
+ has, ok := v.(bool)
+ return ok && has
+ }
+ needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message
+ defer func() {
+ needsInitCheckMap.Store(md, has)
+ }()
+ if md.RequiredNumbers().Len() > 0 {
+ return true
+ }
+ if md.ExtensionRanges().Len() > 0 {
+ return true
+ }
+ for i := 0; i < md.Fields().Len(); i++ {
+ fd := md.Fields().Get(i)
+ // Map keys are never messages, so just consider the map value.
+ if fd.IsMap() {
+ fd = fd.MapValue()
+ }
+ fmd := fd.Message()
+ if fmd != nil && needsInitCheckLocked(fmd) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
new file mode 100644
index 00000000..08d35170
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -0,0 +1,223 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type extensionFieldInfo struct {
+ wiretag uint64
+ tagsize int
+ unmarshalNeedsValue bool
+ funcs valueCoderFuncs
+ validation validationInfo
+}
+
+var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo
+
+func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+ if xi, ok := xt.(*ExtensionInfo); ok {
+ xi.lazyInit()
+ return xi.info
+ }
+ return legacyLoadExtensionFieldInfo(xt)
+}
+
+// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt.
+func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo {
+	if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok {
+		return xi.(*extensionFieldInfo)
+	}
+	e := makeExtensionFieldInfo(xt.TypeDescriptor())
+	if e, ok := legacyExtensionFieldInfoCache.LoadOrStore(xt, e); ok { // bug fix: was LoadOrStore on legacyMessageTypeCache, so this cache never hit and the message-type cache was polluted
+		return e.(*extensionFieldInfo)
+	}
+	return e
+}
+
+func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo {
+ var wiretag uint64
+ if !xd.IsPacked() {
+ wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType)
+ }
+ e := &extensionFieldInfo{
+ wiretag: wiretag,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: encoderFuncsForValue(xd),
+ }
+ // Does the unmarshal function need a value passed to it?
+ // This is true for composite types, where we pass in a message, list, or map to fill in,
+ // and for enums, where we pass in a prototype value to specify the concrete enum type.
+ switch xd.Kind() {
+ case pref.MessageKind, pref.GroupKind, pref.EnumKind:
+ e.unmarshalNeedsValue = true
+ default:
+ if xd.Cardinality() == pref.Repeated {
+ e.unmarshalNeedsValue = true
+ }
+ }
+ return e
+}
+
+type lazyExtensionValue struct {
+ atomicOnce uint32 // atomically set if value is valid
+ mu sync.Mutex
+ xi *extensionFieldInfo
+ value pref.Value
+ b []byte
+ fn func() pref.Value
+}
+
+type ExtensionField struct {
+ typ pref.ExtensionType
+
+ // value is either the value of GetValue,
+ // or a *lazyExtensionValue that then returns the value of GetValue.
+ value pref.Value
+ lazy *lazyExtensionValue
+}
+
+func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) {
+ if f.lazy == nil {
+ f.lazy = &lazyExtensionValue{xi: xi}
+ }
+ f.typ = xt
+ f.lazy.xi = xi
+ f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp)
+ f.lazy.b = append(f.lazy.b, b...)
+}
+
+func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool {
+	if f.typ == nil {
+		return true // field not set yet: any type may begin a lazy byte accumulation
+	}
+	if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
+		return true // same type, already lazy, and not yet materialized: more wire bytes may still be appended
+	}
+	return false
+}
+
+func (f *ExtensionField) lazyInit() {
+	f.lazy.mu.Lock()
+	defer f.lazy.mu.Unlock()
+	if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 {
+		return // double-checked: another goroutine materialized the value while we waited for the lock
+	}
+	if f.lazy.xi != nil {
+		b := f.lazy.b
+		val := f.typ.New()
+		for len(b) > 0 {
+			var tag uint64
+			if b[0] < 0x80 {
+				tag = uint64(b[0]) // fast path: one-byte varint tag
+				b = b[1:]
+			} else if len(b) >= 2 && b[1] < 128 {
+				tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 // fast path: two-byte varint tag
+				b = b[2:]
+			} else {
+				var n int
+				tag, n = protowire.ConsumeVarint(b) // general case: arbitrary-length varint
+				if n < 0 {
+					panic(errors.New("bad tag in lazy extension decoding")) // NOTE(review): presumably unreachable since bytes were validated at initial unmarshal — confirm
+				}
+				b = b[n:]
+			}
+			num := protowire.Number(tag >> 3)
+			wtyp := protowire.Type(tag & 7)
+			var out unmarshalOutput
+			var err error
+			val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions)
+			if err != nil {
+				panic(errors.New("decode failure in lazy extension decoding: %v", err)) // same assumption: input already validated, failure here is a bug
+			}
+			b = b[out.n:]
+		}
+		f.lazy.value = val
+	} else {
+		f.lazy.value = f.lazy.fn() // value supplied via SetLazy callback instead of wire bytes
+	}
+	f.lazy.xi = nil // drop references no longer needed so they can be collected
+	f.lazy.fn = nil
+	f.lazy.b = nil
+	atomic.StoreUint32(&f.lazy.atomicOnce, 1) // publish: Value() may now read f.lazy.value without the mutex
+}
+
+// Set sets the type and value of the extension field.
+// This must not be called concurrently.
+func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) {
+ f.typ = t
+ f.value = v
+ f.lazy = nil
+}
+
+// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
+// This must not be called concurrently.
+func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) {
+ f.typ = t
+ f.lazy = &lazyExtensionValue{fn: fn}
+}
+
+// Value returns the value of the extension field.
+// This may be called concurrently.
+func (f *ExtensionField) Value() pref.Value {
+ if f.lazy != nil {
+ if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 {
+ f.lazyInit()
+ }
+ return f.lazy.value
+ }
+ return f.value
+}
+
+// Type returns the type of the extension field.
+// This may be called concurrently.
+func (f ExtensionField) Type() pref.ExtensionType {
+ return f.typ
+}
+
+// IsSet returns whether the extension field is set.
+// This may be called concurrently.
+func (f ExtensionField) IsSet() bool {
+ return f.typ != nil
+}
+
+// IsLazy reports whether a field is lazily encoded.
+// It is exported for testing.
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool {
+ var mi *MessageInfo
+ var p pointer
+ switch m := m.(type) {
+ case *messageState:
+ mi = m.messageInfo()
+ p = m.pointer()
+ case *messageReflectWrapper:
+ mi = m.messageInfo()
+ p = m.pointer()
+ default:
+ return false
+ }
+ xd, ok := fd.(pref.ExtensionTypeDescriptor)
+ if !ok {
+ return false
+ }
+ xt := xd.Type()
+ ext := mi.extensionMap(p)
+ if ext == nil {
+ return false
+ }
+ f, ok := (*ext)[int32(fd.Number())]
+ if !ok {
+ return false
+ }
+ return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
new file mode 100644
index 00000000..c00744d3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -0,0 +1,828 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type errInvalidUTF8 struct{}
+
+func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" }
+func (errInvalidUTF8) InvalidUTF8() bool { return true }
+
+// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof.
+//
+// For size, marshal, and isInit operations, functions are set only on the first field
+// in the oneof. The functions are called when the oneof is non-nil, and will dispatch
+// to the appropriate field-specific function as necessary.
+//
+// The unmarshal function is set on each field individually as usual.
+func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) {
+ fs := si.oneofsByName[od.Name()]
+ ft := fs.Type
+ oneofFields := make(map[reflect.Type]*coderFieldInfo)
+ needIsInit := false
+ fields := od.Fields()
+ for i, lim := 0, fields.Len(); i < lim; i++ {
+ fd := od.Fields().Get(i)
+ num := fd.Number()
+ // Make a copy of the original coderFieldInfo for use in unmarshaling.
+ //
+ // oneofFields[oneofType].funcs.marshal is the field-specific marshal function.
+ //
+ // mi.coderFields[num].marshal is set on only the first field in the oneof,
+ // and dispatches to the field-specific marshaler in oneofFields.
+ cf := *mi.coderFields[num]
+ ot := si.oneofWrappersByNumber[num]
+ cf.ft = ot.Field(0).Type
+ cf.mi, cf.funcs = fieldCoder(fd, cf.ft)
+ oneofFields[ot] = &cf
+ if cf.funcs.isInit != nil {
+ needIsInit = true
+ }
+ mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ var vw reflect.Value // pointer to wrapper type
+ vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind
+ if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot {
+ vw = vi.Elem()
+ } else {
+ vw = reflect.New(ot)
+ }
+ out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts)
+ if err != nil {
+ return out, err
+ }
+ vi.Set(vw)
+ return out, nil
+ }
+ }
+ getInfo := func(p pointer) (pointer, *coderFieldInfo) {
+ v := p.AsValueOf(ft).Elem()
+ if v.IsNil() {
+ return pointer{}, nil
+ }
+ v = v.Elem() // interface -> *struct
+ if v.IsNil() {
+ return pointer{}, nil
+ }
+ return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()]
+ }
+ first := mi.coderFields[od.Fields().Get(0).Number()]
+ first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.size == nil {
+ return 0
+ }
+ return info.funcs.size(p, info, opts)
+ }
+ first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.marshal == nil {
+ return b, nil
+ }
+ return info.funcs.marshal(b, p, info, opts)
+ }
+ first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) {
+ srcp, srcinfo := getInfo(src)
+ if srcinfo == nil || srcinfo.funcs.merge == nil {
+ return
+ }
+ dstp, dstinfo := getInfo(dst)
+ if dstinfo != srcinfo {
+ dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type()))
+ dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset)
+ }
+ srcinfo.funcs.merge(dstp, srcp, srcinfo, opts)
+ }
+ if needIsInit {
+ first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error {
+ p, info := getInfo(p)
+ if info == nil || info.funcs.isInit == nil {
+ return nil
+ }
+ return info.funcs.isInit(p, info)
+ }
+ }
+}
+
+func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs {
+ var once sync.Once
+ var messageType pref.MessageType
+ lazyInit := func() {
+ once.Do(func() {
+ messageName := fd.Message().FullName()
+ messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ })
+ }
+
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return 0
+ }
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ return sizeMessage(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return b, nil
+ }
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ return appendMessage(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ fs := p.WeakFields()
+ m, ok := fs.get(f.num)
+ if !ok {
+ lazyInit()
+ if messageType == nil {
+ return unmarshalOutput{}, errUnknown
+ }
+ m = messageType.New().Interface()
+ fs.set(f.num, m)
+ }
+ return consumeMessage(b, m, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m, ok := p.WeakFields().get(f.num)
+ if !ok {
+ return nil
+ }
+ return proto.CheckInitialized(m)
+ },
+ merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ sm, ok := src.WeakFields().get(f.num)
+ if !ok {
+ return
+ }
+ dm, ok := dst.WeakFields().get(f.num)
+ if !ok {
+ lazyInit()
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
+ }
+ dm = messageType.New().Interface()
+ dst.WeakFields().set(f.num, dm)
+ }
+ opts.Merge(dm, sm)
+ },
+ }
+}
+
+func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeMessageInfo,
+ marshal: appendMessageInfo,
+ unmarshal: consumeMessageInfo,
+ merge: mergeMessage,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageInfo
+ }
+ return funcs
+ } else {
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return sizeMessage(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return appendMessage(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft).Elem()
+ if mp.IsNil() {
+ mp.Set(reflect.New(ft.Elem()))
+ }
+ return consumeMessage(b, asMessage(mp), wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return proto.CheckInitialized(m)
+ },
+ merge: mergeMessage,
+ }
+ }
+}
+
+func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize
+}
+
+func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
+ return f.mi.marshalAppendPointer(b, p.Elem(), opts)
+}
+
+func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if p.Elem().IsNil() {
+ p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts)
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
+ return f.mi.checkInitializedPointer(p.Elem())
+}
+
+func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
+ return protowire.SizeBytes(proto.Size(m)) + tagsize
+}
+
+func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(proto.Size(m)))
+ return opts.Options().MarshalAppend(b, m)
+}
+
+func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: m.ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int {
+ m := v.Message().Interface()
+ return sizeMessage(m, tagsize, opts)
+}
+
+func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ m := v.Message().Interface()
+ return appendMessage(b, m, wiretag, opts)
+}
+
+func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+ m := v.Message().Interface()
+ out, err := consumeMessage(b, m, wtyp, opts)
+ return v, out, err
+}
+
+func isInitMessageValue(v pref.Value) error {
+ m := v.Message().Interface()
+ return proto.CheckInitialized(m)
+}
+
+var coderMessageValue = valueCoderFuncs{
+ size: sizeMessageValue,
+ marshal: appendMessageValue,
+ unmarshal: consumeMessageValue,
+ isInit: isInitMessageValue,
+ merge: mergeMessageValue,
+}
+
+func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int {
+ m := v.Message().Interface()
+ return sizeGroup(m, tagsize, opts)
+}
+
+func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ m := v.Message().Interface()
+ return appendGroup(b, m, wiretag, opts)
+}
+
+func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) {
+ m := v.Message().Interface()
+ out, err := consumeGroup(b, m, num, wtyp, opts)
+ return v, out, err
+}
+
+var coderGroupValue = valueCoderFuncs{
+ size: sizeGroupValue,
+ marshal: appendGroupValue,
+ unmarshal: consumeGroupValue,
+ isInit: isInitMessageValue,
+ merge: mergeMessageValue,
+}
+
+func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ num := fd.Number()
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeGroupType,
+ marshal: appendGroupType,
+ unmarshal: consumeGroupType,
+ merge: mergeMessage,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageInfo
+ }
+ return funcs
+ } else {
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return sizeGroup(m, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return appendGroup(b, m, f.wiretag, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft).Elem()
+ if mp.IsNil() {
+ mp.Set(reflect.New(ft.Elem()))
+ }
+ return consumeGroup(b, asMessage(mp), num, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ m := asMessage(p.AsValueOf(ft).Elem())
+ return proto.CheckInitialized(m)
+ },
+ merge: mergeMessage,
+ }
+ }
+}
+
+func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts)
+}
+
+func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ return b, err
+}
+
+func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ if p.Elem().IsNil() {
+ p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
+}
+
+func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
+ return 2*tagsize + proto.Size(m)
+}
+
+func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag) // start group
+ b, err := opts.Options().MarshalAppend(b, m)
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ return b, err
+}
+
+func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: m.ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeMessageSliceInfo,
+ marshal: appendMessageSliceInfo,
+ unmarshal: consumeMessageSliceInfo,
+ merge: mergeMessageSlice,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageSliceInfo
+ }
+ return funcs
+ }
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeMessageSlice(p, ft, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendMessageSlice(b, p, f.wiretag, ft, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ return consumeMessageSlice(b, p, ft, wtyp, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ return isInitMessageSlice(p, ft)
+ },
+ merge: mergeMessageSlice,
+ }
+}
+
+func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+ }
+ return n
+}
+
+func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ siz := f.mi.sizePointer(v, opts)
+ b = protowire.AppendVarint(b, uint64(siz))
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
+ mp := pointerOfIface(m)
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(mp)
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
+ s := p.PointerSlice()
+ for _, v := range s {
+ if err := f.mi.checkInitializedPointer(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ }
+ return n
+}
+
+func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ b = protowire.AppendVarint(b, wiretag)
+ siz := proto.Size(m)
+ b = protowire.AppendVarint(b, uint64(siz))
+ b, err = opts.Options().MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ mp := reflect.New(goType.Elem())
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: asMessage(mp).ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(pointerOfValue(mp))
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func isInitMessageSlice(p pointer, goType reflect.Type) error {
+ s := p.PointerSlice()
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(goType.Elem()))
+ if err := proto.CheckInitialized(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Slices of messages
+
+func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+ list := listv.List()
+ n := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ }
+ return n
+}
+
+func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ mopts := opts.Options()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ b = protowire.AppendVarint(b, wiretag)
+ siz := proto.Size(m)
+ b = protowire.AppendVarint(b, uint64(siz))
+ var err error
+ b, err = mopts.MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return pref.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return pref.Value{}, out, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: v,
+ Message: m.Message(),
+ })
+ if err != nil {
+ return pref.Value{}, out, err
+ }
+ list.Append(m)
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return listv, out, nil
+}
+
+func isInitMessageSliceValue(listv pref.Value) error {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ if err := proto.CheckInitialized(m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+var coderMessageSliceValue = valueCoderFuncs{
+ size: sizeMessageSliceValue,
+ marshal: appendMessageSliceValue,
+ unmarshal: consumeMessageSliceValue,
+ isInit: isInitMessageSliceValue,
+ merge: mergeMessageListValue,
+}
+
+func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int {
+ list := listv.List()
+ n := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ n += 2*tagsize + proto.Size(m)
+ }
+ return n
+}
+
+func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ list := listv.List()
+ mopts := opts.Options()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ m := list.Get(i).Message().Interface()
+ b = protowire.AppendVarint(b, wiretag) // start group
+ var err error
+ b, err = mopts.MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.StartGroupType {
+ return pref.Value{}, out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return pref.Value{}, out, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: m.Message(),
+ })
+ if err != nil {
+ return pref.Value{}, out, err
+ }
+ list.Append(m)
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return listv, out, nil
+}
+
+var coderGroupSliceValue = valueCoderFuncs{
+ size: sizeGroupSliceValue,
+ marshal: appendGroupSliceValue,
+ unmarshal: consumeGroupSliceValue,
+ isInit: isInitMessageSliceValue,
+ merge: mergeMessageListValue,
+}
+
+func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
+ num := fd.Number()
+ if mi := getMessageInfo(ft); mi != nil {
+ funcs := pointerCoderFuncs{
+ size: sizeGroupSliceInfo,
+ marshal: appendGroupSliceInfo,
+ unmarshal: consumeGroupSliceInfo,
+ merge: mergeMessageSlice,
+ }
+ if needsInitCheck(mi.Desc) {
+ funcs.isInit = isInitMessageSliceInfo
+ }
+ return funcs
+ }
+ return pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeGroupSlice(p, ft, f.tagsize, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendGroupSlice(b, p, f.wiretag, ft, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ return consumeGroupSlice(b, p, num, wtyp, ft, opts)
+ },
+ isInit: func(p pointer, f *coderFieldInfo) error {
+ return isInitMessageSlice(p, ft)
+ },
+ merge: mergeMessageSlice,
+ }
+}
+
+func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(messageType.Elem()))
+ n += 2*tagsize + proto.Size(m)
+ }
+ return n
+}
+
+func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ m := asMessage(v.AsValueOf(messageType.Elem()))
+ b = protowire.AppendVarint(b, wiretag) // start group
+ b, err = opts.Options().MarshalAppend(b, m)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeGroup(num, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ mp := reflect.New(goType.Elem())
+ o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{
+ Buf: b,
+ Message: asMessage(mp).ProtoReflect(),
+ })
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(pointerOfValue(mp))
+ out.n = n
+ out.initialized = o.Flags&piface.UnmarshalInitialized != 0
+ return out, nil
+}
+
+func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ s := p.PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+ }
+ return n
+}
+
+func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ if wtyp != protowire.StartGroupType {
+ return unmarshalOutput{}, errUnknown
+ }
+ m := reflect.New(f.mi.GoReflectType.Elem()).Interface()
+ mp := pointerOfIface(m)
+ out, err := f.mi.unmarshalPointer(b, mp, f.num, opts)
+ if err != nil {
+ return out, err
+ }
+ p.AppendPointerSlice(mp)
+ return out, nil
+}
+
+func asMessage(v reflect.Value) pref.ProtoMessage {
+ if m, ok := v.Interface().(pref.ProtoMessage); ok {
+ return m
+ }
+ return legacyWrapMessage(v).Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
new file mode 100644
index 00000000..ff198d0a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -0,0 +1,5637 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// sizeBool returns the size of wire encoding a bool pointer as a Bool.
+func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bool()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBool wire encodes a bool pointer as a Bool.
+func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bool()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+// consumeBool wire decodes a bool pointer as a Bool.
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Bool() = protowire.DecodeBool(v)
+ out.n = n
+ return out, nil
+}
+
+var coderBool = pointerCoderFuncs{
+ size: sizeBool,
+ marshal: appendBool,
+ unmarshal: consumeBool,
+ merge: mergeBool,
+}
+
+// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool.
+// The zero value is not encoded.
+func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bool()
+ if v == false {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBoolNoZero wire encodes a bool pointer as a Bool.
+// The zero value is not encoded.
+func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bool()
+ if v == false {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+var coderBoolNoZero = pointerCoderFuncs{
+ size: sizeBoolNoZero,
+ marshal: appendBoolNoZero,
+ unmarshal: consumeBool,
+ merge: mergeBoolNoZero,
+}
+
+// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool.
+// It panics if the pointer is nil.
+func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.BoolPtr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+}
+
+// appendBoolPtr wire encodes a *bool pointer as a Bool.
+// It panics if the pointer is nil.
+func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.BoolPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ return b, nil
+}
+
+// consumeBoolPtr wire decodes a *bool pointer as a Bool.
+func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.BoolPtr()
+ if *vp == nil {
+ *vp = new(bool)
+ }
+ **vp = protowire.DecodeBool(v)
+ out.n = n
+ return out, nil
+}
+
+var coderBoolPtr = pointerCoderFuncs{
+ size: sizeBoolPtr,
+ marshal: appendBoolPtr,
+ unmarshal: consumeBoolPtr,
+ merge: mergeBoolPtr,
+}
+
+// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool.
+func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.BoolSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ return size
+}
+
+// appendBoolSlice encodes a []bool pointer as a repeated Bool.
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BoolSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ }
+ return b, nil
+}
+
+// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool.
+func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BoolSlice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, protowire.DecodeBool(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, protowire.DecodeBool(v))
+ out.n = n
+ return out, nil
+}
+
+var coderBoolSlice = pointerCoderFuncs{
+ size: sizeBoolSlice,
+ marshal: appendBoolSlice,
+ unmarshal: consumeBoolSlice,
+ merge: mergeBoolSlice,
+}
+
+// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool.
+func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.BoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool.
+func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeBool(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v))
+ }
+ return b, nil
+}
+
+var coderBoolPackedSlice = pointerCoderFuncs{
+ size: sizeBoolPackedSlice,
+ marshal: appendBoolPackedSlice,
+ unmarshal: consumeBoolSlice,
+ merge: mergeBoolSlice,
+}
+
+// sizeBoolValue returns the size of wire encoding a bool value as a Bool.
+func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+}
+
+// appendBoolValue encodes a bool value as a Bool.
+func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ return b, nil
+}
+
+// consumeBoolValue decodes a bool value as a Bool.
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil
+}
+
+var coderBoolValue = valueCoderFuncs{
+ size: sizeBoolValue,
+ marshal: appendBoolValue,
+ unmarshal: consumeBoolValue,
+ merge: mergeScalarValue,
+}
+
+// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool.
+func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ return size
+}
+
+// appendBoolSliceValue encodes a []bool value as a repeated Bool.
+func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ }
+ return b, nil
+}
+
+// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool.
+func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderBoolSliceValue = valueCoderFuncs{
+ size: sizeBoolSliceValue,
+ marshal: appendBoolSliceValue,
+ unmarshal: consumeBoolSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool.
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool.
+func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ }
+ return b, nil
+}
+
+var coderBoolPackedSliceValue = valueCoderFuncs{
+ size: sizeBoolPackedSliceValue,
+ marshal: appendBoolPackedSliceValue,
+ unmarshal: consumeBoolSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeEnumValue returns the size of wire encoding a value as a Enum.
+func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(v.Enum()))
+}
+
+// appendEnumValue encodes a value as a Enum.
+func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ return b, nil
+}
+
+// consumeEnumValue decodes a value as a Enum.
+func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil
+}
+
+var coderEnumValue = valueCoderFuncs{
+ size: sizeEnumValue,
+ marshal: appendEnumValue,
+ unmarshal: consumeEnumValue,
+ merge: mergeScalarValue,
+}
+
+// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum.
+func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(v.Enum()))
+ }
+ return size
+}
+
+// appendEnumSliceValue encodes a [] value as a repeated Enum.
+func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ }
+ return b, nil
+}
+
+// consumeEnumSliceValue wire decodes a [] value as a repeated Enum.
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderEnumSliceValue = valueCoderFuncs{
+ size: sizeEnumSliceValue,
+ marshal: appendEnumSliceValue,
+ unmarshal: consumeEnumSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum.
+func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Enum()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum.
+func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Enum()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ }
+ return b, nil
+}
+
+var coderEnumPackedSliceValue = valueCoderFuncs{
+ size: sizeEnumPackedSliceValue,
+ marshal: appendEnumPackedSliceValue,
+ unmarshal: consumeEnumSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32.
+func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32 wire encodes a int32 pointer as a Int32.
+func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt32 wire decodes a int32 pointer as a Int32.
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int32() = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt32 = pointerCoderFuncs{
+ size: sizeInt32,
+ marshal: appendInt32,
+ unmarshal: consumeInt32,
+ merge: mergeInt32,
+}
+
+// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32.
+// The zero value is not encoded.
+func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32NoZero wire encodes a int32 pointer as a Int32.
+// The zero value is not encoded.
+func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderInt32NoZero = pointerCoderFuncs{
+ size: sizeInt32NoZero,
+ marshal: appendInt32NoZero,
+ unmarshal: consumeInt32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32.
+// It panics if the pointer is nil.
+func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int32Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt32Ptr wire encodes a *int32 pointer as a Int32.
+// It panics if the pointer is nil.
+func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt32Ptr wire decodes a *int32 pointer as a Int32.
+func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt32Ptr = pointerCoderFuncs{
+ size: sizeInt32Ptr,
+ marshal: appendInt32Ptr,
+ unmarshal: consumeInt32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32.
+func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendInt32Slice encodes a []int32 pointer as a repeated Int32.
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32.
+func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderInt32Slice = pointerCoderFuncs{
+ size: sizeInt32Slice,
+ marshal: appendInt32Slice,
+ unmarshal: consumeInt32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32.
+func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32.
+func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderInt32PackedSlice = pointerCoderFuncs{
+ size: sizeInt32PackedSlice,
+ marshal: appendInt32PackedSlice,
+ unmarshal: consumeInt32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeInt32Value returns the size of wire encoding a int32 value as a Int32.
+func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
+}
+
+// appendInt32Value encodes a int32 value as a Int32.
+func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ return b, nil
+}
+
+// consumeInt32Value decodes a int32 value as a Int32.
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(v)), out, nil
+}
+
+var coderInt32Value = valueCoderFuncs{
+ size: sizeInt32Value,
+ marshal: appendInt32Value,
+ unmarshal: consumeInt32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32.
+func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ return size
+}
+
+// appendInt32SliceValue encodes a []int32 value as a repeated Int32.
+func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ }
+ return b, nil
+}
+
+// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32.
+func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderInt32SliceValue = valueCoderFuncs{
+ size: sizeInt32SliceValue,
+ marshal: appendInt32SliceValue,
+ unmarshal: consumeInt32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32.
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32.
+func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(int32(v.Int())))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ }
+ return b, nil
+}
+
+var coderInt32PackedSliceValue = valueCoderFuncs{
+ size: sizeInt32PackedSliceValue,
+ marshal: appendInt32PackedSliceValue,
+ unmarshal: consumeInt32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32.
+func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32 wire encodes a int32 pointer as a Sint32.
+func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+// consumeSint32 wire decodes a int32 pointer as a Sint32.
+func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32 = pointerCoderFuncs{
+ size: sizeSint32,
+ marshal: appendSint32,
+ unmarshal: consumeSint32,
+ merge: mergeInt32,
+}
+
+// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32.
+// The zero value is not encoded.
+func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32NoZero wire encodes a int32 pointer as a Sint32.
+// The zero value is not encoded.
+func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+var coderSint32NoZero = pointerCoderFuncs{
+ size: sizeSint32NoZero,
+ marshal: appendSint32NoZero,
+ unmarshal: consumeSint32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32.
+// It panics if the pointer is nil.
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int32Ptr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+}
+
+// appendSint32Ptr wire encodes a *int32 pointer as a Sint32.
+// It panics if the pointer is nil.
+func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ return b, nil
+}
+
+// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32.
+func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32Ptr = pointerCoderFuncs{
+ size: sizeSint32Ptr,
+ marshal: appendSint32Ptr,
+ unmarshal: consumeSint32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32.
+func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ return size
+}
+
+// appendSint32Slice encodes a []int32 pointer as a repeated Sint32.
+func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ }
+ return b, nil
+}
+
+// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32.
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32)))
+ out.n = n
+ return out, nil
+}
+
+var coderSint32Slice = pointerCoderFuncs{
+ size: sizeSint32Slice,
+ marshal: appendSint32Slice,
+ unmarshal: consumeSint32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32.
+func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32.
+func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v)))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v)))
+ }
+ return b, nil
+}
+
+var coderSint32PackedSlice = pointerCoderFuncs{
+ size: sizeSint32PackedSlice,
+ marshal: appendSint32PackedSlice,
+ unmarshal: consumeSint32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32.
+func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+}
+
+// appendSint32Value encodes a int32 value as a Sint32.
+func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ return b, nil
+}
+
+// consumeSint32Value decodes a int32 value as a Sint32.
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil
+}
+
+var coderSint32Value = valueCoderFuncs{
+ size: sizeSint32Value,
+ marshal: appendSint32Value,
+ unmarshal: consumeSint32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32.
+func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return size
+}
+
+// appendSint32SliceValue encodes a []int32 value as a repeated Sint32.
+func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return b, nil
+}
+
+// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32.
+func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSint32SliceValue = valueCoderFuncs{
+ size: sizeSint32SliceValue,
+ marshal: appendSint32SliceValue,
+ unmarshal: consumeSint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32.
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32.
+func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ }
+ return b, nil
+}
+
+var coderSint32PackedSliceValue = valueCoderFuncs{
+ size: sizeSint32PackedSliceValue,
+ marshal: appendSint32PackedSliceValue,
+ unmarshal: consumeSint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32.
+func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint32()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32 wire encodes a uint32 pointer as a Uint32.
+func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeUint32 wire decodes a uint32 pointer as a Uint32.
+func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint32() = uint32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint32 = pointerCoderFuncs{
+ size: sizeUint32,
+ marshal: appendUint32,
+ unmarshal: consumeUint32,
+ merge: mergeUint32,
+}
+
+// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32.
+// The zero value is not encoded.
+func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32NoZero wire encodes a uint32 pointer as a Uint32.
+// The zero value is not encoded.
+func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderUint32NoZero = pointerCoderFuncs{
+ size: sizeUint32NoZero,
+ marshal: appendUint32NoZero,
+ unmarshal: consumeUint32,
+ merge: mergeUint32NoZero,
+}
+
+// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32.
+// It panics if the pointer is nil.
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Uint32Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32.
+// It panics if the pointer is nil.
+func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32.
+func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint32Ptr()
+ if *vp == nil {
+ *vp = new(uint32)
+ }
+ **vp = uint32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint32Ptr = pointerCoderFuncs{
+ size: sizeUint32Ptr,
+ marshal: appendUint32Ptr,
+ unmarshal: consumeUint32Ptr,
+ merge: mergeUint32Ptr,
+}
+
+// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32.
+func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32.
+func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32.
+func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, uint32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, uint32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderUint32Slice = pointerCoderFuncs{
+ size: sizeUint32Slice,
+ marshal: appendUint32Slice,
+ unmarshal: consumeUint32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32.
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32.
+func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderUint32PackedSlice = pointerCoderFuncs{
+ size: sizeUint32PackedSlice,
+ marshal: appendUint32PackedSlice,
+ unmarshal: consumeUint32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32.
+func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
+}
+
+// appendUint32Value encodes a uint32 value as a Uint32.
+func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ return b, nil
+}
+
+// consumeUint32Value decodes a uint32 value as a Uint32.
+func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint32(uint32(v)), out, nil
+}
+
+var coderUint32Value = valueCoderFuncs{
+ size: sizeUint32Value,
+ marshal: appendUint32Value,
+ unmarshal: consumeUint32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32.
+func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ return size
+}
+
+// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32.
+func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ }
+ return b, nil
+}
+
+// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32.
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderUint32SliceValue = valueCoderFuncs{
+ size: sizeUint32SliceValue,
+ marshal: appendUint32SliceValue,
+ unmarshal: consumeUint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32.
+func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32.
+func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(uint32(v.Uint())))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ }
+ return b, nil
+}
+
+var coderUint32PackedSliceValue = valueCoderFuncs{
+ size: sizeUint32PackedSliceValue,
+ marshal: appendUint32PackedSliceValue,
+ unmarshal: consumeUint32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64.
+func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64 wire encodes a int64 pointer as a Int64.
+func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt64 wire decodes a int64 pointer as a Int64.
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int64() = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt64 = pointerCoderFuncs{
+ size: sizeInt64,
+ marshal: appendInt64,
+ unmarshal: consumeInt64,
+ merge: mergeInt64,
+}
+
+// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64.
+// The zero value is not encoded.
+func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64NoZero wire encodes a int64 pointer as a Int64.
+// The zero value is not encoded.
+func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+var coderInt64NoZero = pointerCoderFuncs{
+ size: sizeInt64NoZero,
+ marshal: appendInt64NoZero,
+ unmarshal: consumeInt64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64.
+// It panics if the pointer is nil.
+func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int64Ptr()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+// appendInt64Ptr wire encodes a *int64 pointer as a Int64.
+// It panics if the pointer is nil.
+func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+// consumeInt64Ptr wire decodes a *int64 pointer as a Int64.
+func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderInt64Ptr = pointerCoderFuncs{
+ size: sizeInt64Ptr,
+ marshal: appendInt64Ptr,
+ unmarshal: consumeInt64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64.
+func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(uint64(v))
+ }
+ return size
+}
+
+// appendInt64Slice encodes a []int64 pointer as a repeated Int64.
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64.
+func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int64(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int64(v))
+ out.n = n
+ return out, nil
+}
+
+var coderInt64Slice = pointerCoderFuncs{
+ size: sizeInt64Slice,
+ marshal: appendInt64Slice,
+ unmarshal: consumeInt64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64.
+func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64.
+func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(uint64(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderInt64PackedSlice = pointerCoderFuncs{
+ size: sizeInt64PackedSlice,
+ marshal: appendInt64PackedSlice,
+ unmarshal: consumeInt64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeInt64Value returns the size of wire encoding a int64 value as a Int64.
+func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(uint64(v.Int()))
+}
+
+// appendInt64Value encodes a int64 value as a Int64.
+func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ return b, nil
+}
+
+// consumeInt64Value decodes a int64 value as a Int64.
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(int64(v)), out, nil
+}
+
+var coderInt64Value = valueCoderFuncs{
+ size: sizeInt64Value,
+ marshal: appendInt64Value,
+ unmarshal: consumeInt64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64.
+func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(uint64(v.Int()))
+ }
+ return size
+}
+
+// appendInt64SliceValue encodes a []int64 value as a repeated Int64.
+func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64.
+func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderInt64SliceValue = valueCoderFuncs{
+ size: sizeInt64SliceValue,
+ marshal: appendInt64SliceValue,
+ unmarshal: consumeInt64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64.
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Int()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64.
+func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(uint64(v.Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+var coderInt64PackedSliceValue = valueCoderFuncs{
+ size: sizeInt64PackedSliceValue,
+ marshal: appendInt64PackedSliceValue,
+ unmarshal: consumeInt64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64.
+func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64 wire encodes a int64 pointer as a Sint64.
+func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+// consumeSint64 wire decodes a int64 pointer as a Sint64.
+func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int64() = protowire.DecodeZigZag(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSint64 = pointerCoderFuncs{
+ size: sizeSint64,
+ marshal: appendSint64,
+ unmarshal: consumeSint64,
+ merge: mergeInt64,
+}
+
+// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64.
+// The zero value is not encoded.
+func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64NoZero wire encodes a int64 pointer as a Sint64.
+// The zero value is not encoded.
+func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+var coderSint64NoZero = pointerCoderFuncs{
+ size: sizeSint64NoZero,
+ marshal: appendSint64NoZero,
+ unmarshal: consumeSint64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64.
+// It panics if the pointer is nil.
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Int64Ptr()
+ return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+}
+
+// appendSint64Ptr wire encodes a *int64 pointer as a Sint64.
+// It panics if the pointer is nil.
+func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ return b, nil
+}
+
+// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64.
+func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = protowire.DecodeZigZag(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSint64Ptr = pointerCoderFuncs{
+ size: sizeSint64Ptr,
+ marshal: appendSint64Ptr,
+ unmarshal: consumeSint64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64.
+func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ return size
+}
+
+// appendSint64Slice encodes a []int64 pointer as a repeated Sint64.
+func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ }
+ return b, nil
+}
+
+// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64.
+func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, protowire.DecodeZigZag(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, protowire.DecodeZigZag(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSint64Slice = pointerCoderFuncs{
+ size: sizeSint64Slice,
+ marshal: appendSint64Slice,
+ unmarshal: consumeSint64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64.
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64.
+func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v))
+ }
+ return b, nil
+}
+
+var coderSint64PackedSlice = pointerCoderFuncs{
+ size: sizeSint64PackedSlice,
+ marshal: appendSint64PackedSlice,
+ unmarshal: consumeSint64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64.
+func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+}
+
+// appendSint64Value encodes a int64 value as a Sint64.
+func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ return b, nil
+}
+
+// consumeSint64Value decodes a int64 value as a Sint64.
+func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil
+}
+
+var coderSint64Value = valueCoderFuncs{
+ size: sizeSint64Value,
+ marshal: appendSint64Value,
+ unmarshal: consumeSint64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64.
+func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ return size
+}
+
+// appendSint64SliceValue encodes a []int64 value as a repeated Sint64.
+func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64.
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSint64SliceValue = valueCoderFuncs{
+ size: sizeSint64SliceValue,
+ marshal: appendSint64SliceValue,
+ unmarshal: consumeSint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64.
+func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64.
+func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSint64PackedSliceValue = valueCoderFuncs{
+ size: sizeSint64PackedSliceValue,
+ marshal: appendSint64PackedSliceValue,
+ unmarshal: consumeSint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64.
+func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint64()
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64 wire encodes a uint64 pointer as a Uint64.
+func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+// consumeUint64 wire decodes a uint64 pointer as a Uint64.
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint64() = v
+ out.n = n
+ return out, nil
+}
+
+var coderUint64 = pointerCoderFuncs{
+ size: sizeUint64,
+ marshal: appendUint64,
+ unmarshal: consumeUint64,
+ merge: mergeUint64,
+}
+
+// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64.
+// The zero value is not encoded.
+func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64NoZero wire encodes a uint64 pointer as a Uint64.
+// The zero value is not encoded.
+func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+var coderUint64NoZero = pointerCoderFuncs{
+ size: sizeUint64NoZero,
+ marshal: appendUint64NoZero,
+ unmarshal: consumeUint64,
+ merge: mergeUint64NoZero,
+}
+
+// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64.
+// It panics if the pointer is nil.
+func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.Uint64Ptr()
+ return f.tagsize + protowire.SizeVarint(v)
+}
+
+// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64.
+// It panics if the pointer is nil.
+func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ return b, nil
+}
+
+// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64.
+func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint64Ptr()
+ if *vp == nil {
+ *vp = new(uint64)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderUint64Ptr = pointerCoderFuncs{
+ size: sizeUint64Ptr,
+ marshal: appendUint64Ptr,
+ unmarshal: consumeUint64Ptr,
+ merge: mergeUint64Ptr,
+}
+
+// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64.
+func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeVarint(v)
+ }
+ return size
+}
+
+// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64.
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, v)
+ }
+ return b, nil
+}
+
+// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64.
+func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderUint64Slice = pointerCoderFuncs{
+ size: sizeUint64Slice,
+ marshal: appendUint64Slice,
+ unmarshal: consumeUint64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64.
+func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(v)
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64.
+func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeVarint(v)
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendVarint(b, v)
+ }
+ return b, nil
+}
+
+var coderUint64PackedSlice = pointerCoderFuncs{
+ size: sizeUint64PackedSlice,
+ marshal: appendUint64PackedSlice,
+ unmarshal: consumeUint64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64.
+func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeVarint(v.Uint())
+}
+
+// appendUint64Value encodes a uint64 value as a Uint64.
+func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, v.Uint())
+ return b, nil
+}
+
+// consumeUint64Value decodes a uint64 value as a Uint64.
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint64(v), out, nil
+}
+
+var coderUint64Value = valueCoderFuncs{
+ size: sizeUint64Value,
+ marshal: appendUint64Value,
+ unmarshal: consumeUint64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64.
+func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeVarint(v.Uint())
+ }
+ return size
+}
+
+// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64.
+func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendVarint(b, v.Uint())
+ }
+ return b, nil
+}
+
+// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64.
+func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ var v uint64
+ var n int
+ if len(b) >= 1 && b[0] < 0x80 {
+ v = uint64(b[0])
+ n = 1
+ } else if len(b) >= 2 && b[1] < 128 {
+ v = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ n = 2
+ } else {
+ v, n = protowire.ConsumeVarint(b)
+ }
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderUint64SliceValue = valueCoderFuncs{
+ size: sizeUint64SliceValue,
+ marshal: appendUint64SliceValue,
+ unmarshal: consumeUint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64.
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i, llen := 0, llen; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(v.Uint())
+ }
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64.
+func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ n += protowire.SizeVarint(v.Uint())
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, v.Uint())
+ }
+ return b, nil
+}
+
+var coderUint64PackedSliceValue = valueCoderFuncs{
+ size: sizeUint64PackedSliceValue,
+ marshal: appendUint64PackedSliceValue,
+ unmarshal: consumeUint64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32.
+func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32 wire encodes a int32 pointer as a Sfixed32.
+func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32.
+func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int32() = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32 = pointerCoderFuncs{
+ size: sizeSfixed32,
+ marshal: appendSfixed32,
+ unmarshal: consumeSfixed32,
+ merge: mergeInt32,
+}
+
+// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32.
+// The zero value is not encoded.
+func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32.
+// The zero value is not encoded.
+func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+var coderSfixed32NoZero = pointerCoderFuncs{
+ size: sizeSfixed32NoZero,
+ marshal: appendSfixed32NoZero,
+ unmarshal: consumeSfixed32,
+ merge: mergeInt32NoZero,
+}
+
+// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32.
+// It panics if the pointer is nil.
+func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32.
+// It panics if the pointer is nil.
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ return b, nil
+}
+
+// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32.
+func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int32Ptr()
+ if *vp == nil {
+ *vp = new(int32)
+ }
+ **vp = int32(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32Ptr = pointerCoderFuncs{
+ size: sizeSfixed32Ptr,
+ marshal: appendSfixed32Ptr,
+ unmarshal: consumeSfixed32Ptr,
+ merge: mergeInt32Ptr,
+}
+
+// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32.
+func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32.
+func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+
+// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32.
+func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int32(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int32(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed32Slice = pointerCoderFuncs{
+ size: sizeSfixed32Slice,
+ marshal: appendSfixed32Slice,
+ unmarshal: consumeSfixed32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32.
+func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32.
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+
+var coderSfixed32PackedSlice = pointerCoderFuncs{
+ size: sizeSfixed32PackedSlice,
+ marshal: appendSfixed32PackedSlice,
+ unmarshal: consumeSfixed32Slice,
+ merge: mergeInt32Slice,
+}
+
+// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32.
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendSfixed32Value encodes a int32 value as a Sfixed32.
+func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ return b, nil
+}
+
+// consumeSfixed32Value decodes a int32 value as a Sfixed32.
+func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt32(int32(v)), out, nil
+}
+
+var coderSfixed32Value = valueCoderFuncs{
+ size: sizeSfixed32Value,
+ marshal: appendSfixed32Value,
+ unmarshal: consumeSfixed32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32.
+func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32.
+func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32.
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSfixed32SliceValue = valueCoderFuncs{
+ size: sizeSfixed32SliceValue,
+ marshal: appendSfixed32SliceValue,
+ unmarshal: consumeSfixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32.
+func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32.
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSfixed32PackedSliceValue = valueCoderFuncs{
+ size: sizeSfixed32PackedSliceValue,
+ marshal: appendSfixed32PackedSliceValue,
+ unmarshal: consumeSfixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32.
+func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32 wire encodes a uint32 pointer as a Fixed32.
+func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+// consumeFixed32 wire decodes a uint32 pointer as a Fixed32.
+func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint32() = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32 = pointerCoderFuncs{
+ size: sizeFixed32,
+ marshal: appendFixed32,
+ unmarshal: consumeFixed32,
+ merge: mergeUint32,
+}
+
+// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32.
+// The zero value is not encoded.
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint32()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32.
+// The zero value is not encoded.
+func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+var coderFixed32NoZero = pointerCoderFuncs{
+ size: sizeFixed32NoZero,
+ marshal: appendFixed32NoZero,
+ unmarshal: consumeFixed32,
+ merge: mergeUint32NoZero,
+}
+
+// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32.
+// It panics if the pointer is nil.
+func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32.
+// It panics if the pointer is nil.
+func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ return b, nil
+}
+
+// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32.
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint32Ptr()
+ if *vp == nil {
+ *vp = new(uint32)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32Ptr = pointerCoderFuncs{
+ size: sizeFixed32Ptr,
+ marshal: appendFixed32Ptr,
+ unmarshal: consumeFixed32Ptr,
+ merge: mergeUint32Ptr,
+}
+
+// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32.
+func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32.
+func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, v)
+ }
+ return b, nil
+}
+
+// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32.
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderFixed32Slice = pointerCoderFuncs{
+ size: sizeFixed32Slice,
+ marshal: appendFixed32Slice,
+ unmarshal: consumeFixed32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32.
+func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32.
+func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, v)
+ }
+ return b, nil
+}
+
+var coderFixed32PackedSlice = pointerCoderFuncs{
+ size: sizeFixed32PackedSlice,
+ marshal: appendFixed32PackedSlice,
+ unmarshal: consumeFixed32Slice,
+ merge: mergeUint32Slice,
+}
+
+// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32.
+func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendFixed32Value encodes a uint32 value as a Fixed32.
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ return b, nil
+}
+
+// consumeFixed32Value decodes a uint32 value as a Fixed32.
+func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint32(uint32(v)), out, nil
+}
+
+var coderFixed32Value = valueCoderFuncs{
+ size: sizeFixed32Value,
+ marshal: appendFixed32Value,
+ unmarshal: consumeFixed32Value,
+ merge: mergeScalarValue,
+}
+
+// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32.
+func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32.
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ }
+ return b, nil
+}
+
+// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32.
+func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFixed32SliceValue = valueCoderFuncs{
+ size: sizeFixed32SliceValue,
+ marshal: appendFixed32SliceValue,
+ unmarshal: consumeFixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32.
+func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32.
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ }
+ return b, nil
+}
+
+var coderFixed32PackedSliceValue = valueCoderFuncs{
+ size: sizeFixed32PackedSliceValue,
+ marshal: appendFixed32PackedSliceValue,
+ unmarshal: consumeFixed32SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFloat returns the size of wire encoding a float32 pointer as a Float.
+func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloat wire encodes a float32 pointer as a Float.
+func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float32()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+// consumeFloat wire decodes a float32 pointer as a Float.
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Float32() = math.Float32frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderFloat = pointerCoderFuncs{
+ size: sizeFloat,
+ marshal: appendFloat,
+ unmarshal: consumeFloat,
+ merge: mergeFloat32,
+}
+
+// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float.
+// The zero value is not encoded.
+func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Float32()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatNoZero wire encodes a float32 pointer as a Float.
+// The zero value is not encoded.
+func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float32()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+var coderFloatNoZero = pointerCoderFuncs{
+ size: sizeFloatNoZero,
+ marshal: appendFloatNoZero,
+ unmarshal: consumeFloat,
+ merge: mergeFloat32NoZero,
+}
+
+// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float.
+// It panics if the pointer is nil.
+func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatPtr wire encodes a *float32 pointer as a Float.
+// It panics if the pointer is nil.
+func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Float32Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ return b, nil
+}
+
+// consumeFloatPtr wire decodes a *float32 pointer as a Float.
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Float32Ptr()
+ if *vp == nil {
+ *vp = new(float32)
+ }
+ **vp = math.Float32frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderFloatPtr = pointerCoderFuncs{
+ size: sizeFloatPtr,
+ marshal: appendFloatPtr,
+ unmarshal: consumeFloatPtr,
+ merge: mergeFloat32Ptr,
+}
+
+// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float.
+func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float32Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFloatSlice encodes a []float32 pointer as a repeated Float.
+func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float32Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+
+// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float.
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Float32Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, math.Float32frombits(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, math.Float32frombits(v))
+ out.n = n
+ return out, nil
+}
+
+var coderFloatSlice = pointerCoderFuncs{
+ size: sizeFloatSlice,
+ marshal: appendFloatSlice,
+ unmarshal: consumeFloatSlice,
+ merge: mergeFloat32Slice,
+}
+
+// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float.
+func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed32()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float.
+func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+
+var coderFloatPackedSlice = pointerCoderFuncs{
+ size: sizeFloatPackedSlice,
+ marshal: appendFloatPackedSlice,
+ unmarshal: consumeFloatSlice,
+ merge: mergeFloat32Slice,
+}
+
+// sizeFloatValue returns the size of wire encoding a float32 value as a Float.
+func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed32()
+}
+
+// appendFloatValue encodes a float32 value as a Float.
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ return b, nil
+}
+
+// consumeFloatValue decodes a float32 value as a Float.
+func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil
+}
+
+var coderFloatValue = valueCoderFuncs{
+ size: sizeFloatValue,
+ marshal: appendFloatValue,
+ unmarshal: consumeFloatValue,
+ merge: mergeScalarValue,
+}
+
+// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float.
+func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed32())
+ return size
+}
+
+// appendFloatSliceValue encodes a []float32 value as a repeated Float.
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ }
+ return b, nil
+}
+
+// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float.
+func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFloatSliceValue = valueCoderFuncs{
+ size: sizeFloatSliceValue,
+ marshal: appendFloatSliceValue,
+ unmarshal: consumeFloatSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float.
+func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed32()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float.
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed32()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ }
+ return b, nil
+}
+
+var coderFloatPackedSliceValue = valueCoderFuncs{
+ size: sizeFloatPackedSliceValue,
+ marshal: appendFloatPackedSliceValue,
+ unmarshal: consumeFloatSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64.
+func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64 wire encodes a int64 pointer as a Sfixed64.
+func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64.
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Int64() = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64 = pointerCoderFuncs{
+ size: sizeSfixed64,
+ marshal: appendSfixed64,
+ unmarshal: consumeSfixed64,
+ merge: mergeInt64,
+}
+
+// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64.
+// The zero value is not encoded.
+func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Int64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64.
+// The zero value is not encoded.
+func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Int64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+var coderSfixed64NoZero = pointerCoderFuncs{
+ size: sizeSfixed64NoZero,
+ marshal: appendSfixed64NoZero,
+ unmarshal: consumeSfixed64,
+ merge: mergeInt64NoZero,
+}
+
+// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64.
+// It panics if the pointer is nil.
+func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64.
+// It panics if the pointer is nil.
+func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Int64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ return b, nil
+}
+
+// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64.
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Int64Ptr()
+ if *vp == nil {
+ *vp = new(int64)
+ }
+ **vp = int64(v)
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64Ptr = pointerCoderFuncs{
+ size: sizeSfixed64Ptr,
+ marshal: appendSfixed64Ptr,
+ unmarshal: consumeSfixed64Ptr,
+ merge: mergeInt64Ptr,
+}
+
+// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64.
+func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64.
+func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+
+// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64.
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Int64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, int64(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, int64(v))
+ out.n = n
+ return out, nil
+}
+
+var coderSfixed64Slice = pointerCoderFuncs{
+ size: sizeSfixed64Slice,
+ marshal: appendSfixed64Slice,
+ unmarshal: consumeSfixed64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64.
+func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64.
+func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Int64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+
+var coderSfixed64PackedSlice = pointerCoderFuncs{
+ size: sizeSfixed64PackedSlice,
+ marshal: appendSfixed64PackedSlice,
+ unmarshal: consumeSfixed64Slice,
+ merge: mergeInt64Slice,
+}
+
+// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64.
+func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendSfixed64Value encodes a int64 value as a Sfixed64.
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ return b, nil
+}
+
+// consumeSfixed64Value decodes a int64 value as a Sfixed64.
+func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfInt64(int64(v)), out, nil
+}
+
+var coderSfixed64Value = valueCoderFuncs{
+ size: sizeSfixed64Value,
+ marshal: appendSfixed64Value,
+ unmarshal: consumeSfixed64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64.
+func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64.
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64.
+func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderSfixed64SliceValue = valueCoderFuncs{
+ size: sizeSfixed64SliceValue,
+ marshal: appendSfixed64SliceValue,
+ unmarshal: consumeSfixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64.
+func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64.
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ }
+ return b, nil
+}
+
+var coderSfixed64PackedSliceValue = valueCoderFuncs{
+ size: sizeSfixed64PackedSliceValue,
+ marshal: appendSfixed64PackedSliceValue,
+ unmarshal: consumeSfixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64.
+func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64 wire encodes a uint64 pointer as a Fixed64.
+func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+// consumeFixed64 wire decodes a uint64 pointer as a Fixed64.
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Uint64() = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64 = pointerCoderFuncs{
+ size: sizeFixed64,
+ marshal: appendFixed64,
+ unmarshal: consumeFixed64,
+ merge: mergeUint64,
+}
+
+// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64.
+// The zero value is not encoded.
+func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Uint64()
+ if v == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64.
+// The zero value is not encoded.
+func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Uint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+var coderFixed64NoZero = pointerCoderFuncs{
+ size: sizeFixed64NoZero,
+ marshal: appendFixed64NoZero,
+ unmarshal: consumeFixed64,
+ merge: mergeUint64NoZero,
+}
+
+// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64.
+// It panics if the pointer is nil.
+func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64.
+// It panics if the pointer is nil.
+func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Uint64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ return b, nil
+}
+
+// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64.
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Uint64Ptr()
+ if *vp == nil {
+ *vp = new(uint64)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64Ptr = pointerCoderFuncs{
+ size: sizeFixed64Ptr,
+ marshal: appendFixed64Ptr,
+ unmarshal: consumeFixed64Ptr,
+ merge: mergeUint64Ptr,
+}
+
+// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64.
+func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64.
+func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, v)
+ }
+ return b, nil
+}
+
+// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64.
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Uint64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, v)
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderFixed64Slice = pointerCoderFuncs{
+ size: sizeFixed64Slice,
+ marshal: appendFixed64Slice,
+ unmarshal: consumeFixed64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64.
+func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64.
+func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Uint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, v)
+ }
+ return b, nil
+}
+
+var coderFixed64PackedSlice = pointerCoderFuncs{
+ size: sizeFixed64PackedSlice,
+ marshal: appendFixed64PackedSlice,
+ unmarshal: consumeFixed64Slice,
+ merge: mergeUint64Slice,
+}
+
+// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64.
+func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendFixed64Value encodes a uint64 value as a Fixed64.
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, v.Uint())
+ return b, nil
+}
+
+// consumeFixed64Value decodes a uint64 value as a Fixed64.
+func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfUint64(v), out, nil
+}
+
+var coderFixed64Value = valueCoderFuncs{
+ size: sizeFixed64Value,
+ marshal: appendFixed64Value,
+ unmarshal: consumeFixed64Value,
+ merge: mergeScalarValue,
+}
+
+// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64.
+func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64.
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, v.Uint())
+ }
+ return b, nil
+}
+
+// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64.
+func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderFixed64SliceValue = valueCoderFuncs{
+ size: sizeFixed64SliceValue,
+ marshal: appendFixed64SliceValue,
+ unmarshal: consumeFixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64.
+func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64.
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, v.Uint())
+ }
+ return b, nil
+}
+
+var coderFixed64PackedSliceValue = valueCoderFuncs{
+ size: sizeFixed64PackedSliceValue,
+ marshal: appendFixed64PackedSliceValue,
+ unmarshal: consumeFixed64SliceValue,
+ merge: mergeListValue,
+}
+
+// sizeDouble returns the size of wire encoding a float64 pointer as a Double.
+func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDouble wire encodes a float64 pointer as a Double.
+func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float64()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+// consumeDouble wire decodes a float64 pointer as a Double.
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Float64() = math.Float64frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderDouble = pointerCoderFuncs{
+ size: sizeDouble,
+ marshal: appendDouble,
+ unmarshal: consumeDouble,
+ merge: mergeFloat64,
+}
+
+// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double.
+// The zero value is not encoded.
+func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Float64()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return 0
+ }
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDoubleNoZero wire encodes a float64 pointer as a Double.
+// The zero value is not encoded.
+func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Float64()
+ if v == 0 && !math.Signbit(float64(v)) {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+var coderDoubleNoZero = pointerCoderFuncs{
+ size: sizeDoubleNoZero,
+ marshal: appendDoubleNoZero,
+ unmarshal: consumeDouble,
+ merge: mergeFloat64NoZero,
+}
+
+// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double.
+// It panics if the pointer is nil.
+func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ return f.tagsize + protowire.SizeFixed64()
+}
+
+// appendDoublePtr wire encodes a *float64 pointer as a Double.
+// It panics if the pointer is nil.
+func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.Float64Ptr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ return b, nil
+}
+
+// consumeDoublePtr wire decodes a *float64 pointer as a Double.
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.Float64Ptr()
+ if *vp == nil {
+ *vp = new(float64)
+ }
+ **vp = math.Float64frombits(v)
+ out.n = n
+ return out, nil
+}
+
+var coderDoublePtr = pointerCoderFuncs{
+ size: sizeDoublePtr,
+ marshal: appendDoublePtr,
+ unmarshal: consumeDoublePtr,
+ merge: mergeFloat64Ptr,
+}
+
+// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double.
+func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float64Slice()
+ size = len(s) * (f.tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendDoubleSlice encodes a []float64 pointer as a repeated Double.
+func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float64Slice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+
+// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double.
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.Float64Slice()
+ if wtyp == protowire.BytesType {
+ s := *sp
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ s = append(s, math.Float64frombits(v))
+ b = b[n:]
+ }
+ *sp = s
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, math.Float64frombits(v))
+ out.n = n
+ return out, nil
+}
+
+var coderDoubleSlice = pointerCoderFuncs{
+ size: sizeDoubleSlice,
+ marshal: appendDoubleSlice,
+ unmarshal: consumeDoubleSlice,
+ merge: mergeFloat64Slice,
+}
+
+// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double.
+func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.Float64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := len(s) * protowire.SizeFixed64()
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double.
+func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.Float64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := len(s) * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for _, v := range s {
+ b = protowire.AppendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+
+var coderDoublePackedSlice = pointerCoderFuncs{
+ size: sizeDoublePackedSlice,
+ marshal: appendDoublePackedSlice,
+ unmarshal: consumeDoubleSlice,
+ merge: mergeFloat64Slice,
+}
+
+// sizeDoubleValue returns the size of wire encoding a float64 value as a Double.
+func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeFixed64()
+}
+
+// appendDoubleValue encodes a float64 value as a Double.
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ return b, nil
+}
+
+// consumeDoubleValue decodes a float64 value as a Double.
+func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil
+}
+
+var coderDoubleValue = valueCoderFuncs{
+ size: sizeDoubleValue,
+ marshal: appendDoubleValue,
+ unmarshal: consumeDoubleValue,
+ merge: mergeScalarValue,
+}
+
+// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double.
+func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ size = list.Len() * (tagsize + protowire.SizeFixed64())
+ return size
+}
+
+// appendDoubleSliceValue encodes a []float64 value as a repeated Double.
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ }
+ return b, nil
+}
+
+// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double.
+func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ b = b[n:]
+ }
+ out.n = n
+ return listv, out, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderDoubleSliceValue = valueCoderFuncs{
+ size: sizeDoubleSliceValue,
+ marshal: appendDoubleSliceValue,
+ unmarshal: consumeDoubleSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double.
+func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := llen * protowire.SizeFixed64()
+ return tagsize + protowire.SizeBytes(n)
+}
+
+// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double.
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ llen := list.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, wiretag)
+ n := llen * protowire.SizeFixed64()
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ }
+ return b, nil
+}
+
+var coderDoublePackedSliceValue = valueCoderFuncs{
+ size: sizeDoublePackedSliceValue,
+ marshal: appendDoublePackedSliceValue,
+ unmarshal: consumeDoubleSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeString returns the size of wire encoding a string pointer as a String.
+func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.String()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendString wire encodes a string pointer as a String.
+func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+// consumeString wire decodes a string pointer as a String.
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.String() = v
+ out.n = n
+ return out, nil
+}
+
+var coderString = pointerCoderFuncs{
+ size: sizeString,
+ marshal: appendString,
+ unmarshal: consumeString,
+ merge: mergeString,
+}
+
+// appendStringValidateUTF8 wire encodes a string pointer as a String.
+func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringValidateUTF8 wire decodes a string pointer as a String.
+func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.String() = v
+ out.n = n
+ return out, nil
+}
+
+var coderStringValidateUTF8 = pointerCoderFuncs{
+ size: sizeString,
+ marshal: appendStringValidateUTF8,
+ unmarshal: consumeStringValidateUTF8,
+ merge: mergeString,
+}
+
+// sizeStringNoZero returns the size of wire encoding a string pointer as a String.
+// The zero value is not encoded.
+func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.String()
+ if len(v) == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendStringNoZero wire encodes a string pointer as a String.
+// The zero value is not encoded.
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+var coderStringNoZero = pointerCoderFuncs{
+ size: sizeStringNoZero,
+ marshal: appendStringNoZero,
+ unmarshal: consumeString,
+ merge: mergeStringNoZero,
+}
+
+// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String.
+// The zero value is not encoded.
+func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.String()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringNoZero,
+ marshal: appendStringNoZeroValidateUTF8,
+ unmarshal: consumeStringValidateUTF8,
+ merge: mergeStringNoZero,
+}
+
+// sizeStringPtr returns the size of wire encoding a *string pointer as a String.
+// It panics if the pointer is nil.
+func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := **p.StringPtr()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendStringPtr wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ return b, nil
+}
+
+// consumeStringPtr wire decodes a *string pointer as a String.
+func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtr = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtr,
+ unmarshal: consumeStringPtr,
+ merge: mergeStringPtr,
+}
+
+// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String.
+// It panics if the pointer is nil.
+func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := **p.StringPtr()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String.
+func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return out, errInvalidUTF8{}
+ }
+ vp := p.StringPtr()
+ if *vp == nil {
+ *vp = new(string)
+ }
+ **vp = v
+ out.n = n
+ return out, nil
+}
+
+var coderStringPtrValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringPtr,
+ marshal: appendStringPtrValidateUTF8,
+ unmarshal: consumeStringPtrValidateUTF8,
+ merge: mergeStringPtr,
+}
+
+// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String.
+func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeBytes(len(v))
+ }
+ return size
+}
+
+// appendStringSlice encodes a []string pointer as a repeated String.
+func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ }
+ return b, nil
+}
+
+// consumeStringSlice wire decodes a []string pointer as a repeated String.
+func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.StringSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringSlice = pointerCoderFuncs{
+ size: sizeStringSlice,
+ marshal: appendStringSlice,
+ unmarshal: consumeStringSlice,
+ merge: mergeStringSlice,
+}
+
+// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String.
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.StringSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendString(b, v)
+ if !utf8.ValidString(v) {
+ return b, errInvalidUTF8{}
+ }
+ }
+ return b, nil
+}
+
+// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String.
+func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.StringSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return out, errInvalidUTF8{}
+ }
+ *sp = append(*sp, v)
+ out.n = n
+ return out, nil
+}
+
+var coderStringSliceValidateUTF8 = pointerCoderFuncs{
+ size: sizeStringSlice,
+ marshal: appendStringSliceValidateUTF8,
+ unmarshal: consumeStringSliceValidateUTF8,
+ merge: mergeStringSlice,
+}
+
+// sizeStringValue returns the size of wire encoding a string value as a String.
+func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeBytes(len(v.String()))
+}
+
+// appendStringValue encodes a string value as a String.
+func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ return b, nil
+}
+
+// consumeStringValue decodes a string value as a String.
+func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfString(string(v)), out, nil
+}
+
+var coderStringValue = valueCoderFuncs{
+ size: sizeStringValue,
+ marshal: appendStringValue,
+ unmarshal: consumeStringValue,
+ merge: mergeScalarValue,
+}
+
+// appendStringValueValidateUTF8 encodes a string value as a String.
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ if !utf8.ValidString(v.String()) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeStringValueValidateUTF8 decodes a string value as a String.
+func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ if !utf8.ValidString(v) {
+ return protoreflect.Value{}, out, errInvalidUTF8{}
+ }
+ out.n = n
+ return protoreflect.ValueOfString(string(v)), out, nil
+}
+
+var coderStringValueValidateUTF8 = valueCoderFuncs{
+ size: sizeStringValue,
+ marshal: appendStringValueValidateUTF8,
+ unmarshal: consumeStringValueValidateUTF8,
+ merge: mergeScalarValue,
+}
+
+// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String.
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeBytes(len(v.String()))
+ }
+ return size
+}
+
+// appendStringSliceValue encodes a []string value as a repeated String.
+func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendString(b, v.String())
+ }
+ return b, nil
+}
+
+// consumeStringSliceValue wire decodes a []string value as a repeated String.
+func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeString(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfString(string(v)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderStringSliceValue = valueCoderFuncs{
+ size: sizeStringSliceValue,
+ marshal: appendStringSliceValue,
+ unmarshal: consumeStringSliceValue,
+ merge: mergeListValue,
+}
+
+// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes.
+func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bytes()
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendBytes wire encodes a []byte pointer as a Bytes.
+func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ return b, nil
+}
+
+// consumeBytes wire decodes a []byte pointer as a Bytes.
+func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Bytes() = append(emptyBuf[:], v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytes = pointerCoderFuncs{
+ size: sizeBytes,
+ marshal: appendBytes,
+ unmarshal: consumeBytes,
+ merge: mergeBytes,
+}
+
+// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes.
+func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes.
+func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.Bytes() = append(emptyBuf[:], v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytes,
+ marshal: appendBytesValidateUTF8,
+ unmarshal: consumeBytesValidateUTF8,
+ merge: mergeBytes,
+}
+
+// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return f.tagsize + protowire.SizeBytes(len(v))
+}
+
+// appendBytesNoZero wire encodes a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ return b, nil
+}
+
+// consumeBytesNoZero wire decodes a []byte pointer as a Bytes.
+// The zero value is not decoded.
+func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *p.Bytes() = append(([]byte)(nil), v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesNoZero = pointerCoderFuncs{
+ size: sizeBytesNoZero,
+ marshal: appendBytesNoZero,
+ unmarshal: consumeBytesNoZero,
+ merge: mergeBytesNoZero,
+}
+
+// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes.
+// The zero value is not encoded.
+func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ v := *p.Bytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ return b, nil
+}
+
+// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes.
+func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *p.Bytes() = append(([]byte)(nil), v...)
+ out.n = n
+ return out, nil
+}
+
+var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytesNoZero,
+ marshal: appendBytesNoZeroValidateUTF8,
+ unmarshal: consumeBytesNoZeroValidateUTF8,
+ merge: mergeBytesNoZero,
+}
+
+// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes.
+func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ size += f.tagsize + protowire.SizeBytes(len(v))
+ }
+ return size
+}
+
+// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes.
+func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ }
+ return b, nil
+}
+
+// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes.
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BytesSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ *sp = append(*sp, append(emptyBuf[:], v...))
+ out.n = n
+ return out, nil
+}
+
+var coderBytesSlice = pointerCoderFuncs{
+ size: sizeBytesSlice,
+ marshal: appendBytesSlice,
+ unmarshal: consumeBytesSlice,
+ merge: mergeBytesSlice,
+}
+
+// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes.
+func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) {
+ s := *p.BytesSlice()
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendBytes(b, v)
+ if !utf8.Valid(v) {
+ return b, errInvalidUTF8{}
+ }
+ }
+ return b, nil
+}
+
+// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes.
+func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ sp := p.BytesSlice()
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !utf8.Valid(v) {
+ return out, errInvalidUTF8{}
+ }
+ *sp = append(*sp, append(emptyBuf[:], v...))
+ out.n = n
+ return out, nil
+}
+
+var coderBytesSliceValidateUTF8 = pointerCoderFuncs{
+ size: sizeBytesSlice,
+ marshal: appendBytesSliceValidateUTF8,
+ unmarshal: consumeBytesSliceValidateUTF8,
+ merge: mergeBytesSlice,
+}
+
+// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes.
+func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int {
+ return tagsize + protowire.SizeBytes(len(v.Bytes()))
+}
+
+// appendBytesValue encodes a []byte value as a Bytes.
+func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendBytes(b, v.Bytes())
+ return b, nil
+}
+
+// consumeBytesValue decodes a []byte value as a Bytes.
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ out.n = n
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil
+}
+
+var coderBytesValue = valueCoderFuncs{
+ size: sizeBytesValue,
+ marshal: appendBytesValue,
+ unmarshal: consumeBytesValue,
+ merge: mergeBytesValue,
+}
+
+// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes.
+func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ size += tagsize + protowire.SizeBytes(len(v.Bytes()))
+ }
+ return size
+}
+
+// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes.
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) {
+ list := listv.List()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ v := list.Get(i)
+ b = protowire.AppendVarint(b, wiretag)
+ b = protowire.AppendBytes(b, v.Bytes())
+ }
+ return b, nil
+}
+
+// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes.
+func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) {
+ list := listv.List()
+ if wtyp != protowire.BytesType {
+ return protoreflect.Value{}, out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return protoreflect.Value{}, out, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
+ out.n = n
+ return listv, out, nil
+}
+
+var coderBytesSliceValue = valueCoderFuncs{
+ size: sizeBytesSliceValue,
+ marshal: appendBytesSliceValue,
+ unmarshal: consumeBytesSliceValue,
+ merge: mergeBytesListValue,
+}
+
+// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices.
+var emptyBuf [0]byte
+
+var wireTypes = map[protoreflect.Kind]protowire.Type{
+ protoreflect.BoolKind: protowire.VarintType,
+ protoreflect.EnumKind: protowire.VarintType,
+ protoreflect.Int32Kind: protowire.VarintType,
+ protoreflect.Sint32Kind: protowire.VarintType,
+ protoreflect.Uint32Kind: protowire.VarintType,
+ protoreflect.Int64Kind: protowire.VarintType,
+ protoreflect.Sint64Kind: protowire.VarintType,
+ protoreflect.Uint64Kind: protowire.VarintType,
+ protoreflect.Sfixed32Kind: protowire.Fixed32Type,
+ protoreflect.Fixed32Kind: protowire.Fixed32Type,
+ protoreflect.FloatKind: protowire.Fixed32Type,
+ protoreflect.Sfixed64Kind: protowire.Fixed64Type,
+ protoreflect.Fixed64Kind: protowire.Fixed64Type,
+ protoreflect.DoubleKind: protowire.Fixed64Type,
+ protoreflect.StringKind: protowire.BytesType,
+ protoreflect.BytesKind: protowire.BytesType,
+ protoreflect.MessageKind: protowire.BytesType,
+ protoreflect.GroupKind: protowire.StartGroupType,
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
new file mode 100644
index 00000000..35a67c25
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -0,0 +1,388 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "errors"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type mapInfo struct {
+ goType reflect.Type
+ keyWiretag uint64
+ valWiretag uint64
+ keyFuncs valueCoderFuncs
+ valFuncs valueCoderFuncs
+ keyZero pref.Value
+ keyKind pref.Kind
+ conv *mapConverter
+}
+
+func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) {
+ // TODO: Consider generating specialized map coders.
+ keyField := fd.MapKey()
+ valField := fd.MapValue()
+ keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()])
+ valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()])
+ keyFuncs := encoderFuncsForValue(keyField)
+ valFuncs := encoderFuncsForValue(valField)
+ conv := newMapConverter(ft, fd)
+
+ mapi := &mapInfo{
+ goType: ft,
+ keyWiretag: keyWiretag,
+ valWiretag: valWiretag,
+ keyFuncs: keyFuncs,
+ valFuncs: valFuncs,
+ keyZero: keyField.Default(),
+ keyKind: keyField.Kind(),
+ conv: conv,
+ }
+ if valField.Kind() == pref.MessageKind {
+ valueMessage = getMessageInfo(ft.Elem())
+ }
+
+ funcs = pointerCoderFuncs{
+ size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+ return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts)
+ },
+ marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts)
+ },
+ unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+ mp := p.AsValueOf(ft)
+ if mp.Elem().IsNil() {
+ mp.Elem().Set(reflect.MakeMap(mapi.goType))
+ }
+ if f.mi == nil {
+ return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts)
+ } else {
+ return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts)
+ }
+ },
+ }
+ switch valField.Kind() {
+ case pref.MessageKind:
+ funcs.merge = mergeMapOfMessage
+ case pref.BytesKind:
+ funcs.merge = mergeMapOfBytes
+ default:
+ funcs.merge = mergeMap
+ }
+ if valFuncs.isInit != nil {
+ funcs.isInit = func(p pointer, f *coderFieldInfo) error {
+ return isInitMap(p.AsValueOf(ft).Elem(), mapi, f)
+ }
+ }
+ return valueMessage, funcs
+}
+
+const (
+ mapKeyTagSize = 1 // field 1, tag size 1.
+ mapValTagSize = 1 // field 2, tag size 2.
+)
+
+func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int {
+ if mapv.Len() == 0 {
+ return 0
+ }
+ n := 0
+ iter := mapRange(mapv)
+ for iter.Next() {
+ key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
+ keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ var valSize int
+ value := mapi.conv.valConv.PBValueOf(iter.Value())
+ if f.mi == nil {
+ valSize = mapi.valFuncs.size(value, mapValTagSize, opts)
+ } else {
+ p := pointerOfValue(iter.Value())
+ valSize += mapValTagSize
+ valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts))
+ }
+ n += f.tagsize + protowire.SizeBytes(keySize+valSize)
+ }
+ return n
+}
+
+func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ var (
+ key = mapi.keyZero
+ val = mapi.conv.valConv.New()
+ )
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if num > protowire.MaxValidNumber {
+ return out, errors.New("invalid field number")
+ }
+ b = b[n:]
+ err := errUnknown
+ switch num {
+ case 1:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ key = v
+ n = o.n
+ case 2:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ val = v
+ n = o.n
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ } else if err != nil {
+ return out, err
+ }
+ b = b[n:]
+ }
+ mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val))
+ out.n = n
+ return out, nil
+}
+
+func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ var (
+ key = mapi.keyZero
+ val = reflect.New(f.mi.GoReflectType.Elem())
+ )
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if num > protowire.MaxValidNumber {
+ return out, errors.New("invalid field number")
+ }
+ b = b[n:]
+ err := errUnknown
+ switch num {
+ case 1:
+ var v pref.Value
+ var o unmarshalOutput
+ v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts)
+ if err != nil {
+ break
+ }
+ key = v
+ n = o.n
+ case 2:
+ if wtyp != protowire.BytesType {
+ break
+ }
+ var v []byte
+ v, n = protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ var o unmarshalOutput
+ o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts)
+ if o.initialized {
+ // Consider this map item initialized so long as we see
+ // an initialized value.
+ out.initialized = true
+ }
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ } else if err != nil {
+ return out, err
+ }
+ b = b[n:]
+ }
+ mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val)
+ out.n = n
+ return out, nil
+}
+
+func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if f.mi == nil {
+ key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
+ val := mapi.conv.valConv.PBValueOf(valrv)
+ size := 0
+ size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ size += mapi.valFuncs.size(val, mapValTagSize, opts)
+ b = protowire.AppendVarint(b, uint64(size))
+ b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
+ if err != nil {
+ return nil, err
+ }
+ return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ } else {
+ key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
+ val := pointerOfValue(valrv)
+ valSize := f.mi.sizePointer(val, opts)
+ size := 0
+ size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
+ size += mapValTagSize + protowire.SizeBytes(valSize)
+ b = protowire.AppendVarint(b, uint64(size))
+ b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
+ if err != nil {
+ return nil, err
+ }
+ b = protowire.AppendVarint(b, mapi.valWiretag)
+ b = protowire.AppendVarint(b, uint64(valSize))
+ return f.mi.marshalAppendPointer(b, val, opts)
+ }
+}
+
+func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if mapv.Len() == 0 {
+ return b, nil
+ }
+ if opts.Deterministic() {
+ return appendMapDeterministic(b, mapv, mapi, f, opts)
+ }
+ iter := mapRange(mapv)
+ for iter.Next() {
+ var err error
+ b = protowire.AppendVarint(b, f.wiretag)
+ b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ keys := mapv.MapKeys()
+ sort.Slice(keys, func(i, j int) bool {
+ switch keys[i].Kind() {
+ case reflect.Bool:
+ return !keys[i].Bool() && keys[j].Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return keys[i].Int() < keys[j].Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return keys[i].Uint() < keys[j].Uint()
+ case reflect.Float32, reflect.Float64:
+ return keys[i].Float() < keys[j].Float()
+ case reflect.String:
+ return keys[i].String() < keys[j].String()
+ default:
+ panic("invalid kind: " + keys[i].Kind().String())
+ }
+ })
+ for _, key := range keys {
+ var err error
+ b = protowire.AppendVarint(b, f.wiretag)
+ b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
+ if mi := f.mi; mi != nil {
+ mi.init()
+ if !mi.needsInitCheck {
+ return nil
+ }
+ iter := mapRange(mapv)
+ for iter.Next() {
+ val := pointerOfValue(iter.Value())
+ if err := mi.checkInitializedPointer(val); err != nil {
+ return err
+ }
+ }
+ } else {
+ iter := mapRange(mapv)
+ for iter.Next() {
+ val := mapi.conv.valConv.PBValueOf(iter.Value())
+ if err := mapi.valFuncs.isInit(val); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ dstm.SetMapIndex(iter.Key(), iter.Value())
+ }
+}
+
+func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
+ }
+}
+
+func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstm := dst.AsValueOf(f.ft).Elem()
+ srcm := src.AsValueOf(f.ft).Elem()
+ if srcm.Len() == 0 {
+ return
+ }
+ if dstm.IsNil() {
+ dstm.Set(reflect.MakeMap(f.ft))
+ }
+ iter := mapRange(srcm)
+ for iter.Next() {
+ val := reflect.New(f.ft.Elem().Elem())
+ if f.mi != nil {
+ f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts)
+ } else {
+ opts.Merge(asMessage(val), asMessage(iter.Value()))
+ }
+ dstm.SetMapIndex(iter.Key(), val)
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
new file mode 100644
index 00000000..2706bb67
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.12
+
+package impl
+
+import "reflect"
+
+type mapIter struct {
+ v reflect.Value
+ keys []reflect.Value
+}
+
+// mapRange provides a less-efficient equivalent to
+// the Go 1.12 reflect.Value.MapRange method.
+func mapRange(v reflect.Value) *mapIter {
+ return &mapIter{v: v}
+}
+
+func (i *mapIter) Next() bool {
+ if i.keys == nil {
+ i.keys = i.v.MapKeys()
+ } else {
+ i.keys = i.keys[1:]
+ }
+ return len(i.keys) > 0
+}
+
+func (i *mapIter) Key() reflect.Value {
+ return i.keys[0]
+}
+
+func (i *mapIter) Value() reflect.Value {
+ return i.v.MapIndex(i.keys[0])
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
new file mode 100644
index 00000000..1533ef60
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+
+package impl
+
+import "reflect"
+
+func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
new file mode 100644
index 00000000..0e176d56
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -0,0 +1,159 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/fieldsort"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// coderMessageInfo contains per-message information used by the fast-path functions.
+// This is a different type from MessageInfo to keep MessageInfo as general-purpose as
+// possible.
+type coderMessageInfo struct {
+ methods piface.Methods
+
+ orderedCoderFields []*coderFieldInfo
+ denseCoderFields []*coderFieldInfo
+ coderFields map[protowire.Number]*coderFieldInfo
+ sizecacheOffset offset
+ unknownOffset offset
+ extensionOffset offset
+ needsInitCheck bool
+ isMessageSet bool
+ numRequiredFields uint8
+}
+
+type coderFieldInfo struct {
+ funcs pointerCoderFuncs // fast-path per-field functions
+ mi *MessageInfo // field's message
+ ft reflect.Type
+ validation validationInfo // information used by message validation
+ num pref.FieldNumber // field number
+ offset offset // struct field offset
+ wiretag uint64 // field tag (number + wire type)
+ tagsize int // size of the varint-encoded tag
+ isPointer bool // true if IsNil may be called on the struct field
+ isRequired bool // true if field is required
+}
+
+func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
+ mi.sizecacheOffset = si.sizecacheOffset
+ mi.unknownOffset = si.unknownOffset
+ mi.extensionOffset = si.extensionOffset
+
+ mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+ fields := mi.Desc.Fields()
+ preallocFields := make([]coderFieldInfo, fields.Len())
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+
+ fs := si.fieldsByNumber[fd.Number()]
+ isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic()
+ if isOneof {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ ft := fs.Type
+ var wiretag uint64
+ if !fd.IsPacked() {
+ wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+ }
+ var fieldOffset offset
+ var funcs pointerCoderFuncs
+ var childMessage *MessageInfo
+ switch {
+ case isOneof:
+ fieldOffset = offsetOf(fs, mi.Exporter)
+ case fd.IsWeak():
+ fieldOffset = si.weakOffset
+ funcs = makeWeakMessageFieldCoder(fd)
+ default:
+ fieldOffset = offsetOf(fs, mi.Exporter)
+ childMessage, funcs = fieldCoder(fd, ft)
+ }
+ cf := &preallocFields[i]
+ *cf = coderFieldInfo{
+ num: fd.Number(),
+ offset: fieldOffset,
+ wiretag: wiretag,
+ ft: ft,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: funcs,
+ mi: childMessage,
+ validation: newFieldValidationInfo(mi, si, fd, ft),
+ isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(),
+ isRequired: fd.Cardinality() == pref.Required,
+ }
+ mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+ mi.coderFields[cf.num] = cf
+ }
+ for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+ if od := oneofs.Get(i); !od.IsSynthetic() {
+ mi.initOneofFieldCoders(od, si)
+ }
+ }
+ if messageset.IsMessageSet(mi.Desc) {
+ if !mi.extensionOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+ }
+ if !mi.unknownOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+ }
+ mi.isMessageSet = true
+ }
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+ })
+
+ var maxDense pref.FieldNumber
+ for _, cf := range mi.orderedCoderFields {
+ if cf.num >= 16 && cf.num >= 2*maxDense {
+ break
+ }
+ maxDense = cf.num
+ }
+ mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+ for _, cf := range mi.orderedCoderFields {
+ if int(cf.num) >= len(mi.denseCoderFields) {
+ break
+ }
+ mi.denseCoderFields[cf.num] = cf
+ }
+
+ // To preserve compatibility with historic wire output, marshal oneofs last.
+ if mi.Desc.Oneofs().Len() > 0 {
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+ fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+ return fieldsort.Less(fi, fj)
+ })
+ }
+
+ mi.needsInitCheck = needsInitCheck(mi.Desc)
+ if mi.methods.Marshal == nil && mi.methods.Size == nil {
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Marshal = mi.marshal
+ mi.methods.Size = mi.size
+ }
+ if mi.methods.Unmarshal == nil {
+ mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+ mi.methods.Unmarshal = mi.unmarshal
+ }
+ if mi.methods.CheckInitialized == nil {
+ mi.methods.CheckInitialized = mi.checkInitialized
+ }
+ if mi.methods.Merge == nil {
+ mi.methods.Merge = mi.merge
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
new file mode 100644
index 00000000..cfb68e12
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -0,0 +1,120 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+)
+
+func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) {
+ if !flags.ProtoLegacy {
+ return 0
+ }
+
+ ext := *p.Apply(mi.extensionOffset).Extensions()
+ for _, x := range ext {
+ xi := getExtensionFieldInfo(x.Type())
+ if xi.funcs.size == nil {
+ continue
+ }
+ num, _ := protowire.DecodeTag(xi.wiretag)
+ size += messageset.SizeField(num)
+ size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
+ }
+
+ unknown := *p.Apply(mi.unknownOffset).Bytes()
+ size += messageset.SizeUnknown(unknown)
+
+ return size
+}
+
+func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) {
+ if !flags.ProtoLegacy {
+ return b, errors.New("no support for message_set_wire_format")
+ }
+
+ ext := *p.Apply(mi.extensionOffset).Extensions()
+ switch len(ext) {
+ case 0:
+ case 1:
+ // Fast-path for one extension: Don't bother sorting the keys.
+ for _, x := range ext {
+ var err error
+ b, err = marshalMessageSetField(mi, b, x, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ default:
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(ext))
+ for k := range ext {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ for _, k := range keys {
+ var err error
+ b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ }
+
+ unknown := *p.Apply(mi.unknownOffset).Bytes()
+ b, err := messageset.AppendUnknown(b, unknown)
+ if err != nil {
+ return b, err
+ }
+
+ return b, nil
+}
+
+func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) {
+ xi := getExtensionFieldInfo(x.Type())
+ num, _ := protowire.DecodeTag(xi.wiretag)
+ b = messageset.AppendFieldStart(b, num)
+ b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
+ if err != nil {
+ return b, err
+ }
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+}
+
+func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if !flags.ProtoLegacy {
+ return out, errors.New("no support for message_set_wire_format")
+ }
+
+ ep := p.Apply(mi.extensionOffset).Extensions()
+ if *ep == nil {
+ *ep = make(map[int32]ExtensionField)
+ }
+ ext := *ep
+ unknown := p.Apply(mi.unknownOffset).Bytes()
+ initialized := true
+ err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error {
+ o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts)
+ if err == errUnknown {
+ *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType)
+ *unknown = append(*unknown, v...)
+ return nil
+ }
+ if !o.initialized {
+ initialized = false
+ }
+ return err
+ })
+ out.n = len(b)
+ out.initialized = initialized
+ return out, err
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
new file mode 100644
index 00000000..86f7dc3c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
@@ -0,0 +1,209 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+)
+
+func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+ v := p.v.Elem().Int()
+ return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ v := p.v.Elem().Int()
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(v))
+ return b, nil
+}
+
+func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ p.v.Elem().SetInt(int64(v))
+ out.n = n
+ return out, nil
+}
+
+func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ dst.v.Elem().Set(src.v.Elem())
+}
+
+var coderEnum = pointerCoderFuncs{
+ size: sizeEnum,
+ marshal: appendEnum,
+ unmarshal: consumeEnum,
+ merge: mergeEnum,
+}
+
+func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ if p.v.Elem().Int() == 0 {
+ return 0
+ }
+ return sizeEnum(p, f, opts)
+}
+
+func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ if p.v.Elem().Int() == 0 {
+ return b, nil
+ }
+ return appendEnum(b, p, f, opts)
+}
+
+func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ if src.v.Elem().Int() != 0 {
+ dst.v.Elem().Set(src.v.Elem())
+ }
+}
+
+var coderEnumNoZero = pointerCoderFuncs{
+ size: sizeEnumNoZero,
+ marshal: appendEnumNoZero,
+ unmarshal: consumeEnum,
+ merge: mergeEnumNoZero,
+}
+
+func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return sizeEnum(pointer{p.v.Elem()}, f, opts)
+}
+
+func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ return appendEnum(b, pointer{p.v.Elem()}, f, opts)
+}
+
+func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ if p.v.Elem().IsNil() {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
+ }
+ return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
+}
+
+func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ if !src.v.Elem().IsNil() {
+ v := reflect.New(dst.v.Type().Elem().Elem())
+ v.Elem().Set(src.v.Elem().Elem())
+ dst.v.Elem().Set(v)
+ }
+}
+
+var coderEnumPtr = pointerCoderFuncs{
+ size: sizeEnumPtr,
+ marshal: appendEnumPtr,
+ unmarshal: consumeEnumPtr,
+ merge: mergeEnumPtr,
+}
+
+func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.v.Elem()
+ for i, llen := 0, s.Len(); i < llen; i++ {
+ size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
+ }
+ return size
+}
+
+func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.v.Elem()
+ for i, llen := 0, s.Len(); i < llen; i++ {
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+ }
+ return b, nil
+}
+
+func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ s := p.v.Elem()
+ if wtyp == protowire.BytesType {
+ b, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ for len(b) > 0 {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ rv := reflect.New(s.Type().Elem()).Elem()
+ rv.SetInt(int64(v))
+ s.Set(reflect.Append(s, rv))
+ b = b[n:]
+ }
+ out.n = n
+ return out, nil
+ }
+ if wtyp != protowire.VarintType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ rv := reflect.New(s.Type().Elem()).Elem()
+ rv.SetInt(int64(v))
+ s.Set(reflect.Append(s, rv))
+ out.n = n
+ return out, nil
+}
+
+func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
+}
+
+var coderEnumSlice = pointerCoderFuncs{
+ size: sizeEnumSlice,
+ marshal: appendEnumSlice,
+ unmarshal: consumeEnumSlice,
+ merge: mergeEnumSlice,
+}
+
+func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.v.Elem()
+ llen := s.Len()
+ if llen == 0 {
+ return 0
+ }
+ n := 0
+ for i := 0; i < llen; i++ {
+ n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+ }
+ return f.tagsize + protowire.SizeBytes(n)
+}
+
+func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.v.Elem()
+ llen := s.Len()
+ if llen == 0 {
+ return b, nil
+ }
+ b = protowire.AppendVarint(b, f.wiretag)
+ n := 0
+ for i := 0; i < llen; i++ {
+ n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+ }
+ b = protowire.AppendVarint(b, uint64(n))
+ for i := 0; i < llen; i++ {
+ b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+ }
+ return b, nil
+}
+
+var coderEnumPackedSlice = pointerCoderFuncs{
+ size: sizeEnumPackedSlice,
+ marshal: appendEnumPackedSlice,
+ unmarshal: consumeEnumSlice,
+ merge: mergeEnumSlice,
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
new file mode 100644
index 00000000..e8997123
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go
@@ -0,0 +1,557 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// pointerCoderFuncs is a set of pointer encoding functions.
+type pointerCoderFuncs struct {
+ mi *MessageInfo
+ size func(p pointer, f *coderFieldInfo, opts marshalOptions) int
+ marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error)
+ isInit func(p pointer, f *coderFieldInfo) error
+ merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions)
+}
+
+// valueCoderFuncs is a set of protoreflect.Value encoding functions.
+type valueCoderFuncs struct {
+ size func(v pref.Value, tagsize int, opts marshalOptions) int
+ marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error)
+ unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error)
+ isInit func(v pref.Value) error
+ merge func(dst, src pref.Value, opts mergeOptions) pref.Value
+}
+
+// fieldCoder returns pointer functions for a field, used for operating on
+// struct fields.
+func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ switch {
+ case fd.IsMap():
+ return encoderFuncsForMap(fd, ft)
+ case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ // Repeated fields (not packed).
+ if ft.Kind() != reflect.Slice {
+ break
+ }
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolSlice
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumSlice
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32Slice
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32Slice
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32Slice
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64Slice
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64Slice
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64Slice
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32Slice
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32Slice
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatSlice
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64Slice
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64Slice
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoubleSlice
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringSliceValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringSlice
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesSliceValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesSlice
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringSlice
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesSlice
+ }
+ case pref.MessageKind:
+ return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft)
+ case pref.GroupKind:
+ return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft)
+ }
+ case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ // Packed repeated fields.
+ //
+ // Only repeated fields of primitive numeric types
+ // (Varint, Fixed32, or Fixed64 wire type) can be packed.
+ if ft.Kind() != reflect.Slice {
+ break
+ }
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolPackedSlice
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumPackedSlice
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32PackedSlice
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32PackedSlice
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32PackedSlice
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64PackedSlice
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64PackedSlice
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64PackedSlice
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32PackedSlice
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32PackedSlice
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatPackedSlice
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64PackedSlice
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64PackedSlice
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoublePackedSlice
+ }
+ }
+ case fd.Kind() == pref.MessageKind:
+ return getMessageInfo(ft), makeMessageFieldCoder(fd, ft)
+ case fd.Kind() == pref.GroupKind:
+ return getMessageInfo(ft), makeGroupFieldCoder(fd, ft)
+ case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil:
+ // Populated oneof fields always encode even if set to the zero value,
+ // which normally are not encoded in proto3.
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolNoZero
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumNoZero
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32NoZero
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32NoZero
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32NoZero
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64NoZero
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64NoZero
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64NoZero
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32NoZero
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32NoZero
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatNoZero
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64NoZero
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64NoZero
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoubleNoZero
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringNoZeroValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringNoZero
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesNoZeroValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesNoZero
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringNoZero
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytesNoZero
+ }
+ }
+ case ft.Kind() == reflect.Ptr:
+ ft := ft.Elem()
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBoolPtr
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnumPtr
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32Ptr
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32Ptr
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32Ptr
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64Ptr
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64Ptr
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64Ptr
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32Ptr
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32Ptr
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloatPtr
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64Ptr
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64Ptr
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDoublePtr
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringPtrValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderStringPtr
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderStringPtr
+ }
+ }
+ default:
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if ft.Kind() == reflect.Bool {
+ return nil, coderBool
+ }
+ case pref.EnumKind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderEnum
+ }
+ case pref.Int32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderInt32
+ }
+ case pref.Sint32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSint32
+ }
+ case pref.Uint32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderUint32
+ }
+ case pref.Int64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderInt64
+ }
+ case pref.Sint64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSint64
+ }
+ case pref.Uint64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderUint64
+ }
+ case pref.Sfixed32Kind:
+ if ft.Kind() == reflect.Int32 {
+ return nil, coderSfixed32
+ }
+ case pref.Fixed32Kind:
+ if ft.Kind() == reflect.Uint32 {
+ return nil, coderFixed32
+ }
+ case pref.FloatKind:
+ if ft.Kind() == reflect.Float32 {
+ return nil, coderFloat
+ }
+ case pref.Sfixed64Kind:
+ if ft.Kind() == reflect.Int64 {
+ return nil, coderSfixed64
+ }
+ case pref.Fixed64Kind:
+ if ft.Kind() == reflect.Uint64 {
+ return nil, coderFixed64
+ }
+ case pref.DoubleKind:
+ if ft.Kind() == reflect.Float64 {
+ return nil, coderDouble
+ }
+ case pref.StringKind:
+ if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) {
+ return nil, coderStringValidateUTF8
+ }
+ if ft.Kind() == reflect.String {
+ return nil, coderString
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) {
+ return nil, coderBytesValidateUTF8
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytes
+ }
+ case pref.BytesKind:
+ if ft.Kind() == reflect.String {
+ return nil, coderString
+ }
+ if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 {
+ return nil, coderBytes
+ }
+ }
+ }
+ panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft))
+}
+
+// encoderFuncsForValue returns value functions for a field, used for
+// extension values and map encoding.
+func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs {
+ switch {
+ case fd.Cardinality() == pref.Repeated && !fd.IsPacked():
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return coderBoolSliceValue
+ case pref.EnumKind:
+ return coderEnumSliceValue
+ case pref.Int32Kind:
+ return coderInt32SliceValue
+ case pref.Sint32Kind:
+ return coderSint32SliceValue
+ case pref.Uint32Kind:
+ return coderUint32SliceValue
+ case pref.Int64Kind:
+ return coderInt64SliceValue
+ case pref.Sint64Kind:
+ return coderSint64SliceValue
+ case pref.Uint64Kind:
+ return coderUint64SliceValue
+ case pref.Sfixed32Kind:
+ return coderSfixed32SliceValue
+ case pref.Fixed32Kind:
+ return coderFixed32SliceValue
+ case pref.FloatKind:
+ return coderFloatSliceValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64SliceValue
+ case pref.Fixed64Kind:
+ return coderFixed64SliceValue
+ case pref.DoubleKind:
+ return coderDoubleSliceValue
+ case pref.StringKind:
+ // We don't have a UTF-8 validating coder for repeated string fields.
+ // Value coders are used for extensions and maps.
+ // Extensions are never proto3, and maps never contain lists.
+ return coderStringSliceValue
+ case pref.BytesKind:
+ return coderBytesSliceValue
+ case pref.MessageKind:
+ return coderMessageSliceValue
+ case pref.GroupKind:
+ return coderGroupSliceValue
+ }
+ case fd.Cardinality() == pref.Repeated && fd.IsPacked():
+ switch fd.Kind() {
+ case pref.BoolKind:
+ return coderBoolPackedSliceValue
+ case pref.EnumKind:
+ return coderEnumPackedSliceValue
+ case pref.Int32Kind:
+ return coderInt32PackedSliceValue
+ case pref.Sint32Kind:
+ return coderSint32PackedSliceValue
+ case pref.Uint32Kind:
+ return coderUint32PackedSliceValue
+ case pref.Int64Kind:
+ return coderInt64PackedSliceValue
+ case pref.Sint64Kind:
+ return coderSint64PackedSliceValue
+ case pref.Uint64Kind:
+ return coderUint64PackedSliceValue
+ case pref.Sfixed32Kind:
+ return coderSfixed32PackedSliceValue
+ case pref.Fixed32Kind:
+ return coderFixed32PackedSliceValue
+ case pref.FloatKind:
+ return coderFloatPackedSliceValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64PackedSliceValue
+ case pref.Fixed64Kind:
+ return coderFixed64PackedSliceValue
+ case pref.DoubleKind:
+ return coderDoublePackedSliceValue
+ }
+ default:
+ switch fd.Kind() {
+ default:
+ case pref.BoolKind:
+ return coderBoolValue
+ case pref.EnumKind:
+ return coderEnumValue
+ case pref.Int32Kind:
+ return coderInt32Value
+ case pref.Sint32Kind:
+ return coderSint32Value
+ case pref.Uint32Kind:
+ return coderUint32Value
+ case pref.Int64Kind:
+ return coderInt64Value
+ case pref.Sint64Kind:
+ return coderSint64Value
+ case pref.Uint64Kind:
+ return coderUint64Value
+ case pref.Sfixed32Kind:
+ return coderSfixed32Value
+ case pref.Fixed32Kind:
+ return coderFixed32Value
+ case pref.FloatKind:
+ return coderFloatValue
+ case pref.Sfixed64Kind:
+ return coderSfixed64Value
+ case pref.Fixed64Kind:
+ return coderFixed64Value
+ case pref.DoubleKind:
+ return coderDoubleValue
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ return coderStringValueValidateUTF8
+ }
+ return coderStringValue
+ case pref.BytesKind:
+ return coderBytesValue
+ case pref.MessageKind:
+ return coderMessageValue
+ case pref.GroupKind:
+ return coderGroupValue
+ }
+ }
+ panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind()))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
new file mode 100644
index 00000000..e118af1e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package impl
+
+// When using unsafe pointers, we can just treat enum values as int32s.
+
+var (
+ coderEnumNoZero = coderInt32NoZero
+ coderEnum = coderInt32
+ coderEnumPtr = coderInt32Ptr
+ coderEnumSlice = coderInt32Slice
+ coderEnumPackedSlice = coderInt32PackedSlice
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
new file mode 100644
index 00000000..36a90dff
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -0,0 +1,467 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unwrapper unwraps the value to the underlying value.
+// This is implemented by List and Map.
+type unwrapper interface {
+ protoUnwrap() interface{}
+}
+
+// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.
+type Converter interface {
+ // PBValueOf converts a reflect.Value to a protoreflect.Value.
+ PBValueOf(reflect.Value) pref.Value
+
+ // GoValueOf converts a protoreflect.Value to a reflect.Value.
+ GoValueOf(pref.Value) reflect.Value
+
+ // IsValidPB returns whether a protoreflect.Value is compatible with this type.
+ IsValidPB(pref.Value) bool
+
+ // IsValidGo returns whether a reflect.Value is compatible with this type.
+ IsValidGo(reflect.Value) bool
+
+ // New returns a new field value.
+ // For scalars, it returns the default value of the field.
+ // For composite types, it returns a new mutable value.
+ New() pref.Value
+
+ // Zero returns a new field value.
+ // For scalars, it returns the default value of the field.
+ // For composite types, it returns an immutable, empty value.
+ Zero() pref.Value
+}
+
+// NewConverter matches a Go type with a protobuf field and returns a Converter
+// that converts between the two. Enums must be a named int32 kind that
+// implements protoreflect.Enum, and messages must be pointer to a named
+// struct type that implements protoreflect.ProtoMessage.
+//
+// This matcher deliberately supports a wider range of Go types than what
+// protoc-gen-go historically generated to be able to automatically wrap some
+// v1 messages generated by other forks of protoc-gen-go.
+func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ switch {
+ case fd.IsList():
+ return newListConverter(t, fd)
+ case fd.IsMap():
+ return newMapConverter(t, fd)
+ default:
+ return newSingularConverter(t, fd)
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+var (
+ boolType = reflect.TypeOf(bool(false))
+ int32Type = reflect.TypeOf(int32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float32Type = reflect.TypeOf(float32(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf(string(""))
+ bytesType = reflect.TypeOf([]byte(nil))
+ byteType = reflect.TypeOf(byte(0))
+)
+
+var (
+ boolZero = pref.ValueOfBool(false)
+ int32Zero = pref.ValueOfInt32(0)
+ int64Zero = pref.ValueOfInt64(0)
+ uint32Zero = pref.ValueOfUint32(0)
+ uint64Zero = pref.ValueOfUint64(0)
+ float32Zero = pref.ValueOfFloat32(0)
+ float64Zero = pref.ValueOfFloat64(0)
+ stringZero = pref.ValueOfString("")
+ bytesZero = pref.ValueOfBytes(nil)
+)
+
+func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value {
+ if fd.Cardinality() == pref.Repeated {
+ // Default isn't defined for repeated fields.
+ return zero
+ }
+ return fd.Default()
+ }
+ switch fd.Kind() {
+ case pref.BoolKind:
+ if t.Kind() == reflect.Bool {
+ return &boolConverter{t, defVal(fd, boolZero)}
+ }
+ case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+ if t.Kind() == reflect.Int32 {
+ return &int32Converter{t, defVal(fd, int32Zero)}
+ }
+ case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+ if t.Kind() == reflect.Int64 {
+ return &int64Converter{t, defVal(fd, int64Zero)}
+ }
+ case pref.Uint32Kind, pref.Fixed32Kind:
+ if t.Kind() == reflect.Uint32 {
+ return &uint32Converter{t, defVal(fd, uint32Zero)}
+ }
+ case pref.Uint64Kind, pref.Fixed64Kind:
+ if t.Kind() == reflect.Uint64 {
+ return &uint64Converter{t, defVal(fd, uint64Zero)}
+ }
+ case pref.FloatKind:
+ if t.Kind() == reflect.Float32 {
+ return &float32Converter{t, defVal(fd, float32Zero)}
+ }
+ case pref.DoubleKind:
+ if t.Kind() == reflect.Float64 {
+ return &float64Converter{t, defVal(fd, float64Zero)}
+ }
+ case pref.StringKind:
+ if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
+ return &stringConverter{t, defVal(fd, stringZero)}
+ }
+ case pref.BytesKind:
+ if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) {
+ return &bytesConverter{t, defVal(fd, bytesZero)}
+ }
+ case pref.EnumKind:
+ // Handle enums, which must be a named int32 type.
+ if t.Kind() == reflect.Int32 {
+ return newEnumConverter(t, fd)
+ }
+ case pref.MessageKind, pref.GroupKind:
+ return newMessageConverter(t)
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+type boolConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfBool(v.Bool())
+}
+func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Bool()).Convert(c.goType)
+}
+func (c *boolConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(bool)
+ return ok
+}
+func (c *boolConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *boolConverter) New() pref.Value { return c.def }
+func (c *boolConverter) Zero() pref.Value { return c.def }
+
+type int32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfInt32(int32(v.Int()))
+}
+func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(int32(v.Int())).Convert(c.goType)
+}
+func (c *int32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(int32)
+ return ok
+}
+func (c *int32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *int32Converter) New() pref.Value { return c.def }
+func (c *int32Converter) Zero() pref.Value { return c.def }
+
+type int64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfInt64(int64(v.Int()))
+}
+func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(int64(v.Int())).Convert(c.goType)
+}
+func (c *int64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(int64)
+ return ok
+}
+func (c *int64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *int64Converter) New() pref.Value { return c.def }
+func (c *int64Converter) Zero() pref.Value { return c.def }
+
+type uint32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfUint32(uint32(v.Uint()))
+}
+func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType)
+}
+func (c *uint32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(uint32)
+ return ok
+}
+func (c *uint32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *uint32Converter) New() pref.Value { return c.def }
+func (c *uint32Converter) Zero() pref.Value { return c.def }
+
+type uint64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfUint64(uint64(v.Uint()))
+}
+func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType)
+}
+func (c *uint64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(uint64)
+ return ok
+}
+func (c *uint64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *uint64Converter) New() pref.Value { return c.def }
+func (c *uint64Converter) Zero() pref.Value { return c.def }
+
+type float32Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfFloat32(float32(v.Float()))
+}
+func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(float32(v.Float())).Convert(c.goType)
+}
+func (c *float32Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(float32)
+ return ok
+}
+func (c *float32Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *float32Converter) New() pref.Value { return c.def }
+func (c *float32Converter) Zero() pref.Value { return c.def }
+
+type float64Converter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfFloat64(float64(v.Float()))
+}
+func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(float64(v.Float())).Convert(c.goType)
+}
+func (c *float64Converter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(float64)
+ return ok
+}
+func (c *float64Converter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *float64Converter) New() pref.Value { return c.def }
+func (c *float64Converter) Zero() pref.Value { return c.def }
+
+type stringConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfString(v.Convert(stringType).String())
+}
+func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value {
+ // pref.Value.String never panics, so we go through an interface
+ // conversion here to check the type.
+ s := v.Interface().(string)
+ if c.goType.Kind() == reflect.Slice && s == "" {
+ return reflect.Zero(c.goType) // ensure empty string is []byte(nil)
+ }
+ return reflect.ValueOf(s).Convert(c.goType)
+}
+func (c *stringConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(string)
+ return ok
+}
+func (c *stringConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *stringConverter) New() pref.Value { return c.def }
+func (c *stringConverter) Zero() pref.Value { return c.def }
+
+type bytesConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ if c.goType.Kind() == reflect.String && v.Len() == 0 {
+ return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil)
+ }
+ return pref.ValueOfBytes(v.Convert(bytesType).Bytes())
+}
+func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Bytes()).Convert(c.goType)
+}
+func (c *bytesConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().([]byte)
+ return ok
+}
+func (c *bytesConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+func (c *bytesConverter) New() pref.Value { return c.def }
+func (c *bytesConverter) Zero() pref.Value { return c.def }
+
+type enumConverter struct {
+ goType reflect.Type
+ def pref.Value
+}
+
+func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter {
+ var def pref.Value
+ if fd.Cardinality() == pref.Repeated {
+ def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+ } else {
+ def = fd.Default()
+ }
+ return &enumConverter{goType, def}
+}
+
+func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfEnum(pref.EnumNumber(v.Int()))
+}
+
+func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value {
+ return reflect.ValueOf(v.Enum()).Convert(c.goType)
+}
+
+func (c *enumConverter) IsValidPB(v pref.Value) bool {
+ _, ok := v.Interface().(pref.EnumNumber)
+ return ok
+}
+
+func (c *enumConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *enumConverter) New() pref.Value {
+ return c.def
+}
+
+func (c *enumConverter) Zero() pref.Value {
+ return c.def
+}
+
+type messageConverter struct {
+ goType reflect.Type
+}
+
+func newMessageConverter(goType reflect.Type) Converter {
+ return &messageConverter{goType}
+}
+
+func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ if m, ok := v.Interface().(pref.ProtoMessage); ok {
+ return pref.ValueOfMessage(m.ProtoReflect())
+ }
+ return pref.ValueOfMessage(legacyWrapMessage(v))
+}
+
+func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value {
+ m := v.Message()
+ var rv reflect.Value
+ if u, ok := m.(unwrapper); ok {
+ rv = reflect.ValueOf(u.protoUnwrap())
+ } else {
+ rv = reflect.ValueOf(m.Interface())
+ }
+ if rv.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType))
+ }
+ return rv
+}
+
+func (c *messageConverter) IsValidPB(v pref.Value) bool {
+ m := v.Message()
+ var rv reflect.Value
+ if u, ok := m.(unwrapper); ok {
+ rv = reflect.ValueOf(u.protoUnwrap())
+ } else {
+ rv = reflect.ValueOf(m.Interface())
+ }
+ return rv.Type() == c.goType
+}
+
+func (c *messageConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *messageConverter) New() pref.Value {
+ return c.PBValueOf(reflect.New(c.goType.Elem()))
+}
+
+func (c *messageConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
new file mode 100644
index 00000000..6fccab52
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -0,0 +1,141 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+ switch {
+ case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice:
+ return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)}
+ case t.Kind() == reflect.Slice:
+ return &listConverter{t, newSingularConverter(t.Elem(), fd)}
+ }
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+type listConverter struct {
+ goType reflect.Type // []T
+ c Converter
+}
+
+func (c *listConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ pv := reflect.New(c.goType)
+ pv.Elem().Set(v)
+ return pref.ValueOfList(&listReflect{pv, c.c})
+}
+
+func (c *listConverter) GoValueOf(v pref.Value) reflect.Value {
+ rv := v.List().(*listReflect).v
+ if rv.IsNil() {
+ return reflect.Zero(c.goType)
+ }
+ return rv.Elem()
+}
+
+func (c *listConverter) IsValidPB(v pref.Value) bool {
+ list, ok := v.Interface().(*listReflect)
+ if !ok {
+ return false
+ }
+ return list.v.Type().Elem() == c.goType
+}
+
+func (c *listConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *listConverter) New() pref.Value {
+ return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c})
+}
+
+func (c *listConverter) Zero() pref.Value {
+ return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c})
+}
+
+type listPtrConverter struct {
+ goType reflect.Type // *[]T
+ c Converter
+}
+
+func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfList(&listReflect{v, c.c})
+}
+
+func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value {
+ return v.List().(*listReflect).v
+}
+
+func (c *listPtrConverter) IsValidPB(v pref.Value) bool {
+ list, ok := v.Interface().(*listReflect)
+ if !ok {
+ return false
+ }
+ return list.v.Type() == c.goType
+}
+
+func (c *listPtrConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *listPtrConverter) New() pref.Value {
+ return c.PBValueOf(reflect.New(c.goType.Elem()))
+}
+
+func (c *listPtrConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+type listReflect struct {
+ v reflect.Value // *[]T
+ conv Converter
+}
+
+func (ls *listReflect) Len() int {
+ if ls.v.IsNil() {
+ return 0
+ }
+ return ls.v.Elem().Len()
+}
+func (ls *listReflect) Get(i int) pref.Value {
+ return ls.conv.PBValueOf(ls.v.Elem().Index(i))
+}
+func (ls *listReflect) Set(i int, v pref.Value) {
+ ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v))
+}
+func (ls *listReflect) Append(v pref.Value) {
+ ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v)))
+}
+func (ls *listReflect) AppendMutable() pref.Value {
+ if _, ok := ls.conv.(*messageConverter); !ok {
+ panic("invalid AppendMutable on list with non-message type")
+ }
+ v := ls.NewElement()
+ ls.Append(v)
+ return v
+}
+func (ls *listReflect) Truncate(i int) {
+ ls.v.Elem().Set(ls.v.Elem().Slice(0, i))
+}
+func (ls *listReflect) NewElement() pref.Value {
+ return ls.conv.New()
+}
+func (ls *listReflect) IsValid() bool {
+ return !ls.v.IsNil()
+}
+func (ls *listReflect) protoUnwrap() interface{} {
+ return ls.v.Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
new file mode 100644
index 00000000..de06b259
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -0,0 +1,121 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type mapConverter struct {
+ goType reflect.Type // map[K]V
+ keyConv, valConv Converter
+}
+
+func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter {
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+ }
+ return &mapConverter{
+ goType: t,
+ keyConv: newSingularConverter(t.Key(), fd.MapKey()),
+ valConv: newSingularConverter(t.Elem(), fd.MapValue()),
+ }
+}
+
+func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value {
+ if v.Type() != c.goType {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType))
+ }
+ return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv})
+}
+
+func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value {
+ return v.Map().(*mapReflect).v
+}
+
+func (c *mapConverter) IsValidPB(v pref.Value) bool {
+ mapv, ok := v.Interface().(*mapReflect)
+ if !ok {
+ return false
+ }
+ return mapv.v.Type() == c.goType
+}
+
+func (c *mapConverter) IsValidGo(v reflect.Value) bool {
+ return v.IsValid() && v.Type() == c.goType
+}
+
+func (c *mapConverter) New() pref.Value {
+ return c.PBValueOf(reflect.MakeMap(c.goType))
+}
+
+func (c *mapConverter) Zero() pref.Value {
+ return c.PBValueOf(reflect.Zero(c.goType))
+}
+
+type mapReflect struct {
+ v reflect.Value // map[K]V
+ keyConv Converter
+ valConv Converter
+}
+
+func (ms *mapReflect) Len() int {
+ return ms.v.Len()
+}
+func (ms *mapReflect) Has(k pref.MapKey) bool {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.v.MapIndex(rk)
+ return rv.IsValid()
+}
+func (ms *mapReflect) Get(k pref.MapKey) pref.Value {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.v.MapIndex(rk)
+ if !rv.IsValid() {
+ return pref.Value{}
+ }
+ return ms.valConv.PBValueOf(rv)
+}
+func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ rv := ms.valConv.GoValueOf(v)
+ ms.v.SetMapIndex(rk, rv)
+}
+func (ms *mapReflect) Clear(k pref.MapKey) {
+ rk := ms.keyConv.GoValueOf(k.Value())
+ ms.v.SetMapIndex(rk, reflect.Value{})
+}
+func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value {
+ if _, ok := ms.valConv.(*messageConverter); !ok {
+ panic("invalid Mutable on map with non-message value type")
+ }
+ v := ms.Get(k)
+ if !v.IsValid() {
+ v = ms.NewValue()
+ ms.Set(k, v)
+ }
+ return v
+}
+func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) {
+ iter := mapRange(ms.v)
+ for iter.Next() {
+ k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
+ v := ms.valConv.PBValueOf(iter.Value())
+ if !f(k, v) {
+ return
+ }
+ }
+}
+func (ms *mapReflect) NewValue() pref.Value {
+ return ms.valConv.New()
+}
+func (ms *mapReflect) IsValid() bool {
+ return !ms.v.IsNil()
+}
+func (ms *mapReflect) protoUnwrap() interface{} {
+ return ms.v.Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
new file mode 100644
index 00000000..85ba1d3b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -0,0 +1,274 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "math/bits"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type unmarshalOptions struct {
+ flags protoiface.UnmarshalInputFlags
+ resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+func (o unmarshalOptions) Options() proto.UnmarshalOptions {
+ return proto.UnmarshalOptions{
+ Merge: true,
+ AllowPartial: true,
+ DiscardUnknown: o.DiscardUnknown(),
+ Resolver: o.resolver,
+ }
+}
+
+func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 }
+
+func (o unmarshalOptions) IsDefault() bool {
+ return o.flags == 0 && o.resolver == preg.GlobalTypes
+}
+
+var lazyUnmarshalOptions = unmarshalOptions{
+ resolver: preg.GlobalTypes,
+}
+
+type unmarshalOutput struct {
+ n int // number of bytes consumed
+ initialized bool
+}
+
+// unmarshal is protoreflect.Methods.Unmarshal.
+func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{
+ flags: in.Flags,
+ resolver: in.Resolver,
+ })
+ var flags piface.UnmarshalOutputFlags
+ if out.initialized {
+ flags |= piface.UnmarshalInitialized
+ }
+ return piface.UnmarshalOutput{
+ Flags: flags,
+ }, err
+}
+
+// errUnknown is returned during unmarshaling to indicate a parse error that
+// should result in a field being placed in the unknown fields section (for example,
+// when the wire type doesn't match) as opposed to the entire unmarshal operation
+// failing (for example, when a field extends past the available input).
+//
+// This is a sentinel error which should never be visible to the user.
+var errUnknown = errors.New("unknown")
+
+func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ mi.init()
+ if flags.ProtoLegacy && mi.isMessageSet {
+ return unmarshalMessageSet(mi, b, p, opts)
+ }
+ initialized := true
+ var requiredMask uint64
+ var exts *map[int32]ExtensionField
+ start := len(b)
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, errors.New("invalid field number")
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if num != groupTag {
+ return out, errors.New("mismatching end group marker")
+ }
+ groupTag = 0
+ break
+ }
+
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ var n int
+ err := errUnknown
+ switch {
+ case f != nil:
+ if f.funcs.unmarshal == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+ n = o.n
+ if err != nil {
+ break
+ }
+ requiredMask |= f.validation.requiredBit
+ if f.funcs.isInit != nil && !o.initialized {
+ initialized = false
+ }
+ default:
+ // Possible extension.
+ if exts == nil && mi.extensionOffset.IsValid() {
+ exts = p.Apply(mi.extensionOffset).Extensions()
+ if *exts == nil {
+ *exts = make(map[int32]ExtensionField)
+ }
+ }
+ if exts == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+ if err != nil {
+ break
+ }
+ n = o.n
+ if !o.initialized {
+ initialized = false
+ }
+ }
+ if err != nil {
+ if err != errUnknown {
+ return out, err
+ }
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, protowire.ParseError(n)
+ }
+ if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+ u := p.Apply(mi.unknownOffset).Bytes()
+ *u = protowire.AppendTag(*u, num, wtyp)
+ *u = append(*u, b[:n]...)
+ }
+ }
+ b = b[n:]
+ }
+ if groupTag != 0 {
+ return out, errors.New("missing end group marker")
+ }
+ if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+ initialized = false
+ }
+ if initialized {
+ out.initialized = true
+ }
+ out.n = start - len(b)
+ return out, nil
+}
+
+func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ x := exts[int32(num)]
+ xt := x.Type()
+ if xt == nil {
+ var err error
+ xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num)
+ if err != nil {
+ if err == preg.NotFound {
+ return out, errUnknown
+ }
+ return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err)
+ }
+ }
+ xi := getExtensionFieldInfo(xt)
+ if xi.funcs.unmarshal == nil {
+ return out, errUnknown
+ }
+ if flags.LazyUnmarshalExtensions {
+ if opts.IsDefault() && x.canLazy(xt) {
+ out, valid := skipExtension(b, xi, num, wtyp, opts)
+ switch valid {
+ case ValidationValid:
+ if out.initialized {
+ x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n])
+ exts[int32(num)] = x
+ return out, nil
+ }
+ case ValidationInvalid:
+ return out, errors.New("invalid wire format")
+ case ValidationUnknown:
+ }
+ }
+ }
+ ival := x.Value()
+ if !ival.IsValid() && xi.unmarshalNeedsValue {
+ // Create a new message, list, or map value to fill in.
+ // For enums, create a prototype value to let the unmarshal func know the
+ // concrete type.
+ ival = xt.New()
+ }
+ v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts)
+ if err != nil {
+ return out, err
+ }
+ if xi.funcs.isInit == nil {
+ out.initialized = true
+ }
+ x.Set(xt, v)
+ exts[int32(num)] = x
+ return out, nil
+}
+
+func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+ if xi.validation.mi == nil {
+ return out, ValidationUnknown
+ }
+ xi.validation.mi.init()
+ switch xi.validation.typ {
+ case validationTypeMessage:
+ if wtyp != protowire.BytesType {
+ return out, ValidationUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, ValidationUnknown
+ }
+ out, st := xi.validation.mi.validate(v, 0, opts)
+ out.n = n
+ return out, st
+ case validationTypeGroup:
+ if wtyp != protowire.StartGroupType {
+ return out, ValidationUnknown
+ }
+ out, st := xi.validation.mi.validate(b, num, opts)
+ return out, st
+ default:
+ return out, ValidationUnknown
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
new file mode 100644
index 00000000..8c8a794c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -0,0 +1,199 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/flags"
+ proto "google.golang.org/protobuf/proto"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type marshalOptions struct {
+ flags piface.MarshalInputFlags
+}
+
+func (o marshalOptions) Options() proto.MarshalOptions {
+ return proto.MarshalOptions{
+ AllowPartial: true,
+ Deterministic: o.Deterministic(),
+ UseCachedSize: o.UseCachedSize(),
+ }
+}
+
+func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 }
+func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 }
+
+// size is protoreflect.Methods.Size.
+func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ size := mi.sizePointer(p, marshalOptions{
+ flags: in.Flags,
+ })
+ return piface.SizeOutput{Size: size}
+}
+
+func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
+ mi.init()
+ if p.IsNil() {
+ return 0
+ }
+ if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
+ return int(size)
+ }
+ }
+ return mi.sizePointerSlow(p, opts)
+}
+
+func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) {
+ if flags.ProtoLegacy && mi.isMessageSet {
+ size = sizeMessageSet(mi, p, opts)
+ if mi.sizecacheOffset.IsValid() {
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ }
+ return size
+ }
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ size += mi.sizeExtensions(e, opts)
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.size == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ size += f.funcs.size(fptr, f, opts)
+ }
+ if mi.unknownOffset.IsValid() {
+ u := *p.Apply(mi.unknownOffset).Bytes()
+ size += len(u)
+ }
+ if mi.sizecacheOffset.IsValid() {
+ if size > math.MaxInt32 {
+ // The size is too large for the int32 sizecache field.
+ // We will need to recompute the size when encoding;
+ // unfortunately expensive, but better than invalid output.
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+ } else {
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ }
+ }
+ return size
+}
+
+// marshal is protoreflect.Methods.Marshal.
+func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) {
+ var p pointer
+ if ms, ok := in.Message.(*messageState); ok {
+ p = ms.pointer()
+ } else {
+ p = in.Message.(*messageReflectWrapper).pointer()
+ }
+ b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{
+ flags: in.Flags,
+ })
+ return piface.MarshalOutput{Buf: b}, err
+}
+
+func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) {
+ mi.init()
+ if p.IsNil() {
+ return b, nil
+ }
+ if flags.ProtoLegacy && mi.isMessageSet {
+ return marshalMessageSet(mi, b, p, opts)
+ }
+ var err error
+ // The old marshaler encodes extensions at beginning.
+ if mi.extensionOffset.IsValid() {
+ e := p.Apply(mi.extensionOffset).Extensions()
+ // TODO: Special handling for MessageSet?
+ b, err = mi.appendExtensions(b, e, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.marshal == nil {
+ continue
+ }
+ fptr := p.Apply(f.offset)
+ if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ if mi.unknownOffset.IsValid() && !mi.isMessageSet {
+ u := *p.Apply(mi.unknownOffset).Bytes()
+ b = append(b, u...)
+ }
+ return b, nil
+}
+
+func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
+ if ext == nil {
+ return 0
+ }
+ for _, x := range *ext {
+ xi := getExtensionFieldInfo(x.Type())
+ if xi.funcs.size == nil {
+ continue
+ }
+ n += xi.funcs.size(x.Value(), xi.tagsize, opts)
+ }
+ return n
+}
+
+func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) {
+ if ext == nil {
+ return b, nil
+ }
+
+ switch len(*ext) {
+ case 0:
+ return b, nil
+ case 1:
+ // Fast-path for one extension: Don't bother sorting the keys.
+ var err error
+ for _, x := range *ext {
+ xi := getExtensionFieldInfo(x.Type())
+ b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
+ }
+ return b, err
+ default:
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(*ext))
+ for k := range *ext {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+ var err error
+ for _, k := range keys {
+ x := (*ext)[int32(k)]
+ xi := getExtensionFieldInfo(x.Type())
+ b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go
new file mode 100644
index 00000000..8c1eab4b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type EnumInfo struct {
+ GoReflectType reflect.Type // int32 kind
+ Desc pref.EnumDescriptor
+}
+
+func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum {
+ return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum)
+}
+func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
new file mode 100644
index 00000000..e904fd99
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+ "sync"
+ "sync/atomic"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ExtensionInfo implements ExtensionType.
+//
+// This type contains a number of exported fields for legacy compatibility.
+// The only non-deprecated use of this type is through the methods of the
+// ExtensionType interface.
+type ExtensionInfo struct {
+ // An ExtensionInfo may exist in several stages of initialization.
+ //
+ // extensionInfoUninitialized: Some or all of the legacy exported
+ // fields may be set, but none of the unexported fields have been
+ // initialized. This is the starting state for an ExtensionInfo
+ // in legacy generated code.
+ //
+ // extensionInfoDescInit: The desc field is set, but other unexported fields
+ // may not be initialized. Legacy exported fields may or may not be set.
+ // This is the starting state for an ExtensionInfo in newly generated code.
+ //
+ // extensionInfoFullInit: The ExtensionInfo is fully initialized.
+ // This state is only entered after lazy initialization is complete.
+ init uint32
+ mu sync.Mutex
+
+ goType reflect.Type
+ desc extensionTypeDescriptor
+ conv Converter
+ info *extensionFieldInfo // for fast-path method implementations
+
+ // ExtendedType is a typed nil-pointer to the parent message type that
+ // is being extended. It is possible for this to be unpopulated in v2
+ // since the message may no longer implement the MessageV1 interface.
+ //
+ // Deprecated: Use the ExtendedType method instead.
+ ExtendedType piface.MessageV1
+
+ // ExtensionType is the zero value of the extension type.
+ //
+ // For historical reasons, reflect.TypeOf(ExtensionType) and the
+ // type returned by InterfaceOf may not be identical.
+ //
+ // Deprecated: Use InterfaceOf(xt.Zero()) instead.
+ ExtensionType interface{}
+
+ // Field is the field number of the extension.
+ //
+ // Deprecated: Use the Descriptor().Number method instead.
+ Field int32
+
+ // Name is the fully qualified name of extension.
+ //
+ // Deprecated: Use the Descriptor().FullName method instead.
+ Name string
+
+ // Tag is the protobuf struct tag used in the v1 API.
+ //
+ // Deprecated: Do not use.
+ Tag string
+
+ // Filename is the proto filename in which the extension is defined.
+ //
+ // Deprecated: Use Descriptor().ParentFile().Path() instead.
+ Filename string
+}
+
+// Stages of initialization: See the ExtensionInfo.init field.
+const (
+ extensionInfoUninitialized = 0
+ extensionInfoDescInit = 1
+ extensionInfoFullInit = 2
+)
+
+func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) {
+ xi.goType = goType
+ xi.desc = extensionTypeDescriptor{xd, xi}
+ xi.init = extensionInfoDescInit
+}
+
+func (xi *ExtensionInfo) New() pref.Value {
+ return xi.lazyInit().New()
+}
+func (xi *ExtensionInfo) Zero() pref.Value {
+ return xi.lazyInit().Zero()
+}
+func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value {
+ return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
+}
+func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} {
+ return xi.lazyInit().GoValueOf(v).Interface()
+}
+func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool {
+ return xi.lazyInit().IsValidPB(v)
+}
+func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+ return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
+}
+func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor {
+ if atomic.LoadUint32(&xi.init) < extensionInfoDescInit {
+ xi.lazyInitSlow()
+ }
+ return &xi.desc
+}
+
+func (xi *ExtensionInfo) lazyInit() Converter {
+ if atomic.LoadUint32(&xi.init) < extensionInfoFullInit {
+ xi.lazyInitSlow()
+ }
+ return xi.conv
+}
+
+func (xi *ExtensionInfo) lazyInitSlow() {
+ xi.mu.Lock()
+ defer xi.mu.Unlock()
+
+ if xi.init == extensionInfoFullInit {
+ return
+ }
+ defer atomic.StoreUint32(&xi.init, extensionInfoFullInit)
+
+ if xi.desc.ExtensionDescriptor == nil {
+ xi.initFromLegacy()
+ }
+ if !xi.desc.ExtensionDescriptor.IsPlaceholder() {
+ if xi.ExtensionType == nil {
+ xi.initToLegacy()
+ }
+ xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor)
+ xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor)
+ xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType)
+ }
+}
+
+type extensionTypeDescriptor struct {
+ pref.ExtensionDescriptor
+ xi *ExtensionInfo
+}
+
+func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType {
+ return xtd.xi
+}
+func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor {
+ return xtd.ExtensionDescriptor
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
new file mode 100644
index 00000000..f7d7ffb5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -0,0 +1,219 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// legacyEnumName returns the name of enums used in legacy code.
+// It is neither the protobuf full name nor the qualified Go name,
+// but rather an odd hybrid of both.
+func legacyEnumName(ed pref.EnumDescriptor) string {
+ var protoPkg string
+ enumName := string(ed.FullName())
+ if fd := ed.ParentFile(); fd != nil {
+ protoPkg = string(fd.Package())
+ enumName = strings.TrimPrefix(enumName, protoPkg+".")
+ }
+ if protoPkg == "" {
+ return strs.GoCamelCase(enumName)
+ }
+ return protoPkg + "." + strs.GoCamelCase(enumName)
+}
+
+// legacyWrapEnum wraps v as a protoreflect.Enum,
+// where v must be a int32 kind and not implement the v2 API already.
+func legacyWrapEnum(v reflect.Value) pref.Enum {
+ et := legacyLoadEnumType(v.Type())
+ return et.New(pref.EnumNumber(v.Int()))
+}
+
+var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType
+
+// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t,
+// where t must be an int32 kind and not implement the v2 API already.
+func legacyLoadEnumType(t reflect.Type) pref.EnumType {
+ // Fast-path: check if a EnumType is cached for this concrete type.
+ if et, ok := legacyEnumTypeCache.Load(t); ok {
+ return et.(pref.EnumType)
+ }
+
+ // Slow-path: derive enum descriptor and initialize EnumType.
+ var et pref.EnumType
+ ed := LegacyLoadEnumDesc(t)
+ et = &legacyEnumType{
+ desc: ed,
+ goType: t,
+ }
+ if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok {
+ return et.(pref.EnumType)
+ }
+ return et
+}
+
+type legacyEnumType struct {
+ desc pref.EnumDescriptor
+ goType reflect.Type
+ m sync.Map // map[protoreflect.EnumNumber]proto.Enum
+}
+
+func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum {
+ if e, ok := t.m.Load(n); ok {
+ return e.(pref.Enum)
+ }
+ e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType}
+ t.m.Store(n, e)
+ return e
+}
+func (t *legacyEnumType) Descriptor() pref.EnumDescriptor {
+ return t.desc
+}
+
+type legacyEnumWrapper struct {
+ num pref.EnumNumber
+ pbTyp pref.EnumType
+ goTyp reflect.Type
+}
+
+func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor {
+ return e.pbTyp.Descriptor()
+}
+func (e *legacyEnumWrapper) Type() pref.EnumType {
+ return e.pbTyp
+}
+func (e *legacyEnumWrapper) Number() pref.EnumNumber {
+ return e.num
+}
+func (e *legacyEnumWrapper) ProtoReflect() pref.Enum {
+ return e
+}
+func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+ v := reflect.New(e.goTyp).Elem()
+ v.SetInt(int64(e.num))
+ return v.Interface()
+}
+
+var (
+ _ pref.Enum = (*legacyEnumWrapper)(nil)
+ _ unwrapper = (*legacyEnumWrapper)(nil)
+)
+
+var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
+
+// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type,
+// which must be an int32 kind and not implement the v2 API already.
+//
+// This is exported for testing purposes.
+func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+ // Fast-path: check if an EnumDescriptor is cached for this concrete type.
+ if ed, ok := legacyEnumDescCache.Load(t); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+
+ // Slow-path: initialize EnumDescriptor from the raw descriptor.
+ ev := reflect.Zero(t).Interface()
+ if _, ok := ev.(pref.Enum); ok {
+ panic(fmt.Sprintf("%v already implements proto.Enum", t))
+ }
+ edV1, ok := ev.(enumV1)
+ if !ok {
+ return aberrantLoadEnumDesc(t)
+ }
+ b, idxs := edV1.EnumDescriptor()
+
+ var ed pref.EnumDescriptor
+ if len(idxs) == 1 {
+ ed = legacyLoadFileDesc(b).Enums().Get(idxs[0])
+ } else {
+ md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
+ for _, i := range idxs[1 : len(idxs)-1] {
+ md = md.Messages().Get(i)
+ }
+ ed = md.Enums().Get(idxs[len(idxs)-1])
+ }
+ if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok {
+ return ed.(protoreflect.EnumDescriptor)
+ }
+ return ed
+}
+
+var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor
+
+// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type,
+// which must not implement protoreflect.Enum or enumV1.
+//
+// If the type does not implement enumV1, then there is no reliable
+// way to derive the original protobuf type information.
+// We are unable to use the global enum registry since it is
+// unfortunately keyed by the protobuf full name, which we also do not know.
+// Thus, this produces some bogus enum descriptor based on the Go type name.
+func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor {
+ // Fast-path: check if an EnumDescriptor is cached for this concrete type.
+ if ed, ok := aberrantEnumDescCache.Load(t); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+
+ // Slow-path: construct a bogus, but unique EnumDescriptor.
+ ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
+ ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
+ ed.L0.ParentFile = filedesc.SurrogateProto3
+ ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
+
+ // TODO: Use the presence of a UnmarshalJSON method to determine proto2?
+
+ vd := &ed.L2.Values.List[0]
+ vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN
+ vd.L0.ParentFile = ed.L0.ParentFile
+ vd.L0.Parent = ed
+
+ // TODO: We could use the String method to obtain some enum value names by
+ // starting at 0 and print the enum until it produces invalid identifiers.
+ // An exhaustive query is clearly impractical, but can be best-effort.
+
+ if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok {
+ return ed.(pref.EnumDescriptor)
+ }
+ return ed
+}
+
+// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type
+// The provided name is not guaranteed to be stable nor universally unique.
+// It should be sufficiently unique within a program.
+//
+// This is exported for testing purposes.
+func AberrantDeriveFullName(t reflect.Type) pref.FullName {
+ sanitize := func(r rune) rune {
+ switch {
+ case r == '/':
+ return '.'
+ case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9':
+ return r
+ default:
+ return '_'
+ }
+ }
+ prefix := strings.Map(sanitize, t.PkgPath())
+ suffix := strings.Map(sanitize, t.Name())
+ if suffix == "" {
+ suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer())
+ }
+
+ ss := append(strings.Split(prefix, "."), suffix)
+ for i, s := range ss {
+ if s == "" || ('0' <= s[0] && s[0] <= '9') {
+ ss[i] = "x" + s
+ }
+ }
+ return pref.FullName(strings.Join(ss, "."))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
new file mode 100644
index 00000000..c3d741c2
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "hash/crc32"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/internal/errors"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// These functions exist to support exported APIs in generated protobufs.
+// While these are deprecated, they cannot be removed for compatibility reasons.
+
+// LegacyEnumName returns the name of enums used in legacy code.
+func (Export) LegacyEnumName(ed pref.EnumDescriptor) string {
+ return legacyEnumName(ed)
+}
+
+// LegacyMessageTypeOf returns the protoreflect.MessageType for m,
+// with name used as the message name if necessary.
+func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType {
+ if mv := (Export{}).protoMessageV2Of(m); mv != nil {
+ return mv.ProtoReflect().Type()
+ }
+ return legacyLoadMessageInfo(reflect.TypeOf(m), name)
+}
+
+// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input.
+// The input can either be a string representing the enum value by name,
+// or a number representing the enum number itself.
+func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) {
+ if b[0] == '"' {
+ var name pref.Name
+ if err := json.Unmarshal(b, &name); err != nil {
+ return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
+ }
+ ev := ed.Values().ByName(name)
+ if ev == nil {
+ return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name)
+ }
+ return ev.Number(), nil
+ } else {
+ var num pref.EnumNumber
+ if err := json.Unmarshal(b, &num); err != nil {
+ return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b)
+ }
+ return num, nil
+ }
+}
+
+// CompressGZIP compresses the input as a GZIP-encoded file.
+// The current implementation does no compression.
+func (Export) CompressGZIP(in []byte) (out []byte) {
+ // RFC 1952, section 2.3.1.
+ var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff}
+
+ // RFC 1951, section 3.2.4.
+ var blockHeader [5]byte
+ const maxBlockSize = math.MaxUint16
+ numBlocks := 1 + len(in)/maxBlockSize
+
+ // RFC 1952, section 2.3.1.
+ var gzipFooter [8]byte
+ binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in))
+ binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in)))
+
+ // Encode the input without compression using raw DEFLATE blocks.
+ out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter))
+ out = append(out, gzipHeader[:]...)
+ for blockHeader[0] == 0 {
+ blockSize := maxBlockSize
+ if blockSize > len(in) {
+ blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3.
+ blockSize = len(in)
+ }
+ binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000)
+ binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff)
+ out = append(out, blockHeader[:]...)
+ out = append(out, in[:blockSize]...)
+ in = in[blockSize:]
+ }
+ out = append(out, gzipFooter[:]...)
+ return out
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
new file mode 100644
index 00000000..61757ce5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -0,0 +1,175 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/internal/descopts"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ ptag "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (xi *ExtensionInfo) initToLegacy() {
+ xd := xi.desc
+ var parent piface.MessageV1
+ messageName := xd.ContainingMessage().FullName()
+ if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil {
+ // Create a new parent message and unwrap it if possible.
+ mv := mt.New().Interface()
+ t := reflect.TypeOf(mv)
+ if mv, ok := mv.(unwrapper); ok {
+ t = reflect.TypeOf(mv.protoUnwrap())
+ }
+
+ // Check whether the message implements the legacy v1 Message interface.
+ mz := reflect.Zero(t).Interface()
+ if mz, ok := mz.(piface.MessageV1); ok {
+ parent = mz
+ }
+ }
+
+ // Determine the v1 extension type, which is unfortunately not the same as
+ // the v2 ExtensionType.GoType.
+ extType := xi.goType
+ switch extType.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields
+ }
+
+ // Reconstruct the legacy enum full name.
+ var enumName string
+ if xd.Kind() == pref.EnumKind {
+ enumName = legacyEnumName(xd.Enum())
+ }
+
+ // Derive the proto file that the extension was declared within.
+ var filename string
+ if fd := xd.ParentFile(); fd != nil {
+ filename = fd.Path()
+ }
+
+ // For MessageSet extensions, the name used is the parent message.
+ name := xd.FullName()
+ if messageset.IsMessageSetExtension(xd) {
+ name = name.Parent()
+ }
+
+ xi.ExtendedType = parent
+ xi.ExtensionType = reflect.Zero(extType).Interface()
+ xi.Field = int32(xd.Number())
+ xi.Name = string(name)
+ xi.Tag = ptag.Marshal(xd, enumName)
+ xi.Filename = filename
+}
+
+// initFromLegacy initializes an ExtensionInfo from
+// the contents of the deprecated exported fields of the type.
+func (xi *ExtensionInfo) initFromLegacy() {
+ // The v1 API returns "type incomplete" descriptors where only the
+ // field number is specified. In such a case, use a placeholder.
+ if xi.ExtendedType == nil || xi.ExtensionType == nil {
+ xd := placeholderExtension{
+ name: pref.FullName(xi.Name),
+ number: pref.FieldNumber(xi.Field),
+ }
+ xi.desc = extensionTypeDescriptor{xd, xi}
+ return
+ }
+
+ // Resolve enum or message dependencies.
+ var ed pref.EnumDescriptor
+ var md pref.MessageDescriptor
+ t := reflect.TypeOf(xi.ExtensionType)
+ isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
+ isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+ if isOptional || isRepeated {
+ t = t.Elem()
+ }
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.Enum:
+ ed = v.Descriptor()
+ case enumV1:
+ ed = LegacyLoadEnumDesc(t)
+ case pref.ProtoMessage:
+ md = v.ProtoReflect().Descriptor()
+ case messageV1:
+ md = LegacyLoadMessageDesc(t)
+ }
+
+ // Derive basic field information from the struct tag.
+ var evs pref.EnumValueDescriptors
+ if ed != nil {
+ evs = ed.Values()
+ }
+ fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field)
+
+ // Construct a v2 ExtensionType.
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)}
+ xd.L0.ParentFile = filedesc.SurrogateProto2
+ xd.L0.FullName = pref.FullName(xi.Name)
+ xd.L1.Number = pref.FieldNumber(xi.Field)
+ xd.L1.Cardinality = fd.L1.Cardinality
+ xd.L1.Kind = fd.L1.Kind
+ xd.L2.IsPacked = fd.L1.IsPacked
+ xd.L2.Default = fd.L1.Default
+ xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
+ xd.L2.Enum = ed
+ xd.L2.Message = md
+
+ // Derive real extension field name for MessageSets.
+ if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName {
+ xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName)
+ }
+
+ tt := reflect.TypeOf(xi.ExtensionType)
+ if isOptional {
+ tt = tt.Elem()
+ }
+ xi.goType = tt
+ xi.desc = extensionTypeDescriptor{xd, xi}
+}
+
+type placeholderExtension struct {
+ name pref.FullName
+ number pref.FieldNumber
+}
+
+func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil }
+func (x placeholderExtension) Parent() pref.Descriptor { return nil }
+func (x placeholderExtension) Index() int { return 0 }
+func (x placeholderExtension) Syntax() pref.Syntax { return 0 }
+func (x placeholderExtension) Name() pref.Name { return x.name.Name() }
+func (x placeholderExtension) FullName() pref.FullName { return x.name }
+func (x placeholderExtension) IsPlaceholder() bool { return true }
+func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field }
+func (x placeholderExtension) Number() pref.FieldNumber { return x.number }
+func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 }
+func (x placeholderExtension) Kind() pref.Kind { return 0 }
+func (x placeholderExtension) HasJSONName() bool { return false }
+func (x placeholderExtension) JSONName() string { return "" }
+func (x placeholderExtension) HasPresence() bool { return false }
+func (x placeholderExtension) HasOptionalKeyword() bool { return false }
+func (x placeholderExtension) IsExtension() bool { return true }
+func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsPacked() bool { return false }
+func (x placeholderExtension) IsList() bool { return false }
+func (x placeholderExtension) IsMap() bool { return false }
+func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil }
+func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil }
+func (x placeholderExtension) HasDefault() bool { return false }
+func (x placeholderExtension) Default() pref.Value { return pref.Value{} }
+func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil }
+func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil }
+func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil }
+func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil }
+func (x placeholderExtension) Message() pref.MessageDescriptor { return nil }
+func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return }
+func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
new file mode 100644
index 00000000..9ab09108
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
@@ -0,0 +1,81 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "sync"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Every enum and message type generated by protoc-gen-go since commit 2fc053c5
+// on February 25th, 2016 has had a method to get the raw descriptor.
+// Types that were not generated by protoc-gen-go or were generated prior
+// to that version are not supported.
+//
+// The []byte returned is the encoded form of a FileDescriptorProto message
+// compressed using GZIP. The []int is the path from the top-level file
+// to the specific message or enum declaration.
+type (
+ enumV1 interface {
+ EnumDescriptor() ([]byte, []int)
+ }
+ messageV1 interface {
+ Descriptor() ([]byte, []int)
+ }
+)
+
+var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor
+
+// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message.
+//
+// This assumes that b is immutable and that b does not refer to part of a
+// concatenated series of GZIP files (which would require shenanigans that
+// rely on the concatenation properties of both protobufs and GZIP).
+// File descriptors generated by protoc-gen-go do not rely on that property.
+func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor {
+ // Fast-path: check whether we already have a cached file descriptor.
+ if fd, ok := legacyFileDescCache.Load(&b[0]); ok {
+ return fd.(protoreflect.FileDescriptor)
+ }
+
+ // Slow-path: decompress and unmarshal the file descriptor proto.
+ zr, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ panic(err)
+ }
+ b2, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(err)
+ }
+
+ fd := filedesc.Builder{
+ RawDescriptor: b2,
+ FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry
+ }.Build().File
+ if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok {
+ return fd.(protoreflect.FileDescriptor)
+ }
+ return fd
+}
+
+type resolverOnly struct {
+ reg *protoregistry.Files
+}
+
+func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ return r.reg.FindFileByPath(path)
+}
+func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ return r.reg.FindDescriptorByName(name)
+}
+func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error {
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
new file mode 100644
index 00000000..06c68e11
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -0,0 +1,502 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/descopts"
+ ptag "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// legacyWrapMessage wraps v as a protoreflect.Message,
+// where v must be a *struct kind and not implement the v2 API already.
+func legacyWrapMessage(v reflect.Value) pref.Message {
+ typ := v.Type()
+ if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct {
+ return aberrantMessage{v: v}
+ }
+ mt := legacyLoadMessageInfo(typ, "")
+ return mt.MessageOf(v.Interface())
+}
+
+var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo
+
+// legacyLoadMessageInfo dynamically loads a *MessageInfo for t,
+// where t must be a *struct kind and not implement the v2 API already.
+// The provided name is used if it cannot be determined from the message.
+func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo {
+ // Fast-path: check if a MessageInfo is cached for this concrete type.
+ if mt, ok := legacyMessageTypeCache.Load(t); ok {
+ return mt.(*MessageInfo)
+ }
+
+ // Slow-path: derive message descriptor and initialize MessageInfo.
+ mi := &MessageInfo{
+ Desc: legacyLoadMessageDesc(t, name),
+ GoReflectType: t,
+ }
+
+ v := reflect.Zero(t).Interface()
+ if _, ok := v.(legacyMarshaler); ok {
+ mi.methods.Marshal = legacyMarshal
+
+ // We have no way to tell whether the type's Marshal method
+ // supports deterministic serialization or not, but this
+ // preserves the v1 implementation's behavior of always
+ // calling Marshal methods when present.
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ }
+ if _, ok := v.(legacyUnmarshaler); ok {
+ mi.methods.Unmarshal = legacyUnmarshal
+ }
+ if _, ok := v.(legacyMerger); ok {
+ mi.methods.Merge = legacyMerge
+ }
+
+ if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok {
+ return mi.(*MessageInfo)
+ }
+ return mi
+}
+
+var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor
+
+// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type,
+// which must be a *struct kind and not implement the v2 API already.
+//
+// This is exported for testing purposes.
+func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor {
+ return legacyLoadMessageDesc(t, "")
+}
+func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ // Fast-path: check if a MessageDescriptor is cached for this concrete type.
+ if mi, ok := legacyMessageDescCache.Load(t); ok {
+ return mi.(pref.MessageDescriptor)
+ }
+
+ // Slow-path: initialize MessageDescriptor from the raw descriptor.
+ mv := reflect.Zero(t).Interface()
+ if _, ok := mv.(pref.ProtoMessage); ok {
+ panic(fmt.Sprintf("%v already implements proto.Message", t))
+ }
+ mdV1, ok := mv.(messageV1)
+ if !ok {
+ return aberrantLoadMessageDesc(t, name)
+ }
+
+ // If this is a dynamic message type where there isn't a 1-1 mapping between
+ // Go and protobuf types, calling the Descriptor method on the zero value of
+ // the message type isn't likely to work. If it panics, swallow the panic and
+ // continue as if the Descriptor method wasn't present.
+ b, idxs := func() ([]byte, []int) {
+ defer func() {
+ recover()
+ }()
+ return mdV1.Descriptor()
+ }()
+ if b == nil {
+ return aberrantLoadMessageDesc(t, name)
+ }
+
+ // If the Go type has no fields, then this might be a proto3 empty message
+ // from before the size cache was added. If there are any fields, check to
+ // see that at least one of them looks like something we generated.
+ if nfield := t.Elem().NumField(); nfield > 0 {
+ hasProtoField := false
+ for i := 0; i < nfield; i++ {
+ f := t.Elem().Field(i)
+ if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") {
+ hasProtoField = true
+ break
+ }
+ }
+ if !hasProtoField {
+ return aberrantLoadMessageDesc(t, name)
+ }
+ }
+
+ md := legacyLoadFileDesc(b).Messages().Get(idxs[0])
+ for _, i := range idxs[1:] {
+ md = md.Messages().Get(i)
+ }
+ if name != "" && md.FullName() != name {
+ panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name))
+ }
+ if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok {
+ return md.(protoreflect.MessageDescriptor)
+ }
+ return md
+}
+
+var (
+ aberrantMessageDescLock sync.Mutex
+ aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor
+)
+
+// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type,
+// which must not implement protoreflect.ProtoMessage or messageV1.
+//
+// This is a best-effort derivation of the message descriptor using the protobuf
+// tags on the struct fields.
+func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ aberrantMessageDescLock.Lock()
+ defer aberrantMessageDescLock.Unlock()
+ if aberrantMessageDescCache == nil {
+ aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor)
+ }
+ return aberrantLoadMessageDescReentrant(t, name)
+}
+func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor {
+ // Fast-path: check if an MessageDescriptor is cached for this concrete type.
+ if md, ok := aberrantMessageDescCache[t]; ok {
+ return md
+ }
+
+ // Slow-path: construct a descriptor from the Go struct type (best-effort).
+ // Cache the MessageDescriptor early on so that we can resolve internal
+ // cyclic references.
+ md := &filedesc.Message{L2: new(filedesc.MessageL2)}
+ md.L0.FullName = aberrantDeriveMessageName(t, name)
+ md.L0.ParentFile = filedesc.SurrogateProto2
+ aberrantMessageDescCache[t] = md
+
+ if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+ return md
+ }
+
+ // Try to determine if the message is using proto3 by checking scalars.
+ for i := 0; i < t.Elem().NumField(); i++ {
+ f := t.Elem().Field(i)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ switch f.Type.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ md.L0.ParentFile = filedesc.SurrogateProto3
+ }
+ for _, s := range strings.Split(tag, ",") {
+ if s == "proto3" {
+ md.L0.ParentFile = filedesc.SurrogateProto3
+ }
+ }
+ }
+ }
+
+ // Obtain a list of oneof wrapper types.
+ var oneofWrappers []reflect.Type
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := t.MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ for _, v := range vs {
+ oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
+ }
+ }
+ }
+ }
+ }
+
+ // Obtain a list of the extension ranges.
+ if fn, ok := t.MethodByName("ExtensionRangeArray"); ok {
+ vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0]
+ for i := 0; i < vs.Len(); i++ {
+ v := vs.Index(i)
+ md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{
+ pref.FieldNumber(v.FieldByName("Start").Int()),
+ pref.FieldNumber(v.FieldByName("End").Int() + 1),
+ })
+ md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil)
+ }
+ }
+
+ // Derive the message fields by inspecting the struct fields.
+ for i := 0; i < t.Elem().NumField(); i++ {
+ f := t.Elem().Field(i)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ tagKey := f.Tag.Get("protobuf_key")
+ tagVal := f.Tag.Get("protobuf_val")
+ aberrantAppendField(md, f.Type, tag, tagKey, tagVal)
+ }
+ if tag := f.Tag.Get("protobuf_oneof"); tag != "" {
+ n := len(md.L2.Oneofs.List)
+ md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{})
+ od := &md.L2.Oneofs.List[n]
+ od.L0.FullName = md.FullName().Append(pref.Name(tag))
+ od.L0.ParentFile = md.L0.ParentFile
+ od.L0.Parent = md
+ od.L0.Index = n
+
+ for _, t := range oneofWrappers {
+ if t.Implements(f.Type) {
+ f := t.Elem().Field(0)
+ if tag := f.Tag.Get("protobuf"); tag != "" {
+ aberrantAppendField(md, f.Type, tag, "", "")
+ fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
+ fd.L1.ContainingOneof = od
+ od.L1.Fields.List = append(od.L1.Fields.List, fd)
+ }
+ }
+ }
+ }
+ }
+
+ return md
+}
+
+func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName {
+ if name.IsValid() {
+ return name
+ }
+ func() {
+ defer func() { recover() }() // swallow possible nil panics
+ if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok {
+ name = pref.FullName(m.XXX_MessageName())
+ }
+ }()
+ if name.IsValid() {
+ return name
+ }
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return AberrantDeriveFullName(t)
+}
+
+func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) {
+ t := goType
+ isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct
+ isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+ if isOptional || isRepeated {
+ t = t.Elem()
+ }
+ fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field)
+
+ // Append field descriptor to the message.
+ n := len(md.L2.Fields.List)
+ md.L2.Fields.List = append(md.L2.Fields.List, *fd)
+ fd = &md.L2.Fields.List[n]
+ fd.L0.FullName = md.FullName().Append(fd.Name())
+ fd.L0.ParentFile = md.L0.ParentFile
+ fd.L0.Parent = md
+ fd.L0.Index = n
+
+ if fd.L1.IsWeak || fd.L1.HasPacked {
+ fd.L1.Options = func() pref.ProtoMessage {
+ opts := descopts.Field.ProtoReflect().New()
+ if fd.L1.IsWeak {
+ opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
+ }
+ if fd.L1.HasPacked {
+ opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
+ }
+ return opts.Interface()
+ }
+ }
+
+ // Populate Enum and Message.
+ if fd.Enum() == nil && fd.Kind() == pref.EnumKind {
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.Enum:
+ fd.L1.Enum = v.Descriptor()
+ default:
+ fd.L1.Enum = LegacyLoadEnumDesc(t)
+ }
+ }
+ if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) {
+ switch v := reflect.Zero(t).Interface().(type) {
+ case pref.ProtoMessage:
+ fd.L1.Message = v.ProtoReflect().Descriptor()
+ case messageV1:
+ fd.L1.Message = LegacyLoadMessageDesc(t)
+ default:
+ if t.Kind() == reflect.Map {
+ n := len(md.L1.Messages.List)
+ md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)})
+ md2 := &md.L1.Messages.List[n]
+ md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name()))))
+ md2.L0.ParentFile = md.L0.ParentFile
+ md2.L0.Parent = md
+ md2.L0.Index = n
+
+ md2.L1.IsMapEntry = true
+ md2.L2.Options = func() pref.ProtoMessage {
+ opts := descopts.Message.ProtoReflect().New()
+ opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true))
+ return opts.Interface()
+ }
+
+ aberrantAppendField(md2, t.Key(), tagKey, "", "")
+ aberrantAppendField(md2, t.Elem(), tagVal, "", "")
+
+ fd.L1.Message = md2
+ break
+ }
+ fd.L1.Message = aberrantLoadMessageDescReentrant(t, "")
+ }
+ }
+}
+
+type placeholderEnumValues struct {
+ protoreflect.EnumValueDescriptors
+}
+
+func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor {
+ return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n)))
+}
+
+// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder.
+type legacyMarshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder.
+type legacyUnmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder.
+type legacyMerger interface {
+ Merge(protoiface.MessageV1)
+}
+
+var legacyProtoMethods = &piface.Methods{
+ Marshal: legacyMarshal,
+ Unmarshal: legacyUnmarshal,
+ Merge: legacyMerge,
+
+ // We have no way to tell whether the type's Marshal method
+ // supports deterministic serialization or not, but this
+ // preserves the v1 implementation's behavior of always
+ // calling Marshal methods when present.
+ Flags: piface.SupportMarshalDeterministic,
+}
+
+func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) {
+ v := in.Message.(unwrapper).protoUnwrap()
+ marshaler, ok := v.(legacyMarshaler)
+ if !ok {
+ return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v)
+ }
+ out, err := marshaler.Marshal()
+ if in.Buf != nil {
+ out = append(in.Buf, out...)
+ }
+ return piface.MarshalOutput{
+ Buf: out,
+ }, err
+}
+
+func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) {
+ v := in.Message.(unwrapper).protoUnwrap()
+ unmarshaler, ok := v.(legacyUnmarshaler)
+ if !ok {
+ return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v)
+ }
+ return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf)
+}
+
+func legacyMerge(in piface.MergeInput) piface.MergeOutput {
+ dstv := in.Destination.(unwrapper).protoUnwrap()
+ merger, ok := dstv.(legacyMerger)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ merger.Merge(Export{}.ProtoMessageV1Of(in.Source))
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+}
+
+// aberrantMessageType implements MessageType for all types other than pointer-to-struct.
+type aberrantMessageType struct {
+ t reflect.Type
+}
+
+func (mt aberrantMessageType) New() pref.Message {
+ return aberrantMessage{reflect.Zero(mt.t)}
+}
+func (mt aberrantMessageType) Zero() pref.Message {
+ return aberrantMessage{reflect.Zero(mt.t)}
+}
+func (mt aberrantMessageType) GoType() reflect.Type {
+ return mt.t
+}
+func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor {
+ return LegacyLoadMessageDesc(mt.t)
+}
+
+// aberrantMessage implements Message for all types other than pointer-to-struct.
+//
+// When the underlying type implements legacyMarshaler or legacyUnmarshaler,
+// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is
+// not much that can be done with values of this type.
+type aberrantMessage struct {
+ v reflect.Value
+}
+
+func (m aberrantMessage) ProtoReflect() pref.Message {
+ return m
+}
+
+func (m aberrantMessage) Descriptor() pref.MessageDescriptor {
+ return LegacyLoadMessageDesc(m.v.Type())
+}
+func (m aberrantMessage) Type() pref.MessageType {
+ return aberrantMessageType{m.v.Type()}
+}
+func (m aberrantMessage) New() pref.Message {
+ return aberrantMessage{reflect.Zero(m.v.Type())}
+}
+func (m aberrantMessage) Interface() pref.ProtoMessage {
+ return m
+}
+func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+}
+func (m aberrantMessage) Has(pref.FieldDescriptor) bool {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Clear(pref.FieldDescriptor) {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value {
+ panic("invalid field descriptor")
+}
+func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor {
+ panic("invalid oneof descriptor")
+}
+func (m aberrantMessage) GetUnknown() pref.RawFields {
+ return nil
+}
+func (m aberrantMessage) SetUnknown(pref.RawFields) {
+ // SetUnknown discards its input on messages which don't support unknown field storage.
+}
+func (m aberrantMessage) IsValid() bool {
+ // An invalid message is a read-only, empty message. Since we don't know anything
+ // about the alleged contents of this message, we can't say with confidence that
+ // it is invalid in this sense. Therefore, report it as valid.
+ return true
+}
+func (m aberrantMessage) ProtoMethods() *piface.Methods {
+ return legacyProtoMethods
+}
+func (m aberrantMessage) protoUnwrap() interface{} {
+ return m.v.Interface()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
new file mode 100644
index 00000000..cdc4267d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -0,0 +1,176 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+type mergeOptions struct{}
+
+func (o mergeOptions) Merge(dst, src proto.Message) {
+ proto.Merge(dst, src)
+}
+
+// merge is protoreflect.Methods.Merge.
+func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput {
+ dp, ok := mi.getPointer(in.Destination)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ sp, ok := mi.getPointer(in.Source)
+ if !ok {
+ return piface.MergeOutput{}
+ }
+ mi.mergePointer(dp, sp, mergeOptions{})
+ return piface.MergeOutput{Flags: piface.MergeComplete}
+}
+
+func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
+ mi.init()
+ if dst.IsNil() {
+ panic(fmt.Sprintf("invalid value: merging into nil message"))
+ }
+ if src.IsNil() {
+ return
+ }
+ for _, f := range mi.orderedCoderFields {
+ if f.funcs.merge == nil {
+ continue
+ }
+ sfptr := src.Apply(f.offset)
+ if f.isPointer && sfptr.Elem().IsNil() {
+ continue
+ }
+ f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+ }
+ if mi.extensionOffset.IsValid() {
+ sext := src.Apply(mi.extensionOffset).Extensions()
+ dext := dst.Apply(mi.extensionOffset).Extensions()
+ if *dext == nil {
+ *dext = make(map[int32]ExtensionField)
+ }
+ for num, sx := range *sext {
+ xt := sx.Type()
+ xi := getExtensionFieldInfo(xt)
+ if xi.funcs.merge == nil {
+ continue
+ }
+ dx := (*dext)[num]
+ var dv pref.Value
+ if dx.Type() == sx.Type() {
+ dv = dx.Value()
+ }
+ if !dv.IsValid() && xi.unmarshalNeedsValue {
+ dv = xt.New()
+ }
+ dv = xi.funcs.merge(dv, sx.Value(), opts)
+ dx.Set(sx.Type(), dv)
+ (*dext)[num] = dx
+ }
+ }
+ if mi.unknownOffset.IsValid() {
+ du := dst.Apply(mi.unknownOffset).Bytes()
+ su := src.Apply(mi.unknownOffset).Bytes()
+ if len(*su) > 0 {
+ *du = append(*du, *su...)
+ }
+ }
+}
+
+func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ return src
+}
+
+func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...))
+}
+
+func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ dstl.Append(srcl.Get(i))
+ }
+ return dst
+}
+
+func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ sb := srcl.Get(i).Bytes()
+ db := append(emptyBuf[:], sb...)
+ dstl.Append(pref.ValueOfBytes(db))
+ }
+ return dst
+}
+
+func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ dstl := dst.List()
+ srcl := src.List()
+ for i, llen := 0, srcl.Len(); i < llen; i++ {
+ sm := srcl.Get(i).Message()
+ dm := proto.Clone(sm.Interface()).ProtoReflect()
+ dstl.Append(pref.ValueOfMessage(dm))
+ }
+ return dst
+}
+
+func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value {
+ opts.Merge(dst.Message().Interface(), src.Message().Interface())
+ return dst
+}
+
+func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ if f.mi != nil {
+ if dst.Elem().IsNil() {
+ dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ f.mi.mergePointer(dst.Elem(), src.Elem(), opts)
+ } else {
+ dm := dst.AsValueOf(f.ft).Elem()
+ sm := src.AsValueOf(f.ft).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.New(f.ft.Elem()))
+ }
+ opts.Merge(asMessage(dm), asMessage(sm))
+ }
+}
+
+func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ for _, sp := range src.PointerSlice() {
+ dm := reflect.New(f.ft.Elem().Elem())
+ if f.mi != nil {
+ f.mi.mergePointer(pointerOfValue(dm), sp, opts)
+ } else {
+ opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem())))
+ }
+ dst.AppendPointerSlice(pointerOfValue(dm))
+ }
+}
+
+func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...)
+}
+
+func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Bytes()
+ if len(v) > 0 {
+ *dst.Bytes() = append(emptyBuf[:], v...)
+ }
+}
+
+func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.BytesSlice()
+ for _, v := range *src.BytesSlice() {
+ *ds = append(*ds, append(emptyBuf[:], v...))
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
new file mode 100644
index 00000000..8816c274
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go
@@ -0,0 +1,209 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import ()
+
+func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Bool() = *src.Bool()
+}
+
+func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Bool()
+ if v != false {
+ *dst.Bool() = v
+ }
+}
+
+func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.BoolPtr()
+ if p != nil {
+ v := *p
+ *dst.BoolPtr() = &v
+ }
+}
+
+func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.BoolSlice()
+ ss := src.BoolSlice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Int32() = *src.Int32()
+}
+
+func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Int32()
+ if v != 0 {
+ *dst.Int32() = v
+ }
+}
+
+func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Int32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Int32Ptr() = &v
+ }
+}
+
+func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Int32Slice()
+ ss := src.Int32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Uint32() = *src.Uint32()
+}
+
+func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Uint32()
+ if v != 0 {
+ *dst.Uint32() = v
+ }
+}
+
+func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Uint32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Uint32Ptr() = &v
+ }
+}
+
+func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Uint32Slice()
+ ss := src.Uint32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Int64() = *src.Int64()
+}
+
+func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Int64()
+ if v != 0 {
+ *dst.Int64() = v
+ }
+}
+
+func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Int64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Int64Ptr() = &v
+ }
+}
+
+func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Int64Slice()
+ ss := src.Int64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Uint64() = *src.Uint64()
+}
+
+func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Uint64()
+ if v != 0 {
+ *dst.Uint64() = v
+ }
+}
+
+func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Uint64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Uint64Ptr() = &v
+ }
+}
+
+func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Uint64Slice()
+ ss := src.Uint64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Float32() = *src.Float32()
+}
+
+func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Float32()
+ if v != 0 {
+ *dst.Float32() = v
+ }
+}
+
+func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Float32Ptr()
+ if p != nil {
+ v := *p
+ *dst.Float32Ptr() = &v
+ }
+}
+
+func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Float32Slice()
+ ss := src.Float32Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.Float64() = *src.Float64()
+}
+
+func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.Float64()
+ if v != 0 {
+ *dst.Float64() = v
+ }
+}
+
+func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.Float64Ptr()
+ if p != nil {
+ v := *p
+ *dst.Float64Ptr() = &v
+ }
+}
+
+func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.Float64Slice()
+ ss := src.Float64Slice()
+ *ds = append(*ds, *ss...)
+}
+
+func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ *dst.String() = *src.String()
+}
+
+func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ v := *src.String()
+ if v != "" {
+ *dst.String() = v
+ }
+}
+
+func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ p := *src.StringPtr()
+ if p != nil {
+ v := *p
+ *dst.StringPtr() = &v
+ }
+}
+
+func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+ ds := dst.StringSlice()
+ ss := src.StringSlice()
+ *ds = append(*ds, *ss...)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
new file mode 100644
index 00000000..7dd994bd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -0,0 +1,215 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/genname"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// MessageInfo provides protobuf related functionality for a given Go type
+// that represents a message. A given instance of MessageInfo is tied to
+// exactly one Go type, which must be a pointer to a struct type.
+//
+// The exported fields must be populated before any methods are called
+// and cannot be mutated after set.
+type MessageInfo struct {
+ // GoReflectType is the underlying message Go type and must be populated.
+ GoReflectType reflect.Type // pointer to struct
+
+ // Desc is the underlying message descriptor type and must be populated.
+ Desc pref.MessageDescriptor
+
+ // Exporter must be provided in a purego environment in order to provide
+ // access to unexported fields.
+ Exporter exporter
+
+ // OneofWrappers is list of pointers to oneof wrapper struct types.
+ OneofWrappers []interface{}
+
+ initMu sync.Mutex // protects all unexported fields
+ initDone uint32
+
+ reflectMessageInfo // for reflection implementation
+ coderMessageInfo // for fast-path method implementations
+}
+
+// exporter is a function that returns a reference to the ith field of v,
+// where v is a pointer to a struct. It returns nil if it does not support
+// exporting the requested field (e.g., already exported).
+type exporter func(v interface{}, i int) interface{}
+
+// getMessageInfo returns the MessageInfo for any message type that
+// is generated by our implementation of protoc-gen-go (for v2 and on).
+// If it is unable to obtain a MessageInfo, it returns nil.
+func getMessageInfo(mt reflect.Type) *MessageInfo {
+ m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage)
+ if !ok {
+ return nil
+ }
+ mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo })
+ if !ok {
+ return nil
+ }
+ return mr.ProtoMessageInfo()
+}
+
+func (mi *MessageInfo) init() {
+ // This function is called in the hot path. Inline the sync.Once logic,
+ // since allocating a closure for Once.Do is expensive.
+ // Keep init small to ensure that it can be inlined.
+ if atomic.LoadUint32(&mi.initDone) == 0 {
+ mi.initOnce()
+ }
+}
+
+func (mi *MessageInfo) initOnce() {
+ mi.initMu.Lock()
+ defer mi.initMu.Unlock()
+ if mi.initDone == 1 {
+ return
+ }
+
+ t := mi.GoReflectType
+ if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ panic(fmt.Sprintf("got %v, want *struct kind", t))
+ }
+ t = t.Elem()
+
+ si := mi.makeStructInfo(t)
+ mi.makeReflectFuncs(t, si)
+ mi.makeCoderMethods(t, si)
+
+ atomic.StoreUint32(&mi.initDone, 1)
+}
+
+// getPointer returns the pointer for a message, which should be of
+// the type of the MessageInfo. If the message is of a different type,
+// it returns ok==false.
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) {
+ switch m := m.(type) {
+ case *messageState:
+ return m.pointer(), m.messageInfo() == mi
+ case *messageReflectWrapper:
+ return m.pointer(), m.messageInfo() == mi
+ }
+ return pointer{}, false
+}
+
+type (
+ SizeCache = int32
+ WeakFields = map[int32]protoreflect.ProtoMessage
+ UnknownFields = []byte
+ ExtensionFields = map[int32]ExtensionField
+)
+
+var (
+ sizecacheType = reflect.TypeOf(SizeCache(0))
+ weakFieldsType = reflect.TypeOf(WeakFields(nil))
+ unknownFieldsType = reflect.TypeOf(UnknownFields(nil))
+ extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
+)
+
+type structInfo struct {
+ sizecacheOffset offset
+ weakOffset offset
+ unknownOffset offset
+ extensionOffset offset
+
+ fieldsByNumber map[pref.FieldNumber]reflect.StructField
+ oneofsByName map[pref.Name]reflect.StructField
+ oneofWrappersByType map[reflect.Type]pref.FieldNumber
+ oneofWrappersByNumber map[pref.FieldNumber]reflect.Type
+}
+
+func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
+ si := structInfo{
+ sizecacheOffset: invalidOffset,
+ weakOffset: invalidOffset,
+ unknownOffset: invalidOffset,
+ extensionOffset: invalidOffset,
+
+ fieldsByNumber: map[pref.FieldNumber]reflect.StructField{},
+ oneofsByName: map[pref.Name]reflect.StructField{},
+ oneofWrappersByType: map[reflect.Type]pref.FieldNumber{},
+ oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{},
+ }
+
+fieldLoop:
+ for i := 0; i < t.NumField(); i++ {
+ switch f := t.Field(i); f.Name {
+ case genname.SizeCache, genname.SizeCacheA:
+ if f.Type == sizecacheType {
+ si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ }
+ case genname.WeakFields, genname.WeakFieldsA:
+ if f.Type == weakFieldsType {
+ si.weakOffset = offsetOf(f, mi.Exporter)
+ }
+ case genname.UnknownFields, genname.UnknownFieldsA:
+ if f.Type == unknownFieldsType {
+ si.unknownOffset = offsetOf(f, mi.Exporter)
+ }
+ case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB:
+ if f.Type == extensionFieldsType {
+ si.extensionOffset = offsetOf(f, mi.Exporter)
+ }
+ default:
+ for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
+ if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
+ n, _ := strconv.ParseUint(s, 10, 64)
+ si.fieldsByNumber[pref.FieldNumber(n)] = f
+ continue fieldLoop
+ }
+ }
+ if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 {
+ si.oneofsByName[pref.Name(s)] = f
+ continue fieldLoop
+ }
+ }
+ }
+
+ // Derive a mapping of oneof wrappers to fields.
+ oneofWrappers := mi.OneofWrappers
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := reflect.PtrTo(t).MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ oneofWrappers = vs
+ }
+ }
+ }
+ }
+ for _, v := range oneofWrappers {
+ tf := reflect.TypeOf(v).Elem()
+ f := tf.Field(0)
+ for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
+ if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
+ n, _ := strconv.ParseUint(s, 10, 64)
+ si.oneofWrappersByType[tf] = pref.FieldNumber(n)
+ si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf
+ break
+ }
+ }
+ }
+
+ return si
+}
+
+func (mi *MessageInfo) New() protoreflect.Message {
+ return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface())
+}
+func (mi *MessageInfo) Zero() protoreflect.Message {
+ return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface())
+}
+func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
new file mode 100644
index 00000000..0f4b8db7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -0,0 +1,364 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/internal/pragma"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type reflectMessageInfo struct {
+ fields map[pref.FieldNumber]*fieldInfo
+ oneofs map[pref.Name]*oneofInfo
+
+ // denseFields is a subset of fields where:
+ // 0 < fieldDesc.Number() < len(denseFields)
+ // It provides faster access to the fieldInfo, but may be incomplete.
+ denseFields []*fieldInfo
+
+ // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
+ rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+
+ getUnknown func(pointer) pref.RawFields
+ setUnknown func(pointer, pref.RawFields)
+ extensionMap func(pointer) *extensionMap
+
+ nilMessage atomicNilMessage
+}
+
+// makeReflectFuncs generates the set of functions to support reflection.
+func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) {
+ mi.makeKnownFieldsFunc(si)
+ mi.makeUnknownFieldsFunc(t, si)
+ mi.makeExtensionFieldsFunc(t, si)
+}
+
+// makeKnownFieldsFunc generates functions for operations that can be performed
+// on each protobuf message field. It takes in a reflect.Type representing the
+// Go struct and matches message fields with struct fields.
+//
+// This code assumes that the struct is well-formed and panics if there are
+// any discrepancies.
+func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
+ mi.fields = map[pref.FieldNumber]*fieldInfo{}
+ md := mi.Desc
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ var fi fieldInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+ case fd.IsMap():
+ fi = fieldInfoForMap(fd, fs, mi.Exporter)
+ case fd.IsList():
+ fi = fieldInfoForList(fd, fs, mi.Exporter)
+ case fd.IsWeak():
+ fi = fieldInfoForWeakMessage(fd, si.weakOffset)
+ case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind:
+ fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+ default:
+ fi = fieldInfoForScalar(fd, fs, mi.Exporter)
+ }
+ mi.fields[fd.Number()] = &fi
+ }
+
+ mi.oneofs = map[pref.Name]*oneofInfo{}
+ for i := 0; i < md.Oneofs().Len(); i++ {
+ od := md.Oneofs().Get(i)
+ mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
+ }
+
+ mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+ for i := 0; i < fds.Len(); i++ {
+ if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+ mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+ }
+ }
+
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() {
+ mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+ i += od.Fields().Len()
+ } else {
+ mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+ i++
+ }
+ }
+}
+
+func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) {
+ mi.getUnknown = func(pointer) pref.RawFields { return nil }
+ mi.setUnknown = func(pointer, pref.RawFields) { return }
+ if si.unknownOffset.IsValid() {
+ mi.getUnknown = func(p pointer) pref.RawFields {
+ if p.IsNil() {
+ return nil
+ }
+ rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
+ return pref.RawFields(*rv.Interface().(*[]byte))
+ }
+ mi.setUnknown = func(p pointer, b pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType)
+ *rv.Interface().(*[]byte) = []byte(b)
+ }
+ } else {
+ mi.getUnknown = func(pointer) pref.RawFields {
+ return nil
+ }
+ mi.setUnknown = func(p pointer, _ pref.RawFields) {
+ if p.IsNil() {
+ panic("invalid SetUnknown on nil Message")
+ }
+ }
+ }
+}
+
+func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) {
+ if si.extensionOffset.IsValid() {
+ mi.extensionMap = func(p pointer) *extensionMap {
+ if p.IsNil() {
+ return (*extensionMap)(nil)
+ }
+ v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType)
+ return (*extensionMap)(v.Interface().(*map[int32]ExtensionField))
+ }
+ } else {
+ mi.extensionMap = func(pointer) *extensionMap {
+ return (*extensionMap)(nil)
+ }
+ }
+}
+
+type extensionMap map[int32]ExtensionField
+
+func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+ if m != nil {
+ for _, x := range *m {
+ xd := x.Type().TypeDescriptor()
+ v := x.Value()
+ if xd.IsList() && v.List().Len() == 0 {
+ continue
+ }
+ if !f(xd, v) {
+ return
+ }
+ }
+ }
+}
+func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) {
+ if m == nil {
+ return false
+ }
+ xd := xt.TypeDescriptor()
+ x, ok := (*m)[int32(xd.Number())]
+ if !ok {
+ return false
+ }
+ switch {
+ case xd.IsList():
+ return x.Value().List().Len() > 0
+ case xd.IsMap():
+ return x.Value().Map().Len() > 0
+ case xd.Message() != nil:
+ return x.Value().Message().IsValid()
+ }
+ return true
+}
+func (m *extensionMap) Clear(xt pref.ExtensionType) {
+ delete(*m, int32(xt.TypeDescriptor().Number()))
+}
+func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value {
+ xd := xt.TypeDescriptor()
+ if m != nil {
+ if x, ok := (*m)[int32(xd.Number())]; ok {
+ return x.Value()
+ }
+ }
+ return xt.Zero()
+}
+func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) {
+ xd := xt.TypeDescriptor()
+ isValid := true
+ switch {
+ case !xt.IsValidValue(v):
+ isValid = false
+ case xd.IsList():
+ isValid = v.List().IsValid()
+ case xd.IsMap():
+ isValid = v.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = v.Message().IsValid()
+ }
+ if !isValid {
+ panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+ }
+
+ if *m == nil {
+ *m = make(map[int32]ExtensionField)
+ }
+ var x ExtensionField
+ x.Set(xt, v)
+ (*m)[int32(xd.Number())] = x
+}
+func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value {
+ xd := xt.TypeDescriptor()
+ if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() {
+ panic("invalid Mutable on field with non-composite type")
+ }
+ if x, ok := (*m)[int32(xd.Number())]; ok {
+ return x.Value()
+ }
+ v := xt.New()
+ m.Set(xt, v)
+ return v
+}
+
+// MessageState is a data structure that is nested as the first field in a
+// concrete message. It provides a way to implement the ProtoReflect method
+// in an allocation-free way without needing to have a shadow Go type generated
+// for every message type. This technique only works using unsafe.
+//
+//
+// Example generated code:
+//
+// type M struct {
+// state protoimpl.MessageState
+//
+// Field1 int32
+// Field2 string
+// Field3 *BarMessage
+// ...
+// }
+//
+// func (m *M) ProtoReflect() protoreflect.Message {
+// mi := &file_fizz_buzz_proto_msgInfos[5]
+// if protoimpl.UnsafeEnabled && m != nil {
+// ms := protoimpl.X.MessageStateOf(Pointer(m))
+// if ms.LoadMessageInfo() == nil {
+// ms.StoreMessageInfo(mi)
+// }
+// return ms
+// }
+// return mi.MessageOf(m)
+// }
+//
+// The MessageState type holds a *MessageInfo, which must be atomically set to
+// the message info associated with a given message instance.
+// By unsafely converting a *M into a *MessageState, the MessageState object
+// has access to all the information needed to implement protobuf reflection.
+// It has access to the message info as its first field, and a pointer to the
+// MessageState is identical to a pointer to the concrete message value.
+//
+//
+// Requirements:
+// • The type M must implement protoreflect.ProtoMessage.
+// • The address of m must not be nil.
+// • The address of m and the address of m.state must be equal,
+// even though they are different Go types.
+type MessageState struct {
+ pragma.NoUnkeyedLiterals
+ pragma.DoNotCompare
+ pragma.DoNotCopy
+
+ atomicMessageInfo *MessageInfo
+}
+
+type messageState MessageState
+
+var (
+ _ pref.Message = (*messageState)(nil)
+ _ unwrapper = (*messageState)(nil)
+)
+
+// messageDataType is a tuple of a pointer to the message data and
+// a pointer to the message type. It is a generalized way of providing a
+// reflective view over a message instance. The disadvantage of this approach
+// is the need to allocate this tuple of 16B.
+type messageDataType struct {
+ p pointer
+ mi *MessageInfo
+}
+
+type (
+ messageReflectWrapper messageDataType
+ messageIfaceWrapper messageDataType
+)
+
+var (
+ _ pref.Message = (*messageReflectWrapper)(nil)
+ _ unwrapper = (*messageReflectWrapper)(nil)
+ _ pref.ProtoMessage = (*messageIfaceWrapper)(nil)
+ _ unwrapper = (*messageIfaceWrapper)(nil)
+)
+
+// MessageOf returns a reflective view over a message. The input must be a
+// pointer to a named Go struct. If the provided type has a ProtoReflect method,
+// it must be implemented by calling this method.
+func (mi *MessageInfo) MessageOf(m interface{}) pref.Message {
+ // TODO: Switch the input to be an opaque Pointer.
+ if reflect.TypeOf(m) != mi.GoReflectType {
+ panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
+ }
+ p := pointerOfIface(m)
+ if p.IsNil() {
+ return mi.nilMessage.Init(mi)
+ }
+ return &messageReflectWrapper{p, mi}
+}
+
+func (m *messageReflectWrapper) pointer() pointer { return m.p }
+func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi }
+
+func (m *messageIfaceWrapper) ProtoReflect() pref.Message {
+ return (*messageReflectWrapper)(m)
+}
+func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+ return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
+}
+
+// checkField verifies that the provided field descriptor is valid.
+// Exactly one of the returned values is populated.
+func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) {
+ var fi *fieldInfo
+ if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
+ fi = mi.denseFields[n]
+ } else {
+ fi = mi.fields[n]
+ }
+ if fi != nil {
+ if fi.fieldDesc != fd {
+ if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want {
+ panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want))
+ }
+ panic(fmt.Sprintf("mismatching field: %v", fd.FullName()))
+ }
+ return fi, nil
+ }
+
+ if fd.IsExtension() {
+ if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want {
+ // TODO: Should this be exact containing message descriptor match?
+ panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want))
+ }
+ if !mi.Desc.ExtensionRanges().Has(fd.Number()) {
+ panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName()))
+ }
+ xtd, ok := fd.(pref.ExtensionTypeDescriptor)
+ if !ok {
+ panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
+ }
+ return nil, xtd.Type()
+ }
+ panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
new file mode 100644
index 00000000..23124a86
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -0,0 +1,466 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sync"
+
+ "google.golang.org/protobuf/internal/flags"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+type fieldInfo struct {
+ fieldDesc pref.FieldDescriptor
+
+ // These fields are used for protobuf reflection support.
+ has func(pointer) bool
+ clear func(pointer)
+ get func(pointer) pref.Value
+ set func(pointer, pref.Value)
+ mutable func(pointer) pref.Value
+ newMessage func() pref.Message
+ newField func() pref.Value
+}
+
+func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Interface {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft))
+ }
+ if ot.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot))
+ }
+ if !reflect.PtrTo(ot).Implements(ft) {
+ panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft))
+ }
+ conv := NewConverter(ot.Field(0).Type, fd)
+ isMessage := fd.Message() != nil
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ // NOTE: The logic below intentionally assumes that oneof fields are
+ // well-formatted. That is, the oneof interface never contains a
+ // typed nil pointer to one of the wrapper structs.
+
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ return false
+ }
+ return true
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot {
+ // NOTE: We intentionally don't check for rv.Elem().IsNil()
+ // so that (*OneofWrapperType)(nil) gets cleared to nil.
+ return
+ }
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ return conv.Zero()
+ }
+ rv = rv.Elem().Elem().Field(0)
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ rv.Set(reflect.New(ot))
+ }
+ rv = rv.Elem().Elem().Field(0)
+ rv.Set(conv.GoValueOf(v))
+ },
+ mutable: func(p pointer) pref.Value {
+ if !isMessage {
+ panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName()))
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() {
+ rv.Set(reflect.New(ot))
+ }
+ rv = rv.Elem().Elem().Field(0)
+ if rv.IsNil() {
+ rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message())))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newMessage: func() pref.Message {
+ return conv.New().Message()
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Map {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft))
+ }
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName()))
+ }
+ rv.Set(pv)
+ },
+ mutable: func(p pointer) pref.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(fs.Type))
+ }
+ return conv.PBValueOf(v)
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft))
+ }
+ conv := NewConverter(reflect.PtrTo(ft), fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName()))
+ }
+ rv.Set(pv.Elem())
+ },
+ mutable: func(p pointer) pref.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ return conv.PBValueOf(v)
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+var (
+ nilBytes = reflect.ValueOf([]byte(nil))
+ emptyBytes = reflect.ValueOf([]byte{})
+)
+
+func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ nullable := fd.HasPresence()
+ isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+ if nullable {
+ if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft))
+ }
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ }
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable {
+ return !rv.IsNil()
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return rv.Bool()
+ case reflect.Int32, reflect.Int64:
+ return rv.Int() != 0
+ case reflect.Uint32, reflect.Uint64:
+ return rv.Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() != 0 || math.Signbit(rv.Float())
+ case reflect.String, reflect.Slice:
+ return rv.Len() > 0
+ default:
+ panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen
+ }
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable {
+ if rv.IsNil() {
+ return conv.Zero()
+ }
+ if rv.Kind() == reflect.Ptr {
+ rv = rv.Elem()
+ }
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if nullable && rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(ft))
+ }
+ rv = rv.Elem()
+ }
+ rv.Set(conv.GoValueOf(v))
+ if isBytes && rv.Len() == 0 {
+ if nullable {
+ rv.Set(emptyBytes) // preserve presence
+ } else {
+ rv.Set(nilBytes) // do not preserve presence
+ }
+ }
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo {
+ if !flags.ProtoLegacy {
+ panic("no support for proto1 weak fields")
+ }
+
+ var once sync.Once
+ var messageType pref.MessageType
+ lazyInit := func() {
+ once.Do(func() {
+ messageName := fd.Message().FullName()
+ messageType, _ = preg.GlobalTypes.FindMessageByName(messageName)
+ if messageType == nil {
+ panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
+ }
+ })
+ }
+
+ num := fd.Number()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ _, ok := p.Apply(weakOffset).WeakFields().get(num)
+ return ok
+ },
+ clear: func(p pointer) {
+ p.Apply(weakOffset).WeakFields().clear(num)
+ },
+ get: func(p pointer) pref.Value {
+ lazyInit()
+ if p.IsNil() {
+ return pref.ValueOfMessage(messageType.Zero())
+ }
+ m, ok := p.Apply(weakOffset).WeakFields().get(num)
+ if !ok {
+ return pref.ValueOfMessage(messageType.Zero())
+ }
+ return pref.ValueOfMessage(m.ProtoReflect())
+ },
+ set: func(p pointer, v pref.Value) {
+ lazyInit()
+ m := v.Message()
+ if m.Descriptor() != messageType.Descriptor() {
+ if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
+ }
+ panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
+ }
+ p.Apply(weakOffset).WeakFields().set(num, m.Interface())
+ },
+ mutable: func(p pointer) pref.Value {
+ lazyInit()
+ fs := p.Apply(weakOffset).WeakFields()
+ m, ok := fs.get(num)
+ if !ok {
+ m = messageType.New().Interface()
+ fs.set(num, m)
+ }
+ return pref.ValueOfMessage(m.ProtoReflect())
+ },
+ newMessage: func() pref.Message {
+ lazyInit()
+ return messageType.New()
+ },
+ newField: func() pref.Value {
+ lazyInit()
+ return pref.ValueOfMessage(messageType.New())
+ },
+ }
+}
+
+func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
+ ft := fs.Type
+ conv := NewConverter(ft, fd)
+
+ // TODO: Implement unsafe fast path?
+ fieldOffset := offsetOf(fs, x)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return !rv.IsNil()
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) pref.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v pref.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(conv.GoValueOf(v))
+ if rv.IsNil() {
+ panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName()))
+ }
+ },
+ mutable: func(p pointer) pref.Value {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ rv.Set(conv.GoValueOf(conv.New()))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newMessage: func() pref.Message {
+ return conv.New().Message()
+ },
+ newField: func() pref.Value {
+ return conv.New()
+ },
+ }
+}
+
+type oneofInfo struct {
+ oneofDesc pref.OneofDescriptor
+ which func(pointer) pref.FieldNumber
+}
+
+func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() { // valid on either *T or []byte
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ } else {
+ fs := si.oneofsByName[od.Name()]
+ fieldOffset := offsetOf(fs, x)
+ oi.which = func(p pointer) pref.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ return 0
+ }
+ rv = rv.Elem()
+ if rv.IsNil() {
+ return 0
+ }
+ return si.oneofWrappersByType[rv.Type().Elem()]
+ }
+ }
+ return oi
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
new file mode 100644
index 00000000..741d6e5b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -0,0 +1,249 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (m *messageState) Descriptor() protoreflect.MessageDescriptor {
+ return m.messageInfo().Desc
+}
+func (m *messageState) Type() protoreflect.MessageType {
+ return m.messageInfo()
+}
+func (m *messageState) New() protoreflect.Message {
+ return m.messageInfo().New()
+}
+func (m *messageState) Interface() protoreflect.ProtoMessage {
+ return m.protoUnwrap().(protoreflect.ProtoMessage)
+}
+func (m *messageState) protoUnwrap() interface{} {
+ return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
+}
+func (m *messageState) ProtoMethods() *protoiface.Methods {
+ m.messageInfo().init()
+ return &m.messageInfo().methods
+}
+
+// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
+// to be able to retrieve a v2 MessageInfo struct.
+//
+// WARNING: This method is exempt from the compatibility promise and
+// may be removed in the future without warning.
+func (m *messageState) ProtoMessageInfo() *MessageInfo {
+ return m.messageInfo()
+}
+
+func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ m.messageInfo().init()
+ for _, ri := range m.messageInfo().rangeInfos {
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ if ri.has(m.pointer()) {
+ if !f(ri.fieldDesc, ri.get(m.pointer())) {
+ return
+ }
+ }
+ case *oneofInfo:
+ if n := ri.which(m.pointer()); n > 0 {
+ fi := m.messageInfo().fields[n]
+ if !f(fi.fieldDesc, fi.get(m.pointer())) {
+ return
+ }
+ }
+ }
+ }
+ m.messageInfo().extensionMap(m.pointer()).Range(f)
+}
+func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.has(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ }
+}
+func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.clear(m.pointer())
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ }
+}
+func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.get(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ }
+}
+func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.set(m.pointer(), v)
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ }
+}
+func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.mutable(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ }
+}
+func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.newField()
+ } else {
+ return xt.New()
+ }
+}
+func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ m.messageInfo().init()
+ if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ return od.Fields().ByNumber(oi.which(m.pointer()))
+ }
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
+}
+func (m *messageState) GetUnknown() protoreflect.RawFields {
+ m.messageInfo().init()
+ return m.messageInfo().getUnknown(m.pointer())
+}
+func (m *messageState) SetUnknown(b protoreflect.RawFields) {
+ m.messageInfo().init()
+ m.messageInfo().setUnknown(m.pointer(), b)
+}
+func (m *messageState) IsValid() bool {
+ return !m.pointer().IsNil()
+}
+
+func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor {
+ return m.messageInfo().Desc
+}
+func (m *messageReflectWrapper) Type() protoreflect.MessageType {
+ return m.messageInfo()
+}
+func (m *messageReflectWrapper) New() protoreflect.Message {
+ return m.messageInfo().New()
+}
+func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
+ if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok {
+ return m
+ }
+ return (*messageIfaceWrapper)(m)
+}
+func (m *messageReflectWrapper) protoUnwrap() interface{} {
+ return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
+}
+func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
+ m.messageInfo().init()
+ return &m.messageInfo().methods
+}
+
+// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
+// to be able to retrieve a v2 MessageInfo struct.
+//
+// WARNING: This method is exempt from the compatibility promise and
+// may be removed in the future without warning.
+func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo {
+ return m.messageInfo()
+}
+
+func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ m.messageInfo().init()
+ for _, ri := range m.messageInfo().rangeInfos {
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ if ri.has(m.pointer()) {
+ if !f(ri.fieldDesc, ri.get(m.pointer())) {
+ return
+ }
+ }
+ case *oneofInfo:
+ if n := ri.which(m.pointer()); n > 0 {
+ fi := m.messageInfo().fields[n]
+ if !f(fi.fieldDesc, fi.get(m.pointer())) {
+ return
+ }
+ }
+ }
+ }
+ m.messageInfo().extensionMap(m.pointer()).Range(f)
+}
+func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.has(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ }
+}
+func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.clear(m.pointer())
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ }
+}
+func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.get(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ }
+}
+func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ fi.set(m.pointer(), v)
+ } else {
+ m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ }
+}
+func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.mutable(m.pointer())
+ } else {
+ return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ }
+}
+func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.messageInfo().init()
+ if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ return fi.newField()
+ } else {
+ return xt.New()
+ }
+}
+func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ m.messageInfo().init()
+ if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ return od.Fields().ByNumber(oi.which(m.pointer()))
+ }
+ panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
+}
+func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
+ m.messageInfo().init()
+ return m.messageInfo().getUnknown(m.pointer())
+}
+func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
+ m.messageInfo().init()
+ m.messageInfo().setUnknown(m.pointer(), b)
+}
+func (m *messageReflectWrapper) IsValid() bool {
+ return !m.pointer().IsNil()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
new file mode 100644
index 00000000..67b4ede6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -0,0 +1,177 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+const UnsafeEnabled = false
+
+// Pointer is an opaque pointer type.
+type Pointer interface{}
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the field index into a struct.
+type offset struct {
+ index int
+ export exporter
+}
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+ if len(f.Index) != 1 {
+ panic("embedded structs are not supported")
+ }
+ if f.PkgPath == "" {
+ return offset{index: f.Index[0]} // field is already exported
+ }
+ if x == nil {
+ panic("exporter must be provided for unexported field")
+ }
+ return offset{index: f.Index[0], export: x}
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f.index >= 0 }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = offset{index: -1}
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset{index: 0}
+
+// pointer is an abstract representation of a pointer to a struct or field.
+type pointer struct{ v reflect.Value }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+ return pointerOfIface(p)
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v interface{}) pointer {
+ return pointer{v: reflect.ValueOf(v)}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+ return p.v.IsNil()
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The current pointer must be pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+ if f.export != nil {
+ if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
+ return pointer{v: v}
+ }
+ }
+ return pointer{v: p.v.Elem().Field(f.index).Addr()}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+ if got := p.v.Type().Elem(); got != t {
+ panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
+ }
+ return p.v
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+ return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
+func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
+func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
+func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
+func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
+func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
+func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
+func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
+func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
+func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
+func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
+func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
+func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
+func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
+func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
+func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
+func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
+func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
+func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
+func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
+func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
+func (p pointer) String() *string { return p.v.Interface().(*string) }
+func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
+func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
+func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
+func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
+func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
+func (p pointer) Extensions() *map[int32]ExtensionField {
+ return p.v.Interface().(*map[int32]ExtensionField)
+}
+
+func (p pointer) Elem() pointer {
+ return pointer{v: p.v.Elem()}
+}
+
+// PointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) PointerSlice() []pointer {
+ // TODO: reconsider this
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+ sp := p.v.Elem()
+ sp.Set(reflect.Append(sp, v.v))
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+ p.v.Elem().Set(v.v)
+}
+
+func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
+func (ms *messageState) pointer() pointer { panic("not supported") }
+func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
+func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
+
+type atomicNilMessage struct {
+ once sync.Once
+ m messageReflectWrapper
+}
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+ m.once.Do(func() {
+ m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
+ m.m.mi = mi
+ })
+ return &m.m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
new file mode 100644
index 00000000..088aa85d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -0,0 +1,173 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package impl
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const UnsafeEnabled = true
+
+// Pointer is an opaque pointer type.
+type Pointer unsafe.Pointer
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the byte offset to the field from the start of the struct.
+type offset uintptr
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+ return offset(f.Offset)
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f != invalidOffset }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = ^offset(0)
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset(0)
+
+// pointer is a pointer to a message struct or field.
+type pointer struct{ p unsafe.Pointer }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+ return pointer{p: unsafe.Pointer(p)}
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v interface{}) pointer {
+ type ifaceHeader struct {
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+ return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The pointer must be valid and pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+ if p.IsNil() {
+ panic("invalid nil pointer")
+ }
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+ // TODO: Use tricky unsafe magic to directly create ifaceHeader.
+ return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool { return (*bool)(p.p) }
+func (p pointer) BoolPtr() **bool { return (**bool)(p.p) }
+func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) }
+func (p pointer) Int32() *int32 { return (*int32)(p.p) }
+func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) }
+func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) }
+func (p pointer) Int64() *int64 { return (*int64)(p.p) }
+func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) }
+func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) }
+func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) }
+func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) }
+func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) }
+func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) }
+func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) }
+func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) }
+func (p pointer) Float32() *float32 { return (*float32)(p.p) }
+func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) }
+func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) }
+func (p pointer) Float64() *float64 { return (*float64)(p.p) }
+func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) }
+func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) }
+func (p pointer) String() *string { return (*string)(p.p) }
+func (p pointer) StringPtr() **string { return (**string)(p.p) }
+func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) }
+func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
+func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
+func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
+func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+
+func (p pointer) Elem() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// PointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) PointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+ *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v)
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+ *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p)
+}
+
+// Static check that MessageState does not exceed the size of a pointer.
+const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
+
+func (Export) MessageStateOf(p Pointer) *messageState {
+ // Super-tricky - see documentation on MessageState.
+ return (*messageState)(unsafe.Pointer(p))
+}
+func (ms *messageState) pointer() pointer {
+ // Super-tricky - see documentation on MessageState.
+ return pointer{p: unsafe.Pointer(ms)}
+}
+func (ms *messageState) messageInfo() *MessageInfo {
+ mi := ms.LoadMessageInfo()
+ if mi == nil {
+ panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct")
+ }
+ return mi
+}
+func (ms *messageState) LoadMessageInfo() *MessageInfo {
+ return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo))))
+}
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi))
+}
+
+type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+ if p := atomic.LoadPointer(&m.p); p != nil {
+ return (*messageReflectWrapper)(p)
+ }
+ w := &messageReflectWrapper{mi: mi}
+ atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w))
+ return (*messageReflectWrapper)(atomic.LoadPointer(&m.p))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
new file mode 100644
index 00000000..57de9cc8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -0,0 +1,575 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "math/bits"
+ "reflect"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/strs"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// ValidationStatus is the result of validating the wire-format encoding of a message.
+type ValidationStatus int
+
+const (
+ // ValidationUnknown indicates that unmarshaling the message might succeed or fail.
+ // The validator was unable to render a judgement.
+ //
+ // The only causes of this status are an aberrant message type appearing somewhere
+ // in the message or a failure in the extension resolver.
+ ValidationUnknown ValidationStatus = iota + 1
+
+ // ValidationInvalid indicates that unmarshaling the message will fail.
+ ValidationInvalid
+
+ // ValidationValid indicates that unmarshaling the message will succeed.
+ ValidationValid
+)
+
+func (v ValidationStatus) String() string {
+ switch v {
+ case ValidationUnknown:
+ return "ValidationUnknown"
+ case ValidationInvalid:
+ return "ValidationInvalid"
+ case ValidationValid:
+ return "ValidationValid"
+ default:
+ return fmt.Sprintf("ValidationStatus(%d)", int(v))
+ }
+}
+
+// Validate determines whether the contents of the buffer are a valid wire encoding
+// of the message type.
+//
+// This function is exposed for testing.
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) {
+ mi, ok := mt.(*MessageInfo)
+ if !ok {
+ return out, ValidationUnknown
+ }
+ if in.Resolver == nil {
+ in.Resolver = preg.GlobalTypes
+ }
+ o, st := mi.validate(in.Buf, 0, unmarshalOptions{
+ flags: in.Flags,
+ resolver: in.Resolver,
+ })
+ if o.initialized {
+ out.Flags |= piface.UnmarshalInitialized
+ }
+ return out, st
+}
+
+type validationInfo struct {
+ mi *MessageInfo
+ typ validationType
+ keyType, valType validationType
+
+ // For non-required fields, requiredBit is 0.
+ //
+ // For required fields, requiredBit's nth bit is set, where n is a
+ // unique index in the range [0, MessageInfo.numRequiredFields).
+ //
+ // If there are more than 64 required fields, requiredBit is 0.
+ requiredBit uint64
+}
+
+type validationType uint8
+
+const (
+ validationTypeOther validationType = iota
+ validationTypeMessage
+ validationTypeGroup
+ validationTypeMap
+ validationTypeRepeatedVarint
+ validationTypeRepeatedFixed32
+ validationTypeRepeatedFixed64
+ validationTypeVarint
+ validationTypeFixed32
+ validationTypeFixed64
+ validationTypeBytes
+ validationTypeUTF8String
+ validationTypeMessageSetItem
+)
+
+func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+ var vi validationInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
+ vi.mi = getMessageInfo(ot.Field(0).Type)
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok {
+ vi.mi = getMessageInfo(ot.Field(0).Type)
+ }
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ }
+ default:
+ vi = newValidationInfo(fd, ft)
+ }
+ if fd.Cardinality() == pref.Required {
+ // Avoid overflow. The required field check is done with a 64-bit mask, with
+ // any message containing more than 64 required fields always reported as
+ // potentially uninitialized, so it is not important to get a precise count
+ // of the required fields past 64.
+ if mi.numRequiredFields < math.MaxUint8 {
+ mi.numRequiredFields++
+ vi.requiredBit = 1 << (mi.numRequiredFields - 1)
+ }
+ }
+ return vi
+}
+
+func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
+ var vi validationInfo
+ switch {
+ case fd.IsList():
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if ft.Kind() == reflect.Slice {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ if ft.Kind() == reflect.Slice {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.StringKind:
+ vi.typ = validationTypeBytes
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ default:
+ switch wireTypes[fd.Kind()] {
+ case protowire.VarintType:
+ vi.typ = validationTypeRepeatedVarint
+ case protowire.Fixed32Type:
+ vi.typ = validationTypeRepeatedFixed32
+ case protowire.Fixed64Type:
+ vi.typ = validationTypeRepeatedFixed64
+ }
+ }
+ case fd.IsMap():
+ vi.typ = validationTypeMap
+ switch fd.MapKey().Kind() {
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.keyType = validationTypeUTF8String
+ }
+ }
+ switch fd.MapValue().Kind() {
+ case pref.MessageKind:
+ vi.valType = validationTypeMessage
+ if ft.Kind() == reflect.Map {
+ vi.mi = getMessageInfo(ft.Elem())
+ }
+ case pref.StringKind:
+ if strs.EnforceUTF8(fd) {
+ vi.valType = validationTypeUTF8String
+ }
+ }
+ default:
+ switch fd.Kind() {
+ case pref.MessageKind:
+ vi.typ = validationTypeMessage
+ if !fd.IsWeak() {
+ vi.mi = getMessageInfo(ft)
+ }
+ case pref.GroupKind:
+ vi.typ = validationTypeGroup
+ vi.mi = getMessageInfo(ft)
+ case pref.StringKind:
+ vi.typ = validationTypeBytes
+ if strs.EnforceUTF8(fd) {
+ vi.typ = validationTypeUTF8String
+ }
+ default:
+ switch wireTypes[fd.Kind()] {
+ case protowire.VarintType:
+ vi.typ = validationTypeVarint
+ case protowire.Fixed32Type:
+ vi.typ = validationTypeFixed32
+ case protowire.Fixed64Type:
+ vi.typ = validationTypeFixed64
+ case protowire.BytesType:
+ vi.typ = validationTypeBytes
+ }
+ }
+ }
+ return vi
+}
+
+func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) {
+ mi.init()
+ type validationState struct {
+ typ validationType
+ keyType, valType validationType
+ endGroup protowire.Number
+ mi *MessageInfo
+ tail []byte
+ requiredMask uint64
+ }
+
+ // Pre-allocate some slots to avoid repeated slice reallocation.
+ states := make([]validationState, 0, 16)
+ states = append(states, validationState{
+ typ: validationTypeMessage,
+ mi: mi,
+ })
+ if groupTag > 0 {
+ states[0].typ = validationTypeGroup
+ states[0].endGroup = groupTag
+ }
+ initialized := true
+ start := len(b)
+State:
+ for len(states) > 0 {
+ st := &states[len(states)-1]
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, ValidationInvalid
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if st.endGroup == num {
+ goto PopState
+ }
+ return out, ValidationInvalid
+ }
+ var vi validationInfo
+ switch {
+ case st.typ == validationTypeMap:
+ switch num {
+ case 1:
+ vi.typ = st.keyType
+ case 2:
+ vi.typ = st.valType
+ vi.mi = st.mi
+ vi.requiredBit = 1
+ }
+ case flags.ProtoLegacy && st.mi.isMessageSet:
+ switch num {
+ case messageset.FieldItem:
+ vi.typ = validationTypeMessageSetItem
+ }
+ default:
+ var f *coderFieldInfo
+ if int(num) < len(st.mi.denseCoderFields) {
+ f = st.mi.denseCoderFields[num]
+ } else {
+ f = st.mi.coderFields[num]
+ }
+ if f != nil {
+ vi = f.validation
+ if vi.typ == validationTypeMessage && vi.mi == nil {
+ // Probable weak field.
+ //
+ // TODO: Consider storing the results of this lookup somewhere
+ // rather than recomputing it on every validation.
+ fd := st.mi.Desc.Fields().ByNumber(num)
+ if fd == nil || !fd.IsWeak() {
+ break
+ }
+ messageName := fd.Message().FullName()
+ messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+ switch err {
+ case nil:
+ vi.mi, _ = messageType.(*MessageInfo)
+ case preg.NotFound:
+ vi.typ = validationTypeBytes
+ default:
+ return out, ValidationUnknown
+ }
+ }
+ break
+ }
+ // Possible extension field.
+ //
+ // TODO: We should return ValidationUnknown when:
+ // 1. The resolver is not frozen. (More extensions may be added to it.)
+ // 2. The resolver returns preg.NotFound.
+ // In this case, a type added to the resolver in the future could cause
+ // unmarshaling to begin failing. Supporting this requires some way to
+ // determine if the resolver is frozen.
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num)
+ if err != nil && err != preg.NotFound {
+ return out, ValidationUnknown
+ }
+ if err == nil {
+ vi = getExtensionFieldInfo(xt).validation
+ }
+ }
+ if vi.requiredBit != 0 {
+ // Check that the field has a compatible wire type.
+ // We only need to consider non-repeated field types,
+ // since repeated fields (and maps) can never be required.
+ ok := false
+ switch vi.typ {
+ case validationTypeVarint:
+ ok = wtyp == protowire.VarintType
+ case validationTypeFixed32:
+ ok = wtyp == protowire.Fixed32Type
+ case validationTypeFixed64:
+ ok = wtyp == protowire.Fixed64Type
+ case validationTypeBytes, validationTypeUTF8String, validationTypeMessage:
+ ok = wtyp == protowire.BytesType
+ case validationTypeGroup:
+ ok = wtyp == protowire.StartGroupType
+ }
+ if ok {
+ st.requiredMask |= vi.requiredBit
+ }
+ }
+
+ switch wtyp {
+ case protowire.VarintType:
+ if len(b) >= 10 {
+ switch {
+ case b[0] < 0x80:
+ b = b[1:]
+ case b[1] < 0x80:
+ b = b[2:]
+ case b[2] < 0x80:
+ b = b[3:]
+ case b[3] < 0x80:
+ b = b[4:]
+ case b[4] < 0x80:
+ b = b[5:]
+ case b[5] < 0x80:
+ b = b[6:]
+ case b[6] < 0x80:
+ b = b[7:]
+ case b[7] < 0x80:
+ b = b[8:]
+ case b[8] < 0x80:
+ b = b[9:]
+ case b[9] < 0x80 && b[9] < 2:
+ b = b[10:]
+ default:
+ return out, ValidationInvalid
+ }
+ } else {
+ switch {
+ case len(b) > 0 && b[0] < 0x80:
+ b = b[1:]
+ case len(b) > 1 && b[1] < 0x80:
+ b = b[2:]
+ case len(b) > 2 && b[2] < 0x80:
+ b = b[3:]
+ case len(b) > 3 && b[3] < 0x80:
+ b = b[4:]
+ case len(b) > 4 && b[4] < 0x80:
+ b = b[5:]
+ case len(b) > 5 && b[5] < 0x80:
+ b = b[6:]
+ case len(b) > 6 && b[6] < 0x80:
+ b = b[7:]
+ case len(b) > 7 && b[7] < 0x80:
+ b = b[8:]
+ case len(b) > 8 && b[8] < 0x80:
+ b = b[9:]
+ case len(b) > 9 && b[9] < 2:
+ b = b[10:]
+ default:
+ return out, ValidationInvalid
+ }
+ }
+ continue State
+ case protowire.BytesType:
+ var size uint64
+ if len(b) >= 1 && b[0] < 0x80 {
+ size = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ size = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ size, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ if size > uint64(len(b)) {
+ return out, ValidationInvalid
+ }
+ v := b[:size]
+ b = b[size:]
+ switch vi.typ {
+ case validationTypeMessage:
+ if vi.mi == nil {
+ return out, ValidationUnknown
+ }
+ vi.mi.init()
+ fallthrough
+ case validationTypeMap:
+ if vi.mi != nil {
+ vi.mi.init()
+ }
+ states = append(states, validationState{
+ typ: vi.typ,
+ keyType: vi.keyType,
+ valType: vi.valType,
+ mi: vi.mi,
+ tail: b,
+ })
+ b = v
+ continue State
+ case validationTypeRepeatedVarint:
+ // Packed field.
+ for len(v) > 0 {
+ _, n := protowire.ConsumeVarint(v)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ v = v[n:]
+ }
+ case validationTypeRepeatedFixed32:
+ // Packed field.
+ if len(v)%4 != 0 {
+ return out, ValidationInvalid
+ }
+ case validationTypeRepeatedFixed64:
+ // Packed field.
+ if len(v)%8 != 0 {
+ return out, ValidationInvalid
+ }
+ case validationTypeUTF8String:
+ if !utf8.Valid(v) {
+ return out, ValidationInvalid
+ }
+ }
+ case protowire.Fixed32Type:
+ if len(b) < 4 {
+ return out, ValidationInvalid
+ }
+ b = b[4:]
+ case protowire.Fixed64Type:
+ if len(b) < 8 {
+ return out, ValidationInvalid
+ }
+ b = b[8:]
+ case protowire.StartGroupType:
+ switch {
+ case vi.typ == validationTypeGroup:
+ if vi.mi == nil {
+ return out, ValidationUnknown
+ }
+ vi.mi.init()
+ states = append(states, validationState{
+ typ: validationTypeGroup,
+ mi: vi.mi,
+ endGroup: num,
+ })
+ continue State
+ case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
+ typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
+ if err != nil {
+ return out, ValidationInvalid
+ }
+ xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid)
+ switch {
+ case err == preg.NotFound:
+ b = b[n:]
+ case err != nil:
+ return out, ValidationUnknown
+ default:
+ xvi := getExtensionFieldInfo(xt).validation
+ if xvi.mi != nil {
+ xvi.mi.init()
+ }
+ states = append(states, validationState{
+ typ: xvi.typ,
+ mi: xvi.mi,
+ tail: b[n:],
+ })
+ b = v
+ continue State
+ }
+ default:
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ b = b[n:]
+ }
+ default:
+ return out, ValidationInvalid
+ }
+ }
+ if st.endGroup != 0 {
+ return out, ValidationInvalid
+ }
+ if len(b) != 0 {
+ return out, ValidationInvalid
+ }
+ b = st.tail
+ PopState:
+ numRequiredFields := 0
+ switch st.typ {
+ case validationTypeMessage, validationTypeGroup:
+ numRequiredFields = int(st.mi.numRequiredFields)
+ case validationTypeMap:
+ // If this is a map field with a message value that contains
+ // required fields, require that the value be present.
+ if st.mi != nil && st.mi.numRequiredFields > 0 {
+ numRequiredFields = 1
+ }
+ }
+ // If there are more than 64 required fields, this check will
+ // always fail and we will report that the message is potentially
+ // uninitialized.
+ if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields {
+ initialized = false
+ }
+ states = states[:len(states)-1]
+ }
+ out.n = start - len(b)
+ if initialized {
+ out.initialized = true
+ }
+ return out, ValidationValid
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
new file mode 100644
index 00000000..009cbefd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// weakFields adds methods to the exported WeakFields type for internal use.
+//
+// The exported type is an alias to an unnamed type, so methods can't be
+// defined directly on it.
+type weakFields WeakFields
+
+func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) {
+ m, ok := w[int32(num)]
+ return m, ok
+}
+
+func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) {
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
+
+func (w *weakFields) clear(num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool {
+ _, ok := w[int32(num)]
+ return ok
+}
+
+func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) {
+ delete(*w, int32(num))
+}
+
+func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage {
+ if m, ok := w[int32(num)]; ok {
+ return m
+ }
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ return mt.Zero().Interface()
+}
+
+func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) {
+ if m != nil {
+ mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+ if mt == nil {
+ panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+ }
+ if mt != m.ProtoReflect().Type() {
+ panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
+ }
+ }
+ if m == nil || !m.ProtoReflect().IsValid() {
+ delete(*w, int32(num))
+ return
+ }
+ if *w == nil {
+ *w = make(weakFields)
+ }
+ (*w)[int32(num)] = m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
new file mode 100644
index 00000000..a3de1cf3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mapsort provides sorted access to maps.
+package mapsort
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Range iterates over every map entry in sorted key order,
+// calling f for each key and value encountered.
+func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
+ var keys []protoreflect.MapKey
+ mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool {
+ keys = append(keys, key)
+ return true
+ })
+ sort.Slice(keys, func(i, j int) bool {
+ switch keyKind {
+ case protoreflect.BoolKind:
+ return !keys[i].Bool() && keys[j].Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind,
+ protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return keys[i].Int() < keys[j].Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind,
+ protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return keys[i].Uint() < keys[j].Uint()
+ case protoreflect.StringKind:
+ return keys[i].String() < keys[j].String()
+ default:
+ panic("invalid kind: " + keyKind.String())
+ }
+ })
+ for _, key := range keys {
+ if !f(key, mapv.Get(key)) {
+ break
+ }
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
new file mode 100644
index 00000000..49dc4fcd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pragma provides types that can be embedded into a struct to
+// statically enforce or prevent certain language properties.
+package pragma
+
+import "sync"
+
+// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals.
+type NoUnkeyedLiterals struct{}
+
+// DoNotImplement can be embedded in an interface to prevent trivial
+// implementations of the interface.
+//
+// This is useful to prevent unauthorized implementations of an interface
+// so that it can be extended in the future for any protobuf language changes.
+type DoNotImplement interface{ ProtoInternal(DoNotImplement) }
+
+// DoNotCompare can be embedded in a struct to prevent comparability.
+type DoNotCompare [0]func()
+
+// DoNotCopy can be embedded in a struct to help prevent shallow copies.
+// This does not rely on a Go language feature, but rather a special case
+// within the vet checker.
+//
+// See https://golang.org/issues/8005.
+type DoNotCopy [0]sync.Mutex
diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go
new file mode 100644
index 00000000..d3d7f89a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/set/ints.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package set provides simple set data structures for uint64s.
+package set
+
+import "math/bits"
+
+// int64s represents a set of integers within the range of 0..63.
+type int64s uint64
+
+func (bs *int64s) Len() int {
+ return bits.OnesCount64(uint64(*bs))
+}
+func (bs *int64s) Has(n uint64) bool {
+	return uint64(*bs)&(uint64(1)<<n) > 0
+}
+func (bs *int64s) Set(n uint64) {
+ *(*uint64)(bs) |= uint64(1) << n
+}
+func (bs *int64s) Clear(n uint64) {
+ *(*uint64)(bs) &^= uint64(1) << n
+}
+
+// Ints represents a set of integers within the range of 0..math.MaxUint64.
+type Ints struct {
+ lo int64s
+ hi map[uint64]struct{}
+}
+
+func (bs *Ints) Len() int {
+ return bs.lo.Len() + len(bs.hi)
+}
+func (bs *Ints) Has(n uint64) bool {
+ if n < 64 {
+ return bs.lo.Has(n)
+ }
+ _, ok := bs.hi[n]
+ return ok
+}
+func (bs *Ints) Set(n uint64) {
+ if n < 64 {
+ bs.lo.Set(n)
+ return
+ }
+ if bs.hi == nil {
+ bs.hi = make(map[uint64]struct{})
+ }
+ bs.hi[n] = struct{}{}
+}
+func (bs *Ints) Clear(n uint64) {
+ if n < 64 {
+ bs.lo.Clear(n)
+ return
+ }
+ delete(bs.hi, n)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go
new file mode 100644
index 00000000..0b74e765
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package strs provides string manipulation functionality specific to protobuf.
+package strs
+
+import (
+ "go/token"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// EnforceUTF8 reports whether to enforce strict UTF-8 validation.
+func EnforceUTF8(fd protoreflect.FieldDescriptor) bool {
+ if flags.ProtoLegacy {
+ if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok {
+ return fd.EnforceUTF8()
+ }
+ }
+ return fd.Syntax() == protoreflect.Proto3
+}
+
+// GoCamelCase camel-cases a protobuf name for use as a Go identifier.
+//
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+func GoCamelCase(s string) string {
+ // Invariant: if the next letter is lower case, it must be converted
+ // to upper case.
+ // That is, we process a word at a time, where words are marked by _ or
+ // upper case letter. Digits are treated as words.
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]):
+ // Skip over '.' in ".{{lowercase}}".
+ case c == '.':
+ b = append(b, '_') // convert '.' to '_'
+ case c == '_' && (i == 0 || s[i-1] == '.'):
+ // Convert initial '_' to ensure we start with a capital letter.
+ // Do the same for '_' after '.' to match historic behavior.
+ b = append(b, 'X') // convert '_' to 'X'
+ case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]):
+ // Skip over '_' in "_{{lowercase}}".
+ case isASCIIDigit(c):
+ b = append(b, c)
+ default:
+ // Assume we have a letter now - if not, it's a bogus identifier.
+ // The next word is a sequence of characters that must start upper case.
+ if isASCIILower(c) {
+ c -= 'a' - 'A' // convert lowercase to uppercase
+ }
+ b = append(b, c)
+
+ // Accept lower case sequence that follows.
+ for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ {
+ b = append(b, s[i+1])
+ }
+ }
+ }
+ return string(b)
+}
+
+// GoSanitized converts a string to a valid Go identifier.
+func GoSanitized(s string) string {
+ // Sanitize the input to the set of valid characters,
+ // which must be '_' or be in the Unicode L or N categories.
+ s = strings.Map(func(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ return '_'
+ }, s)
+
+ // Prepend '_' in the event of a Go keyword conflict or if
+ // the identifier is invalid (does not start in the Unicode L category).
+ r, _ := utf8.DecodeRuneInString(s)
+ if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) {
+ return "_" + s
+ }
+ return s
+}
+
+// JSONCamelCase converts a snake_case identifier to a camelCase identifier,
+// according to the protobuf JSON specification.
+func JSONCamelCase(s string) string {
+ var b []byte
+ var wasUnderscore bool
+ for i := 0; i < len(s); i++ { // proto identifiers are always ASCII
+ c := s[i]
+ if c != '_' {
+ if wasUnderscore && isASCIILower(c) {
+ c -= 'a' - 'A' // convert to uppercase
+ }
+ b = append(b, c)
+ }
+ wasUnderscore = c == '_'
+ }
+ return string(b)
+}
+
+// JSONSnakeCase converts a camelCase identifier to a snake_case identifier,
+// according to the protobuf JSON specification.
+func JSONSnakeCase(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ { // proto identifiers are always ASCII
+ c := s[i]
+ if isASCIIUpper(c) {
+ b = append(b, '_')
+ c += 'a' - 'A' // convert to lowercase
+ }
+ b = append(b, c)
+ }
+ return string(b)
+}
+
+// MapEntryName derives the name of the map entry message given the field name.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057
+func MapEntryName(s string) string {
+ var b []byte
+ upperNext := true
+ for _, c := range s {
+ switch {
+ case c == '_':
+ upperNext = true
+ case upperNext:
+ b = append(b, byte(unicode.ToUpper(c)))
+ upperNext = false
+ default:
+ b = append(b, byte(c))
+ }
+ }
+ b = append(b, "Entry"...)
+ return string(b)
+}
+
+// EnumValueName derives the camel-cased enum value name.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313
+func EnumValueName(s string) string {
+ var b []byte
+ upperNext := true
+ for _, c := range s {
+ switch {
+ case c == '_':
+ upperNext = true
+ case upperNext:
+ b = append(b, byte(unicode.ToUpper(c)))
+ upperNext = false
+ default:
+ b = append(b, byte(unicode.ToLower(c)))
+ upperNext = false
+ }
+ }
+ return string(b)
+}
+
+// TrimEnumPrefix trims the enum name prefix from an enum value name,
+// where the prefix is all lowercase without underscores.
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375
+func TrimEnumPrefix(s, prefix string) string {
+ s0 := s // original input
+ for len(s) > 0 && len(prefix) > 0 {
+ if s[0] == '_' {
+ s = s[1:]
+ continue
+ }
+ if unicode.ToLower(rune(s[0])) != rune(prefix[0]) {
+ return s0 // no prefix match
+ }
+ s, prefix = s[1:], prefix[1:]
+ }
+ if len(prefix) > 0 {
+ return s0 // no prefix match
+ }
+ s = strings.TrimLeft(s, "_")
+ if len(s) == 0 {
+ return s0 // avoid returning empty string
+ }
+ return s
+}
+
+func isASCIILower(c byte) bool {
+ return 'a' <= c && c <= 'z'
+}
+func isASCIIUpper(c byte) bool {
+ return 'A' <= c && c <= 'Z'
+}
+func isASCIIDigit(c byte) bool {
+ return '0' <= c && c <= '9'
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
new file mode 100644
index 00000000..85e074c9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package strs
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+func UnsafeString(b []byte) string {
+ return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+ return []byte(s)
+}
+
+type Builder struct{}
+
+func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+ return prefix.Append(name)
+}
+
+func (*Builder) MakeString(b []byte) string {
+ return string(b)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
new file mode 100644
index 00000000..2160c701
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -0,0 +1,94 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package strs
+
+import (
+ "unsafe"
+
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type (
+ stringHeader struct {
+ Data unsafe.Pointer
+ Len int
+ }
+ sliceHeader struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+ }
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) (s string) {
+ src := (*sliceHeader)(unsafe.Pointer(&b))
+ dst := (*stringHeader)(unsafe.Pointer(&s))
+ dst.Data = src.Data
+ dst.Len = src.Len
+ return s
+}
+
+// UnsafeBytes returns an unsafe bytes slice reference of s.
+// The caller must treat returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) (b []byte) {
+ src := (*stringHeader)(unsafe.Pointer(&s))
+ dst := (*sliceHeader)(unsafe.Pointer(&b))
+ dst.Data = src.Data
+ dst.Len = src.Len
+ dst.Cap = src.Len
+ return b
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+ buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+ n := len(prefix) + len(".") + len(name)
+ if len(prefix) == 0 {
+ n -= len(".")
+ }
+ sb.grow(n)
+ sb.buf = append(sb.buf, prefix...)
+ sb.buf = append(sb.buf, '.')
+ sb.buf = append(sb.buf, name...)
+ return pref.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+ sb.grow(len(b))
+ sb.buf = append(sb.buf, b...)
+ return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+ if cap(sb.buf)-len(sb.buf) >= n {
+ return
+ }
+
+ // Unlike strings.Builder, we do not need to copy over the contents
+ // of the old buffer since our builder provides no API for
+ // retrieving previously created strings.
+ sb.buf = make([]byte, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+ return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
new file mode 100644
index 00000000..4088e59c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -0,0 +1,79 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package version records versioning information about this module.
+package version
+
+import (
+ "fmt"
+ "strings"
+)
+
+// These constants determine the current version of this module.
+//
+//
+// For our release process, we enforce the following rules:
+// * Tagged releases use a tag that is identical to String.
+// * Tagged releases never reference a commit where the String
+// contains "devel".
+// * The set of all commits in this repository where String
+// does not contain "devel" must have a unique String.
+//
+//
+// Steps for tagging a new release:
+// 1. Create a new CL.
+//
+// 2. Update Minor, Patch, and/or PreRelease as necessary.
+// PreRelease must not contain the string "devel".
+//
+// 3. Since the last released minor version, have there been any changes to
+// generator that relies on new functionality in the runtime?
+// If yes, then increment RequiredGenerated.
+//
+// 4. Since the last released minor version, have there been any changes to
+// the runtime that removes support for old .pb.go source code?
+// If yes, then increment SupportMinimum.
+//
+// 5. Send out the CL for review and submit it.
+// Note that the next CL in step 8 must be submitted after this CL
+// without any other CLs in-between.
+//
+// 6. Tag a new version, where the tag is is the current String.
+//
+// 7. Write release notes for all notable changes
+// between this release and the last release.
+//
+// 8. Create a new CL.
+//
+// 9. Update PreRelease to include the string "devel".
+// For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
+//
+// 10. Send out the CL for review and submit it.
+const (
+ Major = 1
+ Minor = 23
+ Patch = 0
+ PreRelease = ""
+)
+
+// String formats the version string for this module in semver format.
+//
+// Examples:
+// v1.20.1
+// v1.21.0-rc.1
+func String() string {
+ v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch)
+ if PreRelease != "" {
+ v += "-" + PreRelease
+
+ // TODO: Add metadata about the commit or build hash.
+ // See https://golang.org/issue/29814
+ // See https://golang.org/issue/33533
+ var metadata string
+ if strings.Contains(PreRelease, "devel") && metadata != "" {
+ v += "+" + metadata
+ }
+ }
+ return v
+}
diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go
new file mode 100644
index 00000000..3e9a6a2f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/checkinit.go
@@ -0,0 +1,71 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// CheckInitialized returns an error if any required fields in m are not set.
+func CheckInitialized(m Message) error {
+ // Treat a nil message interface as an "untyped" empty message,
+ // which we assume to have no required fields.
+ if m == nil {
+ return nil
+ }
+
+ return checkInitialized(m.ProtoReflect())
+}
+
+// CheckInitialized returns an error if any required fields in m are not set.
+func checkInitialized(m protoreflect.Message) error {
+ if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil {
+ _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{
+ Message: m,
+ })
+ return err
+ }
+ return checkInitializedSlow(m)
+}
+
+func checkInitializedSlow(m protoreflect.Message) error {
+ md := m.Descriptor()
+ fds := md.Fields()
+ for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ {
+ fd := fds.ByNumber(nums.Get(i))
+ if !m.Has(fd) {
+ return errors.RequiredNotSet(string(fd.FullName()))
+ }
+ }
+ var err error
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsList():
+ if fd.Message() == nil {
+ return true
+ }
+ for i, list := 0, v.List(); i < list.Len() && err == nil; i++ {
+ err = checkInitialized(list.Get(i).Message())
+ }
+ case fd.IsMap():
+ if fd.MapValue().Message() == nil {
+ return true
+ }
+ v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool {
+ err = checkInitialized(v.Message())
+ return err == nil
+ })
+ default:
+ if fd.Message() == nil {
+ return true
+ }
+ err = checkInitialized(v.Message())
+ }
+ return err == nil
+ })
+ return err
+}
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
new file mode 100644
index 00000000..12821476
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -0,0 +1,270 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// UnmarshalOptions configures the unmarshaler.
+//
+// Example usage:
+// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)
+type UnmarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // Merge merges the input into the destination message.
+ // The default behavior is to always reset the message before unmarshaling,
+ // unless Merge is specified.
+ Merge bool
+
+ // AllowPartial accepts input for messages that will result in missing
+ // required fields. If AllowPartial is false (the default), Unmarshal will
+ // return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // If DiscardUnknown is set, unknown fields are ignored.
+ DiscardUnknown bool
+
+ // Resolver is used for looking up types when unmarshaling extension fields.
+ // If nil, this defaults to using protoregistry.GlobalTypes.
+ Resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+// Unmarshal parses the wire-format message in b and places the result in m.
+func Unmarshal(b []byte, m Message) error {
+ _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect())
+ return err
+}
+
+// Unmarshal parses the wire-format message in b and places the result in m.
+func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
+ _, err := o.unmarshal(b, m.ProtoReflect())
+ return err
+}
+
+// UnmarshalState parses a wire-format message and places the result in m.
+//
+// This method permits fine-grained control over the unmarshaler.
+// Most users should use Unmarshal instead.
+func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
+ return o.unmarshal(in.Buf, in.Message)
+}
+
+func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) {
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+ if !o.Merge {
+ Reset(m.Interface()) // TODO
+ }
+ allowPartial := o.AllowPartial
+ o.Merge = true
+ o.AllowPartial = true
+ methods := protoMethods(m)
+ if methods != nil && methods.Unmarshal != nil &&
+ !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) {
+ in := protoiface.UnmarshalInput{
+ Message: m,
+ Buf: b,
+ Resolver: o.Resolver,
+ }
+ if o.DiscardUnknown {
+ in.Flags |= protoiface.UnmarshalDiscardUnknown
+ }
+ out, err = methods.Unmarshal(in)
+ } else {
+ err = o.unmarshalMessageSlow(b, m)
+ }
+ if err != nil {
+ return out, err
+ }
+ if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) {
+ return out, nil
+ }
+ return out, checkInitialized(m)
+}
+
+func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error {
+ _, err := o.unmarshal(b, m)
+ return err
+}
+
+func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error {
+ md := m.Descriptor()
+ if messageset.IsMessageSet(md) {
+ return unmarshalMessageSet(b, m, o)
+ }
+ fields := md.Fields()
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ num, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return protowire.ParseError(tagLen)
+ }
+ if num > protowire.MaxValidNumber {
+ return errors.New("invalid field number")
+ }
+
+ // Find the field descriptor for this field number.
+ fd := fields.ByNumber(num)
+ if fd == nil && md.ExtensionRanges().Has(num) {
+ extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num)
+ if err != nil && err != protoregistry.NotFound {
+ return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err)
+ }
+ if extType != nil {
+ fd = extType.TypeDescriptor()
+ }
+ }
+ var err error
+ if fd == nil {
+ err = errUnknown
+ } else if flags.ProtoLegacy {
+ if fd.IsWeak() && fd.Message().IsPlaceholder() {
+ err = errUnknown // weak referent is not linked in
+ }
+ }
+
+ // Parse the field value.
+ var valLen int
+ switch {
+ case err != nil:
+ case fd.IsList():
+ valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd)
+ case fd.IsMap():
+ valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd)
+ default:
+ valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd)
+ }
+ if err != nil {
+ if err != errUnknown {
+ return err
+ }
+ valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:])
+ if valLen < 0 {
+ return protowire.ParseError(valLen)
+ }
+ if !o.DiscardUnknown {
+ m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...))
+ }
+ }
+ b = b[tagLen+valLen:]
+ }
+ return nil
+}
+
+func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) {
+ v, n, err := o.unmarshalScalar(b, wtyp, fd)
+ if err != nil {
+ return 0, err
+ }
+ switch fd.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ m2 := m.Mutable(fd).Message()
+ if err := o.unmarshalMessage(v.Bytes(), m2); err != nil {
+ return n, err
+ }
+ default:
+ // Non-message scalars replace the previous value.
+ m.Set(fd, v)
+ }
+ return n, nil
+}
+
+func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ b, n = protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ var (
+ keyField = fd.MapKey()
+ valField = fd.MapValue()
+ key protoreflect.Value
+ val protoreflect.Value
+ haveKey bool
+ haveVal bool
+ )
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ val = mapv.NewValue()
+ }
+ // Map entries are represented as a two-element message with fields
+ // containing the key and value.
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ if num > protowire.MaxValidNumber {
+ return 0, errors.New("invalid field number")
+ }
+ b = b[n:]
+ err = errUnknown
+ switch num {
+ case 1:
+ key, n, err = o.unmarshalScalar(b, wtyp, keyField)
+ if err != nil {
+ break
+ }
+ haveKey = true
+ case 2:
+ var v protoreflect.Value
+ v, n, err = o.unmarshalScalar(b, wtyp, valField)
+ if err != nil {
+ break
+ }
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil {
+ return 0, err
+ }
+ default:
+ val = v
+ }
+ haveVal = true
+ }
+ if err == errUnknown {
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ } else if err != nil {
+ return 0, err
+ }
+ b = b[n:]
+ }
+ // Every map entry should have entries for key and value, but this is not strictly required.
+ if !haveKey {
+ key = keyField.Default()
+ }
+ if !haveVal {
+ switch valField.Kind() {
+ case protoreflect.GroupKind, protoreflect.MessageKind:
+ default:
+ val = valField.Default()
+ }
+ }
+ mapv.Set(key.MapKey(), val)
+ return n, nil
+}
+
+// errUnknown is used internally to indicate fields which should be added
+// to the unknown field set of a message. It is never returned from an exported
+// function.
+var errUnknown = errors.New("BUG: internal error (unknown)")
diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go
new file mode 100644
index 00000000..d6dc904d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go
@@ -0,0 +1,603 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unmarshalScalar decodes a value of the given kind.
+//
+// Message values are decoded into a []byte which aliases the input data.
+func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil
+ case protoreflect.EnumKind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil
+ case protoreflect.Int32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt32(int32(v)), n, nil
+ case protoreflect.Sint32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil
+ case protoreflect.Uint32Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), n, nil
+ case protoreflect.Int64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt64(int64(v)), n, nil
+ case protoreflect.Sint64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil
+ case protoreflect.Uint64Kind:
+ if wtyp != protowire.VarintType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint64(v), n, nil
+ case protoreflect.Sfixed32Kind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt32(int32(v)), n, nil
+ case protoreflect.Fixed32Kind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), n, nil
+ case protoreflect.FloatKind:
+ if wtyp != protowire.Fixed32Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil
+ case protoreflect.Sfixed64Kind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfInt64(int64(v)), n, nil
+ case protoreflect.Fixed64Kind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfUint64(v), n, nil
+ case protoreflect.DoubleKind:
+ if wtyp != protowire.Fixed64Type {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil
+ case protoreflect.StringKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
+ return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ return protoreflect.ValueOfString(string(v)), n, nil
+ case protoreflect.BytesKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil
+ case protoreflect.MessageKind:
+ if wtyp != protowire.BytesType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBytes(v), n, nil
+ case protoreflect.GroupKind:
+ if wtyp != protowire.StartGroupType {
+ return val, 0, errUnknown
+ }
+ v, n := protowire.ConsumeGroup(fd.Number(), b)
+ if n < 0 {
+ return val, 0, protowire.ParseError(n)
+ }
+ return protoreflect.ValueOfBytes(v), n, nil
+ default:
+ return val, 0, errUnknown
+ }
+}
+
+func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v)))
+ return n, nil
+ case protoreflect.EnumKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)))
+ return n, nil
+ case protoreflect.Int32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ return n, nil
+ case protoreflect.Sint32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))))
+ return n, nil
+ case protoreflect.Uint32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ return n, nil
+ case protoreflect.Int64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ return n, nil
+ case protoreflect.Sint64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)))
+ return n, nil
+ case protoreflect.Uint64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeVarint(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint64(v))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.VarintType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ return n, nil
+ case protoreflect.Sfixed32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt32(int32(v)))
+ return n, nil
+ case protoreflect.Fixed32Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint32(uint32(v)))
+ return n, nil
+ case protoreflect.FloatKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed32(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed32Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))))
+ return n, nil
+ case protoreflect.Sfixed64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfInt64(int64(v)))
+ return n, nil
+ case protoreflect.Fixed64Kind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfUint64(v))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfUint64(v))
+ return n, nil
+ case protoreflect.DoubleKind:
+ if wtyp == protowire.BytesType {
+ buf, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ for len(buf) > 0 {
+ v, n := protowire.ConsumeFixed64(buf)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ buf = buf[n:]
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ }
+ return n, nil
+ }
+ if wtyp != protowire.Fixed64Type {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v)))
+ return n, nil
+ case protoreflect.StringKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ if strs.EnforceUTF8(fd) && !utf8.Valid(v) {
+ return 0, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ list.Append(protoreflect.ValueOfString(string(v)))
+ return n, nil
+ case protoreflect.BytesKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...)))
+ return n, nil
+ case protoreflect.MessageKind:
+ if wtyp != protowire.BytesType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ if err := o.unmarshalMessage(v, m.Message()); err != nil {
+ return 0, err
+ }
+ list.Append(m)
+ return n, nil
+ case protoreflect.GroupKind:
+ if wtyp != protowire.StartGroupType {
+ return 0, errUnknown
+ }
+ v, n := protowire.ConsumeGroup(fd.Number(), b)
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ m := list.NewElement()
+ if err := o.unmarshalMessage(v, m.Message()); err != nil {
+ return 0, err
+ }
+ list.Append(m)
+ return n, nil
+ default:
+ return 0, errUnknown
+ }
+}
+
+// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices.
+var emptyBuf [0]byte
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
new file mode 100644
index 00000000..c52d8c4a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functions operating on protocol buffer messages.
+//
+// For documentation on protocol buffers in general, see:
+//
+// https://developers.google.com/protocol-buffers
+//
+// For a tutorial on using protocol buffers with Go, see:
+//
+// https://developers.google.com/protocol-buffers/docs/gotutorial
+//
+// For a guide to generated Go protocol buffer code, see:
+//
+// https://developers.google.com/protocol-buffers/docs/reference/go-generated
+//
+//
+// Binary serialization
+//
+// This package contains functions to convert to and from the wire format,
+// an efficient binary serialization of protocol buffers.
+//
+// • Size reports the size of a message in the wire format.
+//
+// • Marshal converts a message to the wire format.
+// The MarshalOptions type provides more control over wire marshaling.
+//
+// • Unmarshal converts a message from the wire format.
+// The UnmarshalOptions type provides more control over wire unmarshaling.
+//
+//
+// Basic message operations
+//
+// • Clone makes a deep copy of a message.
+//
+// • Merge merges the content of a message into another.
+//
+// • Equal compares two messages. For more control over comparisons
+// and detailed reporting of differences, see package
+// "google.golang.org/protobuf/testing/protocmp".
+//
+// • Reset clears the content of a message.
+//
+// • CheckInitialized reports whether all required fields in a message are set.
+//
+//
+// Optional scalar constructors
+//
+// The API for some generated messages represents optional scalar fields
+// as pointers to a value. For example, an optional string field has the
+// Go type *string.
+//
+// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String
+// take a value and return a pointer to a new instance of it,
+// to simplify construction of optional field values.
+//
+// Generated enum types usually have an Enum method which performs the
+// same operation.
+//
+// Optional scalar fields are only supported in proto2.
+//
+//
+// Extension accessors
+//
+// • HasExtension, GetExtension, SetExtension, and ClearExtension
+// access extension field values in a protocol buffer message.
+//
+// Extension fields are only supported in proto2.
+//
+//
+// Related packages
+//
+// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
+// and from JSON.
+//
+// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to
+// and from the text format.
+//
+// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a
+// reflection interface for protocol buffer data types.
+//
+// • Package "google.golang.org/protobuf/testing/protocmp" provides features
+// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp"
+// package.
+//
+// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic
+// message type, suitable for working with messages where the protocol buffer
+// type is only known at runtime.
+//
+// This module contains additional packages for more specialized use cases.
+// Consult the individual package documentation for details.
+package proto
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
new file mode 100644
index 00000000..456bfda4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -0,0 +1,343 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/fieldsort"
+ "google.golang.org/protobuf/internal/mapsort"
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// MarshalOptions configures the marshaler.
+//
+// Example usage:
+// b, err := MarshalOptions{Deterministic: true}.Marshal(m)
+type MarshalOptions struct {
+ pragma.NoUnkeyedLiterals
+
+ // AllowPartial allows messages that have missing required fields to marshal
+ // without returning an error. If AllowPartial is false (the default),
+ // Marshal will return an error if there are any missing required fields.
+ AllowPartial bool
+
+ // Deterministic controls whether the same message will always be
+ // serialized to the same bytes within the same binary.
+ //
+ // Setting this option guarantees that repeated serialization of
+ // the same message will return the same bytes, and that different
+ // processes of the same binary (which may be executing on different
+ // machines) will serialize equal messages to the same bytes.
+ // It has no effect on the resulting size of the encoded message compared
+ // to a non-deterministic marshal.
+ //
+ // Note that the deterministic serialization is NOT canonical across
+ // languages. It is not guaranteed to remain stable over time. It is
+ // unstable across different builds with schema changes due to unknown
+ // fields. Users who need canonical serialization (e.g., persistent
+ // storage in a canonical form, fingerprinting, etc.) must define
+ // their own canonicalization specification and implement their own
+ // serializer rather than relying on this API.
+ //
+ // If deterministic serialization is requested, map entries will be
+	// sorted by keys in lexicographical order. This is an implementation
+ // detail and subject to change.
+ Deterministic bool
+
+ // UseCachedSize indicates that the result of a previous Size call
+ // may be reused.
+ //
+ // Setting this option asserts that:
+ //
+ // 1. Size has previously been called on this message with identical
+ // options (except for UseCachedSize itself).
+ //
+ // 2. The message and all its submessages have not changed in any
+ // way since the Size call.
+ //
+ // If either of these invariants is violated,
+ // the results are undefined and may include panics or corrupted output.
+ //
+ // Implementations MAY take this option into account to provide
+ // better performance, but there is no guarantee that they will do so.
+ // There is absolutely no guarantee that Size followed by Marshal with
+ // UseCachedSize set will perform equivalently to Marshal alone.
+ UseCachedSize bool
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
+ out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
+ return out.Buf, err
+}
+
+// Marshal returns the wire-format encoding of m.
+func (o MarshalOptions) Marshal(m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return nil, nil
+ }
+
+ out, err := o.marshal(nil, m.ProtoReflect())
+ if len(out.Buf) == 0 && err == nil {
+ out.Buf = emptyBytesForMessage(m)
+ }
+ return out.Buf, err
+}
+
+// emptyBytesForMessage returns a nil buffer if and only if m is invalid,
+// otherwise it returns a non-nil empty buffer.
+//
+// This is to assist the edge-case where user-code does the following:
+// m1.OptionalBytes, _ = proto.Marshal(m2)
+// where they expect the proto2 "optional_bytes" field to be populated
+// if and only if m2 is a valid message.
+func emptyBytesForMessage(m Message) []byte {
+ if m == nil || !m.ProtoReflect().IsValid() {
+ return nil
+ }
+ return emptyBuf[:]
+}
+
+// MarshalAppend appends the wire-format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
+ // Treat nil message interface as an empty message; nothing to append.
+ if m == nil {
+ return b, nil
+ }
+
+ out, err := o.marshal(b, m.ProtoReflect())
+ return out.Buf, err
+}
+
+// MarshalState returns the wire-format encoding of a message.
+//
+// This method permits fine-grained control over the marshaler.
+// Most users should use Marshal instead.
+func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
+ return o.marshal(in.Buf, in.Message)
+}
+
+func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) {
+ allowPartial := o.AllowPartial
+ o.AllowPartial = true
+ if methods := protoMethods(m); methods != nil && methods.Marshal != nil &&
+ !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) {
+ in := protoiface.MarshalInput{
+ Message: m,
+ Buf: b,
+ }
+ if o.Deterministic {
+ in.Flags |= protoiface.MarshalDeterministic
+ }
+ if o.UseCachedSize {
+ in.Flags |= protoiface.MarshalUseCachedSize
+ }
+ if methods.Size != nil {
+ sout := methods.Size(protoiface.SizeInput{
+ Message: m,
+ Flags: in.Flags,
+ })
+ if cap(b) < len(b)+sout.Size {
+ in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size))
+ copy(in.Buf, b)
+ }
+ in.Flags |= protoiface.MarshalUseCachedSize
+ }
+ out, err = methods.Marshal(in)
+ } else {
+ out.Buf, err = o.marshalMessageSlow(b, m)
+ }
+ if err != nil {
+ return out, err
+ }
+ if allowPartial {
+ return out, nil
+ }
+ return out, checkInitialized(m)
+}
+
+func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) {
+ out, err := o.marshal(b, m)
+ return out.Buf, err
+}
+
+// growcap scales up the capacity of a slice.
+//
+// Given a slice with a current capacity of oldcap and a desired
+// capacity of wantcap, growcap returns a new capacity >= wantcap.
+//
+// The algorithm is mostly identical to the one used by append as of Go 1.14.
+func growcap(oldcap, wantcap int) (newcap int) {
+ if wantcap > oldcap*2 {
+ newcap = wantcap
+ } else if oldcap < 1024 {
+ // The Go 1.14 runtime takes this case when len(s) < 1024,
+ // not when cap(s) < 1024. The difference doesn't seem
+ // significant here.
+ newcap = oldcap * 2
+ } else {
+ newcap = oldcap
+ for 0 < newcap && newcap < wantcap {
+ newcap += newcap / 4
+ }
+ if newcap <= 0 {
+ newcap = wantcap
+ }
+ }
+ return newcap
+}
+
+func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) {
+ if messageset.IsMessageSet(m.Descriptor()) {
+ return marshalMessageSet(b, m, o)
+ }
+ // There are many choices for what order we visit fields in. The default one here
+ // is chosen for reasonable efficiency and simplicity given the protoreflect API.
+ // It is not deterministic, since Message.Range does not return fields in any
+ // defined order.
+ //
+ // When using deterministic serialization, we sort the known fields.
+ var err error
+ o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = o.marshalField(b, fd, v)
+ return err == nil
+ })
+ if err != nil {
+ return b, err
+ }
+ b = append(b, m.GetUnknown()...)
+ return b, nil
+}
+
+// rangeFields visits fields in a defined order when deterministic serialization is enabled.
+func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ if !o.Deterministic {
+ m.Range(f)
+ return
+ }
+ var fds []protoreflect.FieldDescriptor
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ fds = append(fds, fd)
+ return true
+ })
+ sort.Slice(fds, func(a, b int) bool {
+ return fieldsort.Less(fds[a], fds[b])
+ })
+ for _, fd := range fds {
+ if !f(fd, m.Get(fd)) {
+ break
+ }
+ }
+}
+
+func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
+ switch {
+ case fd.IsList():
+ return o.marshalList(b, fd, value.List())
+ case fd.IsMap():
+ return o.marshalMap(b, fd, value.Map())
+ default:
+ b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()])
+ return o.marshalSingular(b, fd, value)
+ }
+}
+
+func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) {
+ if fd.IsPacked() && list.Len() > 0 {
+ b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
+ b, pos := appendSpeculativeLength(b)
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ var err error
+ b, err = o.marshalSingular(b, fd, list.Get(i))
+ if err != nil {
+ return b, err
+ }
+ }
+ b = finishSpeculativeLength(b, pos)
+ return b, nil
+ }
+
+ kind := fd.Kind()
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ var err error
+ b = protowire.AppendTag(b, fd.Number(), wireTypes[kind])
+ b, err = o.marshalSingular(b, fd, list.Get(i))
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) {
+ keyf := fd.MapKey()
+ valf := fd.MapValue()
+ var err error
+ o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ b = protowire.AppendTag(b, fd.Number(), protowire.BytesType)
+ var pos int
+ b, pos = appendSpeculativeLength(b)
+
+ b, err = o.marshalField(b, keyf, key.Value())
+ if err != nil {
+ return false
+ }
+ b, err = o.marshalField(b, valf, value)
+ if err != nil {
+ return false
+ }
+ b = finishSpeculativeLength(b, pos)
+ return true
+ })
+ return b, err
+}
+
+func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) {
+ if !o.Deterministic {
+ mapv.Range(f)
+ return
+ }
+ mapsort.Range(mapv, kind, f)
+}
+
+// When encoding length-prefixed fields, we speculatively set aside some number of bytes
+// for the length, encode the data, and then encode the length (shifting the data if necessary
+// to make room).
+const speculativeLength = 1
+
+func appendSpeculativeLength(b []byte) ([]byte, int) {
+ pos := len(b)
+ b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...)
+ return b, pos
+}
+
+func finishSpeculativeLength(b []byte, pos int) []byte {
+ mlen := len(b) - pos - speculativeLength
+ msiz := protowire.SizeVarint(uint64(mlen))
+ if msiz != speculativeLength {
+ for i := 0; i < msiz-speculativeLength; i++ {
+ b = append(b, 0)
+ }
+ copy(b[pos+msiz:], b[pos+speculativeLength:])
+ b = b[:pos+msiz+mlen]
+ }
+ protowire.AppendVarint(b[:pos], uint64(mlen))
+ return b
+}
diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go
new file mode 100644
index 00000000..185dacfb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "math"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+var wireTypes = map[protoreflect.Kind]protowire.Type{
+ protoreflect.BoolKind: protowire.VarintType,
+ protoreflect.EnumKind: protowire.VarintType,
+ protoreflect.Int32Kind: protowire.VarintType,
+ protoreflect.Sint32Kind: protowire.VarintType,
+ protoreflect.Uint32Kind: protowire.VarintType,
+ protoreflect.Int64Kind: protowire.VarintType,
+ protoreflect.Sint64Kind: protowire.VarintType,
+ protoreflect.Uint64Kind: protowire.VarintType,
+ protoreflect.Sfixed32Kind: protowire.Fixed32Type,
+ protoreflect.Fixed32Kind: protowire.Fixed32Type,
+ protoreflect.FloatKind: protowire.Fixed32Type,
+ protoreflect.Sfixed64Kind: protowire.Fixed64Type,
+ protoreflect.Fixed64Kind: protowire.Fixed64Type,
+ protoreflect.DoubleKind: protowire.Fixed64Type,
+ protoreflect.StringKind: protowire.BytesType,
+ protoreflect.BytesKind: protowire.BytesType,
+ protoreflect.MessageKind: protowire.BytesType,
+ protoreflect.GroupKind: protowire.StartGroupType,
+}
+
+func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
+ case protoreflect.EnumKind:
+ b = protowire.AppendVarint(b, uint64(v.Enum()))
+ case protoreflect.Int32Kind:
+ b = protowire.AppendVarint(b, uint64(int32(v.Int())))
+ case protoreflect.Sint32Kind:
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int()))))
+ case protoreflect.Uint32Kind:
+ b = protowire.AppendVarint(b, uint64(uint32(v.Uint())))
+ case protoreflect.Int64Kind:
+ b = protowire.AppendVarint(b, uint64(v.Int()))
+ case protoreflect.Sint64Kind:
+ b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int()))
+ case protoreflect.Uint64Kind:
+ b = protowire.AppendVarint(b, v.Uint())
+ case protoreflect.Sfixed32Kind:
+ b = protowire.AppendFixed32(b, uint32(v.Int()))
+ case protoreflect.Fixed32Kind:
+ b = protowire.AppendFixed32(b, uint32(v.Uint()))
+ case protoreflect.FloatKind:
+ b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float())))
+ case protoreflect.Sfixed64Kind:
+ b = protowire.AppendFixed64(b, uint64(v.Int()))
+ case protoreflect.Fixed64Kind:
+ b = protowire.AppendFixed64(b, v.Uint())
+ case protoreflect.DoubleKind:
+ b = protowire.AppendFixed64(b, math.Float64bits(v.Float()))
+ case protoreflect.StringKind:
+ if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) {
+ return b, errors.InvalidUTF8(string(fd.FullName()))
+ }
+ b = protowire.AppendString(b, v.String())
+ case protoreflect.BytesKind:
+ b = protowire.AppendBytes(b, v.Bytes())
+ case protoreflect.MessageKind:
+ var pos int
+ var err error
+ b, pos = appendSpeculativeLength(b)
+ b, err = o.marshalMessage(b, v.Message())
+ if err != nil {
+ return b, err
+ }
+ b = finishSpeculativeLength(b, pos)
+ case protoreflect.GroupKind:
+ var err error
+ b, err = o.marshalMessage(b, v.Message())
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType))
+ default:
+ return b, errors.New("invalid kind %v", fd.Kind())
+ }
+ return b, nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
new file mode 100644
index 00000000..10902bd8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they belong to the same message descriptor,
+// have the same set of populated known and extension field values,
+// and the same set of unknown fields values. If either of the top-level
+// messages are invalid, then Equal reports true only if both are invalid.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ if x == nil || y == nil {
+ return x == nil && y == nil
+ }
+ mx := x.ProtoReflect()
+ my := y.ProtoReflect()
+ if mx.IsValid() != my.IsValid() {
+ return false
+ }
+ return equalMessage(mx, my)
+}
+
+// equalMessage compares two messages.
+func equalMessage(mx, my pref.Message) bool {
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ nx := 0
+ equal := true
+ mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ nx++
+ vy := my.Get(fd)
+ equal = my.Has(fd) && equalField(fd, vx, vy)
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ ny := 0
+ my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool {
+ ny++
+ return true
+ })
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+// equalField compares two fields.
+func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool {
+ switch {
+ case fd.IsList():
+ return equalList(fd, x.List(), y.List())
+ case fd.IsMap():
+ return equalMap(fd, x.Map(), y.Map())
+ default:
+ return equalValue(fd, x, y)
+ }
+}
+
+// equalMap compares two maps.
+func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k pref.MapKey, vx pref.Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
+ return equal
+ })
+ return equal
+}
+
+// equalList compares two lists.
+func equalList(fd pref.FieldDescriptor, x, y pref.List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(fd, x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalValue compares two singular values.
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool {
+ switch {
+ case fd.Message() != nil:
+ return equalMessage(x.Message(), y.Message())
+ case fd.Kind() == pref.BytesKind:
+ return bytes.Equal(x.Bytes(), y.Bytes())
+ case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind:
+ fx := x.Float()
+ fy := y.Float()
+ if math.IsNaN(fx) || math.IsNaN(fy) {
+ return math.IsNaN(fx) && math.IsNaN(fy)
+ }
+ return fx == fy
+ default:
+ return x.Interface() == y.Interface()
+ }
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+func equalUnknown(x, y pref.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[pref.FieldNumber]pref.RawFields)
+ my := make(map[pref.FieldNumber]pref.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
new file mode 100644
index 00000000..5f293cda
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// HasExtension reports whether an extension field is populated.
+// It returns false if m is invalid or if xt does not extend m.
+func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
+ // Treat nil message interface as an empty message; no populated fields.
+ if m == nil {
+ return false
+ }
+
+ // As a special-case, we reports invalid or mismatching descriptors
+ // as always not being populated (since they aren't).
+ if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+ return false
+ }
+
+ return m.ProtoReflect().Has(xt.TypeDescriptor())
+}
+
+// ClearExtension clears an extension field such that subsequent
+// HasExtension calls return false.
+// It panics if m is invalid or if xt does not extend m.
+func ClearExtension(m Message, xt protoreflect.ExtensionType) {
+ m.ProtoReflect().Clear(xt.TypeDescriptor())
+}
+
+// GetExtension retrieves the value for an extension field.
+// If the field is unpopulated, it returns the default value for
+// scalars and an immutable, empty value for lists or messages.
+// It panics if xt does not extend m.
+func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+ // Treat nil message interface as an empty message; return the default.
+ if m == nil {
+ return xt.InterfaceOf(xt.Zero())
+ }
+
+ return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor()))
+}
+
+// SetExtension stores the value of an extension field.
+// It panics if m is invalid, xt does not extend m, or if type of v
+// is invalid for the specified extension field.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+ xd := xt.TypeDescriptor()
+ pv := xt.ValueOf(v)
+
+ // Specially treat an invalid list, map, or message as clear.
+ isValid := true
+ switch {
+ case xd.IsList():
+ isValid = pv.List().IsValid()
+ case xd.IsMap():
+ isValid = pv.Map().IsValid()
+ case xd.Message() != nil:
+ isValid = pv.Message().IsValid()
+ }
+ if !isValid {
+ m.ProtoReflect().Clear(xd)
+ return
+ }
+
+ m.ProtoReflect().Set(xd, pv)
+}
+
+// RangeExtensions iterates over every populated extension field in m in an
+// undefined order, calling f for each extension type and value encountered.
+// It returns immediately if f returns false.
+// While iterating, mutating operations may only be performed
+// on the current extension field.
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+ // Treat nil message interface as an empty message; nothing to range over.
+ if m == nil {
+ return
+ }
+
+ m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor).Type()
+ vi := xt.InterfaceOf(v)
+ return f(xt, vi)
+ }
+ return true
+ })
+}
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
new file mode 100644
index 00000000..d761ab33
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -0,0 +1,139 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Merge merges src into dst, which must be a message with the same descriptor.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src is appended to the corresponded
+// list fields in dst. The entries of every map field in src is copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+//
+// It is semantically equivalent to unmarshaling the encoded form of src
+// into dst with the UnmarshalOptions.Merge option specified.
+func Merge(dst, src Message) {
+ // TODO: Should nil src be treated as semantically equivalent to a
+ // untyped, read-only, empty message? What about a nil dst?
+
+ dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect()
+ if dstMsg.Descriptor() != srcMsg.Descriptor() {
+ if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want {
+ panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want))
+ }
+ panic("descriptor mismatch")
+ }
+ mergeOptions{}.mergeMessage(dstMsg, srcMsg)
+}
+
+// Clone returns a deep copy of m.
+// If the top-level message is invalid, it returns an invalid message as well.
+func Clone(m Message) Message {
+ // NOTE: Most usages of Clone assume the following properties:
+ // t := reflect.TypeOf(m)
+ // t == reflect.TypeOf(m.ProtoReflect().New().Interface())
+ // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface())
+ //
+ // Embedding protobuf messages breaks this since the parent type will have
+ // a forwarded ProtoReflect method, but the Interface method will return
+ // the underlying embedded message type.
+ if m == nil {
+ return nil
+ }
+ src := m.ProtoReflect()
+ if !src.IsValid() {
+ return src.Type().Zero().Interface()
+ }
+ dst := src.New()
+ mergeOptions{}.mergeMessage(dst, src)
+ return dst.Interface()
+}
+
+// mergeOptions provides a namespace for merge functions, and can be
+// exported in the future if we add user-visible merge options.
+type mergeOptions struct{}
+
+func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) {
+ methods := protoMethods(dst)
+ if methods != nil && methods.Merge != nil {
+ in := protoiface.MergeInput{
+ Destination: dst,
+ Source: src,
+ }
+ out := methods.Merge(in)
+ if out.Flags&protoiface.MergeComplete != 0 {
+ return
+ }
+ }
+
+ if !dst.IsValid() {
+ panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName()))
+ }
+
+ src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsList():
+ o.mergeList(dst.Mutable(fd).List(), v.List(), fd)
+ case fd.IsMap():
+ o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue())
+ case fd.Message() != nil:
+ o.mergeMessage(dst.Mutable(fd).Message(), v.Message())
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Set(fd, o.cloneBytes(v))
+ default:
+ dst.Set(fd, v)
+ }
+ return true
+ })
+
+ if len(src.GetUnknown()) > 0 {
+ dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...))
+ }
+}
+
+func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) {
+ // Merge semantics appends to the end of the existing list.
+ for i, n := 0, src.Len(); i < n; i++ {
+ switch v := src.Get(i); {
+ case fd.Message() != nil:
+ dstv := dst.NewElement()
+ o.mergeMessage(dstv.Message(), v.Message())
+ dst.Append(dstv)
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Append(o.cloneBytes(v))
+ default:
+ dst.Append(v)
+ }
+ }
+}
+
+func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) {
+ // Merge semantics replaces, rather than merges into existing entries.
+ src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ switch {
+ case fd.Message() != nil:
+ dstv := dst.NewValue()
+ o.mergeMessage(dstv.Message(), v.Message())
+ dst.Set(k, dstv)
+ case fd.Kind() == protoreflect.BytesKind:
+ dst.Set(k, o.cloneBytes(v))
+ default:
+ dst.Set(k, v)
+ }
+ return true
+ })
+}
+
+func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value {
+ return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...))
+}
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
new file mode 100644
index 00000000..b6b3de59
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -0,0 +1,88 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/flags"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+func sizeMessageSet(m protoreflect.Message) (size int) {
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ size += messageset.SizeField(fd.Number())
+ size += protowire.SizeTag(messageset.FieldMessage)
+ size += protowire.SizeBytes(sizeMessage(v.Message()))
+ return true
+ })
+ size += messageset.SizeUnknown(m.GetUnknown())
+ return size
+}
+
+func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]byte, error) {
+ if !flags.ProtoLegacy {
+ return b, errors.New("no support for message_set_wire_format")
+ }
+ var err error
+ o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b, err = marshalMessageSetField(b, fd, v, o)
+ return err == nil
+ })
+ if err != nil {
+ return b, err
+ }
+ return messageset.AppendUnknown(b, m.GetUnknown())
+}
+
+func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value, o MarshalOptions) ([]byte, error) {
+ b = messageset.AppendFieldStart(b, fd.Number())
+ b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
+ b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
+ b, err := o.marshalMessage(b, value.Message())
+ if err != nil {
+ return b, err
+ }
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+}
+
+func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) error {
+ if !flags.ProtoLegacy {
+ return errors.New("no support for message_set_wire_format")
+ }
+ return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error {
+ err := unmarshalMessageSetField(m, num, v, o)
+ if err == errUnknown {
+ unknown := m.GetUnknown()
+ unknown = protowire.AppendTag(unknown, num, protowire.BytesType)
+ unknown = protowire.AppendBytes(unknown, v)
+ m.SetUnknown(unknown)
+ return nil
+ }
+ return err
+ })
+}
+
+func unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte, o UnmarshalOptions) error {
+ md := m.Descriptor()
+ if !md.ExtensionRanges().Has(num) {
+ return errUnknown
+ }
+ xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num)
+ if err == protoregistry.NotFound {
+ return errUnknown
+ }
+ if err != nil {
+ return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err)
+ }
+ xd := xt.TypeDescriptor()
+ if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
new file mode 100644
index 00000000..ca14b09c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Message is the top-level interface that all messages must implement.
+// It provides access to a reflective view of a message.
+// Any implementation of this interface may be used with all functions in the
+// protobuf module that accept a Message, except where otherwise specified.
+//
+// This is the v2 interface definition for protobuf messages.
+// The v1 interface definition is "github.com/golang/protobuf/proto".Message.
+//
+// To convert a v1 message to a v2 message,
+// use "github.com/golang/protobuf/proto".MessageV2.
+// To convert a v2 message to a v1 message,
+// use "github.com/golang/protobuf/proto".MessageV1.
+type Message = protoreflect.ProtoMessage
+
+// Error matches all errors produced by packages in the protobuf module.
+//
+// That is, errors.Is(err, Error) reports whether an error is produced
+// by this module.
+var Error error
+
+func init() {
+ Error = errors.Error
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go
new file mode 100644
index 00000000..d8dd604f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoreflect build tag disables use of fast-path methods.
+// +build !protoreflect
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+const hasProtoMethods = true
+
+func protoMethods(m protoreflect.Message) *protoiface.Methods {
+ return m.ProtoMethods()
+}
diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
new file mode 100644
index 00000000..b103d432
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoreflect build tag disables use of fast-path methods.
+// +build protoreflect
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+const hasProtoMethods = false
+
+func protoMethods(m protoreflect.Message) *protoiface.Methods {
+ return nil
+}
diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go
new file mode 100644
index 00000000..3d7f8943
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/reset.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Reset clears every field in the message.
+// The resulting message shares no observable memory with its previous state
+// other than the memory for the message itself.
+func Reset(m Message) {
+ if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods {
+ mr.Reset()
+ return
+ }
+ resetMessage(m.ProtoReflect())
+}
+
+func resetMessage(m protoreflect.Message) {
+ if !m.IsValid() {
+ panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName()))
+ }
+
+ // Clear all known fields.
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ m.Clear(fds.Get(i))
+ }
+
+ // Clear extension fields.
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ m.Clear(fd)
+ return true
+ })
+
+ // Clear unknown fields.
+ m.SetUnknown(nil)
+}
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
new file mode 100644
index 00000000..11ba8414
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ return MarshalOptions{}.Size(m)
+}
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func (o MarshalOptions) Size(m Message) int {
+ // Treat a nil message interface as an empty message; nothing to output.
+ if m == nil {
+ return 0
+ }
+
+ return sizeMessage(m.ProtoReflect())
+}
+
+func sizeMessage(m protoreflect.Message) (size int) {
+ methods := protoMethods(m)
+ if methods != nil && methods.Size != nil {
+ out := methods.Size(protoiface.SizeInput{
+ Message: m,
+ })
+ return out.Size
+ }
+ if methods != nil && methods.Marshal != nil {
+ // This is not efficient, but we don't have any choice.
+ // This case is mainly used for legacy types with a Marshal method.
+ out, _ := methods.Marshal(protoiface.MarshalInput{
+ Message: m,
+ })
+ return len(out.Buf)
+ }
+ return sizeMessageSlow(m)
+}
+
+func sizeMessageSlow(m protoreflect.Message) (size int) {
+ if messageset.IsMessageSet(m.Descriptor()) {
+ return sizeMessageSet(m)
+ }
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ size += sizeField(fd, v)
+ return true
+ })
+ size += len(m.GetUnknown())
+ return size
+}
+
+func sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) {
+ num := fd.Number()
+ switch {
+ case fd.IsList():
+ return sizeList(num, fd, value.List())
+ case fd.IsMap():
+ return sizeMap(num, fd, value.Map())
+ default:
+ return protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), value)
+ }
+}
+
+func sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) {
+ if fd.IsPacked() && list.Len() > 0 {
+ content := 0
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ content += sizeSingular(num, fd.Kind(), list.Get(i))
+ }
+ return protowire.SizeTag(num) + protowire.SizeBytes(content)
+ }
+
+ for i, llen := 0, list.Len(); i < llen; i++ {
+ size += protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), list.Get(i))
+ }
+ return size
+}
+
+func sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) {
+ mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool {
+ size += protowire.SizeTag(num)
+ size += protowire.SizeBytes(sizeField(fd.MapKey(), key.Value()) + sizeField(fd.MapValue(), value))
+ return true
+ })
+ return size
+}
diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go
new file mode 100644
index 00000000..1118460f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/size_gen.go
@@ -0,0 +1,55 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int {
+ switch kind {
+ case protoreflect.BoolKind:
+ return protowire.SizeVarint(protowire.EncodeBool(v.Bool()))
+ case protoreflect.EnumKind:
+ return protowire.SizeVarint(uint64(v.Enum()))
+ case protoreflect.Int32Kind:
+ return protowire.SizeVarint(uint64(int32(v.Int())))
+ case protoreflect.Sint32Kind:
+ return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int()))))
+ case protoreflect.Uint32Kind:
+ return protowire.SizeVarint(uint64(uint32(v.Uint())))
+ case protoreflect.Int64Kind:
+ return protowire.SizeVarint(uint64(v.Int()))
+ case protoreflect.Sint64Kind:
+ return protowire.SizeVarint(protowire.EncodeZigZag(v.Int()))
+ case protoreflect.Uint64Kind:
+ return protowire.SizeVarint(v.Uint())
+ case protoreflect.Sfixed32Kind:
+ return protowire.SizeFixed32()
+ case protoreflect.Fixed32Kind:
+ return protowire.SizeFixed32()
+ case protoreflect.FloatKind:
+ return protowire.SizeFixed32()
+ case protoreflect.Sfixed64Kind:
+ return protowire.SizeFixed64()
+ case protoreflect.Fixed64Kind:
+ return protowire.SizeFixed64()
+ case protoreflect.DoubleKind:
+ return protowire.SizeFixed64()
+ case protoreflect.StringKind:
+ return protowire.SizeBytes(len(v.String()))
+ case protoreflect.BytesKind:
+ return protowire.SizeBytes(len(v.Bytes()))
+ case protoreflect.MessageKind:
+ return protowire.SizeBytes(sizeMessage(v.Message()))
+ case protoreflect.GroupKind:
+ return protowire.SizeGroup(num, sizeMessage(v.Message()))
+ default:
+ return 0
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go
new file mode 100644
index 00000000..653b12c3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrappers.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
new file mode 100644
index 00000000..6be5d16e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -0,0 +1,77 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+// The following types are used by the fast-path Message.ProtoMethods method.
+//
+// To avoid polluting the public protoreflect API with types used only by
+// low-level implementations, the canonical definitions of these types are
+// in the runtime/protoiface package. The definitions here and in protoiface
+// must be kept in sync.
+type (
+ methods = struct {
+ pragma.NoUnkeyedLiterals
+ Flags supportFlags
+ Size func(sizeInput) sizeOutput
+ Marshal func(marshalInput) (marshalOutput, error)
+ Unmarshal func(unmarshalInput) (unmarshalOutput, error)
+ Merge func(mergeInput) mergeOutput
+ CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ }
+ supportFlags = uint64
+ sizeInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Flags uint8
+ }
+ sizeOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Size int
+ }
+ marshalInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Buf []byte
+ Flags uint8
+ }
+ marshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Buf []byte
+ }
+ unmarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ Buf []byte
+ Flags uint8
+ Resolver interface {
+ FindExtensionByName(field FullName) (ExtensionType, error)
+ FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error)
+ }
+ }
+ unmarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Flags uint8
+ }
+ mergeInput = struct {
+ pragma.NoUnkeyedLiterals
+ Source Message
+ Destination Message
+ }
+ mergeOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Flags uint8
+ }
+ checkInitializedInput = struct {
+ pragma.NoUnkeyedLiterals
+ Message Message
+ }
+ checkInitializedOutput = struct {
+ pragma.NoUnkeyedLiterals
+ }
+)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
new file mode 100644
index 00000000..b669a4e7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -0,0 +1,478 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoreflect provides interfaces to dynamically manipulate messages.
+//
+// This package includes type descriptors which describe the structure of types
+// defined in proto source files and value interfaces which provide the
+// ability to examine and manipulate the contents of messages.
+//
+//
+// Protocol Buffer Descriptors
+//
+// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
+// are immutable objects that represent protobuf type information.
+// They are wrappers around the messages declared in descriptor.proto.
+// Protobuf descriptors alone lack any information regarding Go types.
+//
+// Enums and messages generated by this module implement Enum and ProtoMessage,
+// where the Descriptor and ProtoReflect.Descriptor accessors respectively
+// return the protobuf descriptor for the values.
+//
+// The protobuf descriptor interfaces are not meant to be implemented by
+// user code since they might need to be extended in the future to support
+// additions to the protobuf language.
+// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// google.protobuf.DescriptorProto messages and protobuf descriptors.
+//
+//
+// Go Type Descriptors
+//
+// A type descriptor (e.g., EnumType or MessageType) is a constructor for
+// a concrete Go type that represents the associated protobuf descriptor.
+// There is commonly a one-to-one relationship between protobuf descriptors and
+// Go type descriptors, but it can potentially be a one-to-many relationship.
+//
+// Enums and messages generated by this module implement Enum and ProtoMessage,
+// where the Type and ProtoReflect.Type accessors respectively
+// return the protobuf descriptor for the values.
+//
+// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
+// create Go type descriptors from protobuf descriptors.
+//
+//
+// Value Interfaces
+//
+// The Enum and Message interfaces provide a reflective view over an
+// enum or message instance. For enums, it provides the ability to retrieve
+// the enum value number for any concrete enum type. For messages, it provides
+// the ability to access or manipulate fields of the message.
+//
+// To convert a proto.Message to a protoreflect.Message, use the
+// former's ProtoReflect method. Since the ProtoReflect method is new to the
+// v2 message interface, it may not be present on older message implementations.
+// The "github.com/golang/protobuf/proto".MessageReflect function can be used
+// to obtain a reflective view on older messages.
+//
+//
+// Relationships
+//
+// The following diagrams demonstrate the relationships between
+// various types declared in this package.
+//
+//
+// ┌───────────────────────────────────┐
+// V │
+// ┌────────────── New(n) ─────────────┐ │
+// │ │ │
+// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │
+// │ │ V V │ V │
+// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗
+// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║
+// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝
+// Λ Λ │ │
+// │ └─── Descriptor() ──┘ │
+// │ │
+// └────────────────── Type() ───────┘
+//
+// • An EnumType describes a concrete Go enum type.
+// It has an EnumDescriptor and can construct an Enum instance.
+//
+// • An EnumDescriptor describes an abstract protobuf enum type.
+//
+// • An Enum is a concrete enum instance. Generated enums implement Enum.
+//
+//
+// ┌──────────────── New() ─────────────────┐
+// │ │
+// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐
+// │ │ V V │ V
+// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗
+// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║
+// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝
+// Λ Λ │ │ Λ │
+// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘
+// │ │
+// └─────────────────── Type() ─────────┘
+//
+// • A MessageType describes a concrete Go message type.
+// It has a MessageDescriptor and can construct a Message instance.
+//
+// • A MessageDescriptor describes an abstract protobuf message type.
+//
+// • A Message is a concrete message instance. Generated messages implement
+// ProtoMessage, which can convert to/from a Message.
+//
+//
+// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
+// │ V │ V
+// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗
+// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║
+// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝
+// Λ │ │ Λ │ Λ
+// └─────── Type() ───────┘ │ └─── may implement ────┘ │
+// │ │
+// └────── implements ────────┘
+//
+// • An ExtensionType describes a concrete Go implementation of an extension.
+// It has an ExtensionTypeDescriptor and can convert to/from
+// abstract Values and Go values.
+//
+// • An ExtensionTypeDescriptor is an ExtensionDescriptor
+// which also has an ExtensionType.
+//
+// • An ExtensionDescriptor describes an abstract protobuf extension field and
+// may not always be an ExtensionTypeDescriptor.
+package protoreflect
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type doNotImplement pragma.DoNotImplement
+
+// ProtoMessage is the top-level interface that all proto messages implement.
+// This is declared in the protoreflect package to avoid a cyclic dependency;
+// use the proto.Message type instead, which aliases this type.
+type ProtoMessage interface{ ProtoReflect() Message }
+
+// Syntax is the language version of the proto file.
+type Syntax syntax
+
+type syntax int8 // keep exact type opaque as the int type may change
+
+const (
+ Proto2 Syntax = 2
+ Proto3 Syntax = 3
+)
+
+// IsValid reports whether the syntax is valid.
+func (s Syntax) IsValid() bool {
+ switch s {
+ case Proto2, Proto3:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns s as a proto source identifier (e.g., "proto2").
+func (s Syntax) String() string {
+ switch s {
+ case Proto2:
+ return "proto2"
+ case Proto3:
+ return "proto3"
+ default:
+		return fmt.Sprintf("<unknown:%d>", s)
+ }
+}
+
+// GoString returns s as a Go source identifier (e.g., "Proto2").
+func (s Syntax) GoString() string {
+ switch s {
+ case Proto2:
+ return "Proto2"
+ case Proto3:
+ return "Proto3"
+ default:
+ return fmt.Sprintf("Syntax(%d)", s)
+ }
+}
+
+// Cardinality determines whether a field is optional, required, or repeated.
+type Cardinality cardinality
+
+type cardinality int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Cardinality enumeration.
+const (
+ Optional Cardinality = 1 // appears zero or one times
+ Required Cardinality = 2 // appears exactly one time; invalid with Proto3
+ Repeated Cardinality = 3 // appears zero or more times
+)
+
+// IsValid reports whether the cardinality is valid.
+func (c Cardinality) IsValid() bool {
+ switch c {
+ case Optional, Required, Repeated:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns c as a proto source identifier (e.g., "optional").
+func (c Cardinality) String() string {
+ switch c {
+ case Optional:
+ return "optional"
+ case Required:
+ return "required"
+ case Repeated:
+ return "repeated"
+ default:
+		return fmt.Sprintf("<unknown:%d>", c)
+ }
+}
+
+// GoString returns c as a Go source identifier (e.g., "Optional").
+func (c Cardinality) GoString() string {
+ switch c {
+ case Optional:
+ return "Optional"
+ case Required:
+ return "Required"
+ case Repeated:
+ return "Repeated"
+ default:
+ return fmt.Sprintf("Cardinality(%d)", c)
+ }
+}
+
+// Kind indicates the basic proto kind of a field.
+type Kind kind
+
+type kind int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Field.Kind enumeration.
+const (
+ BoolKind Kind = 8
+ EnumKind Kind = 14
+ Int32Kind Kind = 5
+ Sint32Kind Kind = 17
+ Uint32Kind Kind = 13
+ Int64Kind Kind = 3
+ Sint64Kind Kind = 18
+ Uint64Kind Kind = 4
+ Sfixed32Kind Kind = 15
+ Fixed32Kind Kind = 7
+ FloatKind Kind = 2
+ Sfixed64Kind Kind = 16
+ Fixed64Kind Kind = 6
+ DoubleKind Kind = 1
+ StringKind Kind = 9
+ BytesKind Kind = 12
+ MessageKind Kind = 11
+ GroupKind Kind = 10
+)
+
+// IsValid reports whether the kind is valid.
+func (k Kind) IsValid() bool {
+ switch k {
+ case BoolKind, EnumKind,
+ Int32Kind, Sint32Kind, Uint32Kind,
+ Int64Kind, Sint64Kind, Uint64Kind,
+ Sfixed32Kind, Fixed32Kind, FloatKind,
+ Sfixed64Kind, Fixed64Kind, DoubleKind,
+ StringKind, BytesKind, MessageKind, GroupKind:
+ return true
+ default:
+ return false
+ }
+}
+
+// String returns k as a proto source identifier (e.g., "bool").
+func (k Kind) String() string {
+ switch k {
+ case BoolKind:
+ return "bool"
+ case EnumKind:
+ return "enum"
+ case Int32Kind:
+ return "int32"
+ case Sint32Kind:
+ return "sint32"
+ case Uint32Kind:
+ return "uint32"
+ case Int64Kind:
+ return "int64"
+ case Sint64Kind:
+ return "sint64"
+ case Uint64Kind:
+ return "uint64"
+ case Sfixed32Kind:
+ return "sfixed32"
+ case Fixed32Kind:
+ return "fixed32"
+ case FloatKind:
+ return "float"
+ case Sfixed64Kind:
+ return "sfixed64"
+ case Fixed64Kind:
+ return "fixed64"
+ case DoubleKind:
+ return "double"
+ case StringKind:
+ return "string"
+ case BytesKind:
+ return "bytes"
+ case MessageKind:
+ return "message"
+ case GroupKind:
+ return "group"
+ default:
+		return fmt.Sprintf("<unknown:%d>", k)
+ }
+}
+
+// GoString returns k as a Go source identifier (e.g., "BoolKind").
+func (k Kind) GoString() string {
+ switch k {
+ case BoolKind:
+ return "BoolKind"
+ case EnumKind:
+ return "EnumKind"
+ case Int32Kind:
+ return "Int32Kind"
+ case Sint32Kind:
+ return "Sint32Kind"
+ case Uint32Kind:
+ return "Uint32Kind"
+ case Int64Kind:
+ return "Int64Kind"
+ case Sint64Kind:
+ return "Sint64Kind"
+ case Uint64Kind:
+ return "Uint64Kind"
+ case Sfixed32Kind:
+ return "Sfixed32Kind"
+ case Fixed32Kind:
+ return "Fixed32Kind"
+ case FloatKind:
+ return "FloatKind"
+ case Sfixed64Kind:
+ return "Sfixed64Kind"
+ case Fixed64Kind:
+ return "Fixed64Kind"
+ case DoubleKind:
+ return "DoubleKind"
+ case StringKind:
+ return "StringKind"
+ case BytesKind:
+ return "BytesKind"
+ case MessageKind:
+ return "MessageKind"
+ case GroupKind:
+ return "GroupKind"
+ default:
+ return fmt.Sprintf("Kind(%d)", k)
+ }
+}
+
+// FieldNumber is the field number in a message.
+type FieldNumber = protowire.Number
+
+// FieldNumbers represent a list of field numbers.
+type FieldNumbers interface {
+ // Len reports the number of fields in the list.
+ Len() int
+ // Get returns the ith field number. It panics if out of bounds.
+ Get(i int) FieldNumber
+ // Has reports whether n is within the list of fields.
+ Has(n FieldNumber) bool
+
+ doNotImplement
+}
+
+// FieldRanges represent a list of field number ranges.
+type FieldRanges interface {
+ // Len reports the number of ranges in the list.
+ Len() int
+ // Get returns the ith range. It panics if out of bounds.
+ Get(i int) [2]FieldNumber // start inclusive; end exclusive
+ // Has reports whether n is within any of the ranges.
+ Has(n FieldNumber) bool
+
+ doNotImplement
+}
+
+// EnumNumber is the numeric value for an enum.
+type EnumNumber int32
+
+// EnumRanges represent a list of enum number ranges.
+type EnumRanges interface {
+ // Len reports the number of ranges in the list.
+ Len() int
+ // Get returns the ith range. It panics if out of bounds.
+ Get(i int) [2]EnumNumber // start inclusive; end inclusive
+ // Has reports whether n is within any of the ranges.
+ Has(n EnumNumber) bool
+
+ doNotImplement
+}
+
+var (
+ regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`)
+ regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`)
+)
+
+// Name is the short name for a proto declaration. This is not the name
+// as used in Go source code, which might not be identical to the proto name.
+type Name string // e.g., "Kind"
+
+// IsValid reports whether n is a syntactically valid name.
+// An empty name is invalid.
+func (n Name) IsValid() bool {
+ return regexName.MatchString(string(n))
+}
+
+// Names represent a list of names.
+type Names interface {
+ // Len reports the number of names in the list.
+ Len() int
+ // Get returns the ith name. It panics if out of bounds.
+ Get(i int) Name
+ // Has reports whether s matches any names in the list.
+ Has(s Name) bool
+
+ doNotImplement
+}
+
+// FullName is a qualified name that uniquely identifies a proto declaration.
+// A qualified name is the concatenation of the proto package along with the
+// fully-declared name (i.e., name of parent preceding the name of the child),
+// with a '.' delimiter placed between each Name.
+//
+// This should not have any leading or trailing dots.
+type FullName string // e.g., "google.protobuf.Field.Kind"
+
+// IsValid reports whether n is a syntactically valid full name.
+// An empty full name is invalid.
+func (n FullName) IsValid() bool {
+ return regexFullName.MatchString(string(n))
+}
+
+// Name returns the short name, which is the last identifier segment.
+// A single segment FullName is the Name itself.
+func (n FullName) Name() Name {
+ if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
+ return Name(n[i+1:])
+ }
+ return Name(n)
+}
+
+// Parent returns the full name with the trailing identifier removed.
+// A single segment FullName has no parent.
+func (n FullName) Parent() FullName {
+ if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
+ return n[:i]
+ }
+ return ""
+}
+
+// Append returns the qualified name appended with the provided short name.
+//
+// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid
+func (n FullName) Append(s Name) FullName {
+ if n == "" {
+ return FullName(s)
+ }
+ return n + "." + FullName(s)
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
new file mode 100644
index 00000000..32ea3d98
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+// SourceLocations is a list of source locations.
+type SourceLocations interface {
+ // Len reports the number of source locations in the proto file.
+ Len() int
+ // Get returns the ith SourceLocation. It panics if out of bounds.
+ Get(int) SourceLocation
+
+ doNotImplement
+
+ // TODO: Add ByPath and ByDescriptor helper methods.
+}
+
+// SourceLocation describes a source location and
+// corresponds with the google.protobuf.SourceCodeInfo.Location message.
+type SourceLocation struct {
+ // Path is the path to the declaration from the root file descriptor.
+ // The contents of this slice must not be mutated.
+ Path SourcePath
+
+ // StartLine and StartColumn are the zero-indexed starting location
+ // in the source file for the declaration.
+ StartLine, StartColumn int
+ // EndLine and EndColumn are the zero-indexed ending location
+ // in the source file for the declaration.
+ // In the descriptor.proto, the end line may be omitted if it is identical
+ // to the start line. Here, it is always populated.
+ EndLine, EndColumn int
+
+ // LeadingDetachedComments are the leading detached comments
+ // for the declaration. The contents of this slice must not be mutated.
+ LeadingDetachedComments []string
+ // LeadingComments is the leading attached comment for the declaration.
+ LeadingComments string
+ // TrailingComments is the trailing attached comment for the declaration.
+ TrailingComments string
+}
+
+// SourcePath identifies part of a file descriptor for a source location.
+// The SourcePath is a sequence of either field numbers or indexes into
+// a repeated field that form a path starting from the root file descriptor.
+//
+// See google.protobuf.SourceCodeInfo.Location.path.
+type SourcePath []int32
+
+// TODO: Add SourcePath.String method to pretty-print the path. For example:
+// ".message_type[6].nested_type[15].field[3]"
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
new file mode 100644
index 00000000..5be14a72
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -0,0 +1,631 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+// Descriptor provides a set of accessors that are common to every descriptor.
+// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto,
+// but provides efficient lookup and immutability.
+//
+// Each descriptor is comparable. Equality implies that the two types are
+// exactly identical. However, it is possible for the same semantically
+// identical proto type to be represented by multiple type descriptors.
+//
+// For example, suppose we have t1 and t2 which are both MessageDescriptors.
+// If t1 == t2, then the types are definitely equal and all accessors return
+// the same information. However, if t1 != t2, then it is still possible that
+// they still represent the same proto type (e.g., t1.FullName == t2.FullName).
+// This can occur if a descriptor type is created dynamically, or multiple
+// versions of the same proto type are accidentally linked into the Go binary.
+type Descriptor interface {
+ // ParentFile returns the parent file descriptor that this descriptor
+ // is declared within. The parent file for the file descriptor is itself.
+ //
+ // Support for this functionality is optional and may return nil.
+ ParentFile() FileDescriptor
+
+ // Parent returns the parent containing this descriptor declaration.
+ // The following shows the mapping from child type to possible parent types:
+ //
+ // ╔═════════════════════╤═══════════════════════════════════╗
+ // ║ Child type │ Possible parent types ║
+ // ╠═════════════════════╪═══════════════════════════════════╣
+ // ║ FileDescriptor │ nil ║
+ // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ OneofDescriptor │ MessageDescriptor ║
+ // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║
+ // ║ EnumValueDescriptor │ EnumDescriptor ║
+ // ║ ServiceDescriptor │ FileDescriptor ║
+ // ║ MethodDescriptor │ ServiceDescriptor ║
+ // ╚═════════════════════╧═══════════════════════════════════╝
+ //
+ // Support for this functionality is optional and may return nil.
+ Parent() Descriptor
+
+ // Index returns the index of this descriptor within its parent.
+ // It returns 0 if the descriptor does not have a parent or if the parent
+ // is unknown.
+ Index() int
+
+ // Syntax is the protobuf syntax.
+ Syntax() Syntax // e.g., Proto2 or Proto3
+
+ // Name is the short name of the declaration (i.e., FullName.Name).
+ Name() Name // e.g., "Any"
+
+ // FullName is the fully-qualified name of the declaration.
+ //
+ // The FullName is a concatenation of the full name of the type that this
+ // type is declared within and the declaration name. For example,
+ // field "foo_field" in message "proto.package.MyMessage" is
+ // uniquely identified as "proto.package.MyMessage.foo_field".
+ // Enum values are an exception to the rule (see EnumValueDescriptor).
+ FullName() FullName // e.g., "google.protobuf.Any"
+
+ // IsPlaceholder reports whether type information is missing since a
+ // dependency is not resolved, in which case only name information is known.
+ //
+ // Placeholder types may only be returned by the following accessors
+ // as a result of unresolved dependencies or weak imports:
+ //
+ // ╔═══════════════════════════════════╤═════════════════════╗
+ // ║ Accessor │ Descriptor ║
+ // ╠═══════════════════════════════════╪═════════════════════╣
+ // ║ FileImports.FileDescriptor │ FileDescriptor ║
+ // ║ FieldDescriptor.Enum │ EnumDescriptor ║
+ // ║ FieldDescriptor.Message │ MessageDescriptor ║
+ // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║
+ // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║
+ // ║ MethodDescriptor.Input │ MessageDescriptor ║
+ // ║ MethodDescriptor.Output │ MessageDescriptor ║
+ // ╚═══════════════════════════════════╧═════════════════════╝
+ //
+ // If true, only Name and FullName are valid.
+ // For FileDescriptor, the Path is also valid.
+ IsPlaceholder() bool
+
+ // Options returns the descriptor options. The caller must not modify
+ // the returned value.
+ //
+ // To avoid a dependency cycle, this function returns a proto.Message value.
+ // The proto message type returned for each descriptor type is as follows:
+ // ╔═════════════════════╤══════════════════════════════════════════╗
+ // ║ Go type │ Protobuf message type ║
+ // ╠═════════════════════╪══════════════════════════════════════════╣
+ // ║ FileDescriptor │ google.protobuf.FileOptions ║
+ // ║ EnumDescriptor │ google.protobuf.EnumOptions ║
+ // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║
+ // ║ MessageDescriptor │ google.protobuf.MessageOptions ║
+ // ║ FieldDescriptor │ google.protobuf.FieldOptions ║
+ // ║ OneofDescriptor │ google.protobuf.OneofOptions ║
+ // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║
+ // ║ MethodDescriptor │ google.protobuf.MethodOptions ║
+ // ╚═════════════════════╧══════════════════════════════════════════╝
+ //
+ // This method returns a typed nil-pointer if no options are present.
+ // The caller must import the descriptorpb package to use this.
+ Options() ProtoMessage
+
+ doNotImplement
+}
+
+// FileDescriptor describes the types in a complete proto file and
+// corresponds with the google.protobuf.FileDescriptorProto message.
+//
+// Top-level declarations:
+// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor.
+type FileDescriptor interface {
+ Descriptor // Descriptor.FullName is identical to Package
+
+ // Path returns the file name, relative to the source tree root.
+ Path() string // e.g., "path/to/file.proto"
+ // Package returns the protobuf package namespace.
+ Package() FullName // e.g., "google.protobuf"
+
+ // Imports is a list of imported proto files.
+ Imports() FileImports
+
+ // Enums is a list of the top-level enum declarations.
+ Enums() EnumDescriptors
+ // Messages is a list of the top-level message declarations.
+ Messages() MessageDescriptors
+ // Extensions is a list of the top-level extension declarations.
+ Extensions() ExtensionDescriptors
+ // Services is a list of the top-level service declarations.
+ Services() ServiceDescriptors
+
+ // SourceLocations is a list of source locations.
+ SourceLocations() SourceLocations
+
+ isFileDescriptor
+}
+type isFileDescriptor interface{ ProtoType(FileDescriptor) }
+
+// FileImports is a list of file imports.
+type FileImports interface {
+ // Len reports the number of files imported by this proto file.
+ Len() int
+ // Get returns the ith FileImport. It panics if out of bounds.
+ Get(i int) FileImport
+
+ doNotImplement
+}
+
+// FileImport is the declaration for a proto file import.
+type FileImport struct {
+ // FileDescriptor is the file type for the given import.
+ // It is a placeholder descriptor if IsWeak is set or if a dependency has
+ // not been regenerated to implement the new reflection APIs.
+ FileDescriptor
+
+ // IsPublic reports whether this is a public import, which causes this file
+ // to alias declarations within the imported file. The intended use cases
+ // for this feature is the ability to move proto files without breaking
+ // existing dependencies.
+ //
+ // The current file and the imported file must be within proto package.
+ IsPublic bool
+
+ // IsWeak reports whether this is a weak import, which does not impose
+ // a direct dependency on the target file.
+ //
+ // Weak imports are a legacy proto1 feature. Equivalent behavior is
+ // achieved using proto2 extension fields or proto3 Any messages.
+ IsWeak bool
+}
+
+// MessageDescriptor describes a message and
+// corresponds with the google.protobuf.DescriptorProto message.
+//
+// Nested declarations:
+// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor,
+// and/or MessageDescriptor.
+type MessageDescriptor interface {
+ Descriptor
+
+ // IsMapEntry indicates that this is an auto-generated message type to
+ // represent the entry type for a map field.
+ //
+ // Map entry messages have only two fields:
+ // • a "key" field with a field number of 1
+ // • a "value" field with a field number of 2
+ // The key and value types are determined by these two fields.
+ //
+ // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true
+ // for some field with this message type.
+ IsMapEntry() bool
+
+ // Fields is a list of nested field declarations.
+ Fields() FieldDescriptors
+ // Oneofs is a list of nested oneof declarations.
+ Oneofs() OneofDescriptors
+
+ // ReservedNames is a list of reserved field names.
+ ReservedNames() Names
+ // ReservedRanges is a list of reserved ranges of field numbers.
+ ReservedRanges() FieldRanges
+ // RequiredNumbers is a list of required field numbers.
+ // In Proto3, it is always an empty list.
+ RequiredNumbers() FieldNumbers
+ // ExtensionRanges is the field ranges used for extension fields.
+ // In Proto3, it is always an empty ranges.
+ ExtensionRanges() FieldRanges
+ // ExtensionRangeOptions returns the ith extension range options.
+ //
+ // To avoid a dependency cycle, this method returns a proto.Message value,
+ // which always contains a google.protobuf.ExtensionRangeOptions message.
+ // This method returns a typed nil-pointer if no options are present.
+ // The caller must import the descriptorpb package to use this.
+ ExtensionRangeOptions(i int) ProtoMessage
+
+ // Enums is a list of nested enum declarations.
+ Enums() EnumDescriptors
+ // Messages is a list of nested message declarations.
+ Messages() MessageDescriptors
+ // Extensions is a list of nested extension declarations.
+ Extensions() ExtensionDescriptors
+
+ isMessageDescriptor
+}
+type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
+
+// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+type MessageType interface {
+ // New returns a newly allocated empty message.
+ New() Message
+
+ // Zero returns an empty, read-only message.
+ Zero() Message
+
+ // Descriptor returns the message descriptor.
+ //
+ // Invariant: t.Descriptor() == t.New().Descriptor()
+ Descriptor() MessageDescriptor
+}
+
+// MessageDescriptors is a list of message declarations.
+type MessageDescriptors interface {
+ // Len reports the number of messages.
+ Len() int
+ // Get returns the ith MessageDescriptor. It panics if out of bounds.
+ Get(i int) MessageDescriptor
+ // ByName returns the MessageDescriptor for a message named s.
+ // It returns nil if not found.
+ ByName(s Name) MessageDescriptor
+
+ doNotImplement
+}
+
+// FieldDescriptor describes a field within a message and
+// corresponds with the google.protobuf.FieldDescriptorProto message.
+//
+// It is used for both normal fields defined within the parent message
+// (e.g., MessageDescriptor.Fields) and fields that extend some remote message
+// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
+type FieldDescriptor interface {
+ Descriptor
+
+ // Number reports the unique number for this field.
+ Number() FieldNumber
+ // Cardinality reports the cardinality for this field.
+ Cardinality() Cardinality
+ // Kind reports the basic kind for this field.
+ Kind() Kind
+
+ // HasJSONName reports whether this field has an explicitly set JSON name.
+ HasJSONName() bool
+
+ // JSONName reports the name used for JSON serialization.
+ // It is usually the camel-cased form of the field name.
+ JSONName() string
+
+ // HasPresence reports whether the field distinguishes between unpopulated
+ // and default values.
+ HasPresence() bool
+
+ // IsExtension reports whether this is an extension field. If false,
+ // then Parent and ContainingMessage refer to the same message.
+ // Otherwise, ContainingMessage and Parent likely differ.
+ IsExtension() bool
+
+ // HasOptionalKeyword reports whether the "optional" keyword was explicitly
+ // specified in the source .proto file.
+ HasOptionalKeyword() bool
+
+ // IsWeak reports whether this is a weak field, which does not impose a
+ // direct dependency on the target type.
+ // If true, then Message returns a placeholder type.
+ IsWeak() bool
+
+ // IsPacked reports whether repeated primitive numeric kinds should be
+ // serialized using a packed encoding.
+ // If true, then it implies Cardinality is Repeated.
+ IsPacked() bool
+
+ // IsList reports whether this field represents a list,
+ // where the value type for the associated field is a List.
+ // It is equivalent to checking whether Cardinality is Repeated and
+ // that IsMap reports false.
+ IsList() bool
+
+ // IsMap reports whether this field represents a map,
+ // where the value type for the associated field is a Map.
+ // It is equivalent to checking whether Cardinality is Repeated,
+ // that the Kind is MessageKind, and that Message.IsMapEntry reports true.
+ IsMap() bool
+
+ // MapKey returns the field descriptor for the key in the map entry.
+ // It returns nil if IsMap reports false.
+ MapKey() FieldDescriptor
+
+ // MapValue returns the field descriptor for the value in the map entry.
+ // It returns nil if IsMap reports false.
+ MapValue() FieldDescriptor
+
+ // HasDefault reports whether this field has a default value.
+ HasDefault() bool
+
+ // Default returns the default value for scalar fields.
+ // For proto2, it is the default value as specified in the proto file,
+ // or the zero value if unspecified.
+ // For proto3, it is always the zero value of the scalar.
+ // The Value type is determined by the Kind.
+ Default() Value
+
+ // DefaultEnumValue returns the enum value descriptor for the default value
+ // of an enum field, and is nil for any other kind of field.
+ DefaultEnumValue() EnumValueDescriptor
+
+ // ContainingOneof is the containing oneof that this field belongs to,
+ // and is nil if this field is not part of a oneof.
+ ContainingOneof() OneofDescriptor
+
+ // ContainingMessage is the containing message that this field belongs to.
+ // For extension fields, this may not necessarily be the parent message
+ // that the field is declared within.
+ ContainingMessage() MessageDescriptor
+
+ // Enum is the enum descriptor if Kind is EnumKind.
+ // It returns nil for any other Kind.
+ Enum() EnumDescriptor
+
+ // Message is the message descriptor if Kind is
+ // MessageKind or GroupKind. It returns nil for any other Kind.
+ Message() MessageDescriptor
+
+ isFieldDescriptor
+}
+type isFieldDescriptor interface{ ProtoType(FieldDescriptor) }
+
+// FieldDescriptors is a list of field declarations.
+type FieldDescriptors interface {
+ // Len reports the number of fields.
+ Len() int
+ // Get returns the ith FieldDescriptor. It panics if out of bounds.
+ Get(i int) FieldDescriptor
+ // ByName returns the FieldDescriptor for a field named s.
+ // It returns nil if not found.
+ ByName(s Name) FieldDescriptor
+ // ByJSONName returns the FieldDescriptor for a field with s as the JSON name.
+ // It returns nil if not found.
+ ByJSONName(s string) FieldDescriptor
+ // ByNumber returns the FieldDescriptor for a field numbered n.
+ // It returns nil if not found.
+ ByNumber(n FieldNumber) FieldDescriptor
+
+ doNotImplement
+}
+
+// OneofDescriptor describes a oneof field set within a given message and
+// corresponds with the google.protobuf.OneofDescriptorProto message.
+type OneofDescriptor interface {
+ Descriptor
+
+ // IsSynthetic reports whether this is a synthetic oneof created to support
+ // proto3 optional semantics. If true, Fields contains exactly one field
+ // with HasOptionalKeyword specified.
+ IsSynthetic() bool
+
+ // Fields is a list of fields belonging to this oneof.
+ Fields() FieldDescriptors
+
+ isOneofDescriptor
+}
+type isOneofDescriptor interface{ ProtoType(OneofDescriptor) }
+
+// OneofDescriptors is a list of oneof declarations.
+type OneofDescriptors interface {
+ // Len reports the number of oneof fields.
+ Len() int
+ // Get returns the ith OneofDescriptor. It panics if out of bounds.
+ Get(i int) OneofDescriptor
+ // ByName returns the OneofDescriptor for a oneof named s.
+ // It returns nil if not found.
+ ByName(s Name) OneofDescriptor
+
+ doNotImplement
+}
+
+// ExtensionDescriptor is an alias of FieldDescriptor for documentation.
+type ExtensionDescriptor = FieldDescriptor
+
+// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType.
+type ExtensionTypeDescriptor interface {
+ ExtensionDescriptor
+
+ // Type returns the associated ExtensionType.
+ Type() ExtensionType
+
+ // Descriptor returns the plain ExtensionDescriptor without the
+ // associated ExtensionType.
+ Descriptor() ExtensionDescriptor
+}
+
+// ExtensionDescriptors is a list of field declarations.
+type ExtensionDescriptors interface {
+ // Len reports the number of fields.
+ Len() int
+ // Get returns the ith ExtensionDescriptor. It panics if out of bounds.
+ Get(i int) ExtensionDescriptor
+ // ByName returns the ExtensionDescriptor for a field named s.
+ // It returns nil if not found.
+ ByName(s Name) ExtensionDescriptor
+
+ doNotImplement
+}
+
+// ExtensionType encapsulates an ExtensionDescriptor with a concrete
+// Go implementation. The nested field descriptor must be for an extension field.
+//
+// While a normal field is a member of the parent message that it is declared
+// within (see Descriptor.Parent), an extension field is a member of some other
+// target message (see ExtensionDescriptor.Extendee) and may have no
+// relationship with the parent. However, the full name of an extension field is
+// relative to the parent that it is declared within.
+//
+// For example:
+// syntax = "proto2";
+// package example;
+// message FooMessage {
+// extensions 100 to max;
+// }
+// message BarMessage {
+// extends FooMessage { optional BarMessage bar_field = 100; }
+// }
+//
+// Field "bar_field" is an extension of FooMessage, but its full name is
+// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field".
+type ExtensionType interface {
+ // New returns a new value for the field.
+ // For scalars, this returns the default value in native Go form.
+ New() Value
+
+ // Zero returns a new value for the field.
+ // For scalars, this returns the default value in native Go form.
+ // For composite types, this returns an empty, read-only message, list, or map.
+ Zero() Value
+
+ // TypeDescriptor returns the extension type descriptor.
+ TypeDescriptor() ExtensionTypeDescriptor
+
+ // ValueOf wraps the input and returns it as a Value.
+ // ValueOf panics if the input value is invalid or not the appropriate type.
+ //
+ // ValueOf is more extensive than protoreflect.ValueOf for a given field's
+ // value as it has more type information available.
+ ValueOf(interface{}) Value
+
+ // InterfaceOf completely unwraps the Value to the underlying Go type.
+ // InterfaceOf panics if the input is nil or does not represent the
+ // appropriate underlying Go type. For composite types, it panics if the
+ // value is not mutable.
+ //
+ // InterfaceOf is able to unwrap the Value further than Value.Interface
+ // as it has more type information available.
+ InterfaceOf(Value) interface{}
+
+ // IsValidValue reports whether the Value is valid to assign to the field.
+ IsValidValue(Value) bool
+
+ // IsValidInterface reports whether the input is valid to assign to the field.
+ IsValidInterface(interface{}) bool
+}
+
+// EnumDescriptor describes an enum and
+// corresponds with the google.protobuf.EnumDescriptorProto message.
+//
+// Nested declarations:
+// EnumValueDescriptor.
+type EnumDescriptor interface {
+ Descriptor
+
+ // Values is a list of nested enum value declarations.
+ Values() EnumValueDescriptors
+
+ // ReservedNames is a list of reserved enum names.
+ ReservedNames() Names
+ // ReservedRanges is a list of reserved ranges of enum numbers.
+ ReservedRanges() EnumRanges
+
+ isEnumDescriptor
+}
+type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
+
+// EnumType encapsulates an EnumDescriptor with a concrete Go implementation.
+type EnumType interface {
+ // New returns an instance of this enum type with its value set to n.
+ New(n EnumNumber) Enum
+
+ // Descriptor returns the enum descriptor.
+ //
+ // Invariant: t.Descriptor() == t.New(0).Descriptor()
+ Descriptor() EnumDescriptor
+}
+
+// EnumDescriptors is a list of enum declarations.
+type EnumDescriptors interface {
+ // Len reports the number of enum types.
+ Len() int
+ // Get returns the ith EnumDescriptor. It panics if out of bounds.
+ Get(i int) EnumDescriptor
+ // ByName returns the EnumDescriptor for an enum named s.
+ // It returns nil if not found.
+ ByName(s Name) EnumDescriptor
+
+ doNotImplement
+}
+
+// EnumValueDescriptor describes an enum value and
+// corresponds with the google.protobuf.EnumValueDescriptorProto message.
+//
+// All other proto declarations are in the namespace of the parent.
+// However, enum values do not follow this rule and are within the namespace
+// of the parent's parent (i.e., they are a sibling of the containing enum).
+// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified
+// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE".
+type EnumValueDescriptor interface {
+ Descriptor
+
+ // Number returns the enum value as an integer.
+ Number() EnumNumber
+
+ isEnumValueDescriptor
+}
+type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) }
+
+// EnumValueDescriptors is a list of enum value declarations.
+type EnumValueDescriptors interface {
+ // Len reports the number of enum values.
+ Len() int
+ // Get returns the ith EnumValueDescriptor. It panics if out of bounds.
+ Get(i int) EnumValueDescriptor
+ // ByName returns the EnumValueDescriptor for the enum value named s.
+ // It returns nil if not found.
+ ByName(s Name) EnumValueDescriptor
+ // ByNumber returns the EnumValueDescriptor for the enum value numbered n.
+ // If multiple have the same number, the first one defined is returned
+ // It returns nil if not found.
+ ByNumber(n EnumNumber) EnumValueDescriptor
+
+ doNotImplement
+}
+
+// ServiceDescriptor describes a service and
+// corresponds with the google.protobuf.ServiceDescriptorProto message.
+//
+// Nested declarations: MethodDescriptor.
+type ServiceDescriptor interface {
+ Descriptor
+
+ // Methods is a list of nested message declarations.
+ Methods() MethodDescriptors
+
+ isServiceDescriptor
+}
+type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) }
+
+// ServiceDescriptors is a list of service declarations.
+type ServiceDescriptors interface {
+ // Len reports the number of services.
+ Len() int
+ // Get returns the ith ServiceDescriptor. It panics if out of bounds.
+ Get(i int) ServiceDescriptor
+ // ByName returns the ServiceDescriptor for a service named s.
+ // It returns nil if not found.
+ ByName(s Name) ServiceDescriptor
+
+ doNotImplement
+}
+
+// MethodDescriptor describes a method and
+// corresponds with the google.protobuf.MethodDescriptorProto message.
+type MethodDescriptor interface {
+ Descriptor
+
+ // Input is the input message descriptor.
+ Input() MessageDescriptor
+ // Output is the output message descriptor.
+ Output() MessageDescriptor
+ // IsStreamingClient reports whether the client streams multiple messages.
+ IsStreamingClient() bool
+ // IsStreamingServer reports whether the server streams multiple messages.
+ IsStreamingServer() bool
+
+ isMethodDescriptor
+}
+type isMethodDescriptor interface{ ProtoType(MethodDescriptor) }
+
+// MethodDescriptors is a list of method declarations.
+type MethodDescriptors interface {
+ // Len reports the number of methods.
+ Len() int
+ // Get returns the ith MethodDescriptor. It panics if out of bounds.
+ Get(i int) MethodDescriptor
+ // ByName returns the MethodDescriptor for a service method named s.
+ // It returns nil if not found.
+ ByName(s Name) MethodDescriptor
+
+ doNotImplement
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
new file mode 100644
index 00000000..f3198107
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -0,0 +1,285 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import "google.golang.org/protobuf/encoding/protowire"
+
+// Enum is a reflection interface for a concrete enum value,
+// which provides type information and a getter for the enum number.
+// Enum does not provide a mutable API since enums are commonly backed by
+// Go constants, which are not addressable.
+type Enum interface {
+ // Descriptor returns enum descriptor, which contains only the protobuf
+ // type information for the enum.
+ Descriptor() EnumDescriptor
+
+ // Type returns the enum type, which encapsulates both Go and protobuf
+ // type information. If the Go type information is not needed,
+ // it is recommended that the enum descriptor be used instead.
+ Type() EnumType
+
+ // Number returns the enum value as an integer.
+ Number() EnumNumber
+}
+
+// Message is a reflective interface for a concrete message value,
+// encapsulating both type and value information for the message.
+//
+// Accessor/mutators for individual fields are keyed by FieldDescriptor.
+// For non-extension fields, the descriptor must exactly match the
+// field known by the parent message.
+// For extension fields, the descriptor must implement ExtensionTypeDescriptor,
+// extend the parent message (i.e., have the same message FullName), and
+// be within the parent's extension range.
+//
+// Each field Value can be a scalar or a composite type (Message, List, or Map).
+// See Value for the Go types associated with a FieldDescriptor.
+// Providing a Value that is invalid or of an incorrect type panics.
+type Message interface {
+ // Descriptor returns message descriptor, which contains only the protobuf
+ // type information for the message.
+ Descriptor() MessageDescriptor
+
+ // Type returns the message type, which encapsulates both Go and protobuf
+ // type information. If the Go type information is not needed,
+ // it is recommended that the message descriptor be used instead.
+ Type() MessageType
+
+ // New returns a newly allocated and mutable empty message.
+ New() Message
+
+ // Interface unwraps the message reflection interface and
+ // returns the underlying ProtoMessage interface.
+ Interface() ProtoMessage
+
+ // Range iterates over every populated field in an undefined order,
+ // calling f for each field descriptor and value encountered.
+ // Range returns immediately if f returns false.
+ // While iterating, mutating operations may only be performed
+ // on the current field descriptor.
+ Range(f func(FieldDescriptor, Value) bool)
+
+ // Has reports whether a field is populated.
+ //
+ // Some fields have the property of nullability where it is possible to
+ // distinguish between the default value of a field and whether the field
+ // was explicitly populated with the default value. Singular message fields,
+ // member fields of a oneof, and proto2 scalar fields are nullable. Such
+ // fields are populated only if explicitly set.
+ //
+ // In other cases (aside from the nullable cases above),
+ // a proto3 scalar field is populated if it contains a non-zero value, and
+ // a repeated field is populated if it is non-empty.
+ Has(FieldDescriptor) bool
+
+ // Clear clears the field such that a subsequent Has call reports false.
+ //
+ // Clearing an extension field clears both the extension type and value
+ // associated with the given field number.
+ //
+ // Clear is a mutating operation and unsafe for concurrent use.
+ Clear(FieldDescriptor)
+
+ // Get retrieves the value for a field.
+ //
+ // For unpopulated scalars, it returns the default value, where
+ // the default value of a bytes scalar is guaranteed to be a copy.
+ // For unpopulated composite types, it returns an empty, read-only view
+ // of the value; to obtain a mutable reference, use Mutable.
+ Get(FieldDescriptor) Value
+
+ // Set stores the value for a field.
+ //
+ // For a field belonging to a oneof, it implicitly clears any other field
+ // that may be currently set within the same oneof.
+ // For extension fields, it implicitly stores the provided ExtensionType.
+ // When setting a composite type, it is unspecified whether the stored value
+ // aliases the source's memory in any way. If the composite value is an
+ // empty, read-only value, then it panics.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(FieldDescriptor, Value)
+
+ // Mutable returns a mutable reference to a composite type.
+ //
+ // If the field is unpopulated, it may allocate a composite value.
+ // For a field belonging to a oneof, it implicitly clears any other field
+ // that may be currently set within the same oneof.
+ // For extension fields, it implicitly stores the provided ExtensionType
+ // if not already stored.
+ // It panics if the field does not contain a composite type.
+ //
+ // Mutable is a mutating operation and unsafe for concurrent use.
+ Mutable(FieldDescriptor) Value
+
+ // NewField returns a new value that is assignable to the field
+ // for the given descriptor. For scalars, this returns the default value.
+ // For lists, maps, and messages, this returns a new, empty, mutable value.
+ NewField(FieldDescriptor) Value
+
+ // WhichOneof reports which field within the oneof is populated,
+ // returning nil if none are populated.
+ // It panics if the oneof descriptor does not belong to this message.
+ WhichOneof(OneofDescriptor) FieldDescriptor
+
+ // GetUnknown retrieves the entire list of unknown fields.
+ // The caller may only mutate the contents of the RawFields
+ // if the mutated bytes are stored back into the message with SetUnknown.
+ GetUnknown() RawFields
+
+ // SetUnknown stores an entire list of unknown fields.
+ // The raw fields must be syntactically valid according to the wire format.
+ // An implementation may panic if this is not the case.
+ // Once stored, the caller must not mutate the content of the RawFields.
+ // An empty RawFields may be passed to clear the fields.
+ //
+ // SetUnknown is a mutating operation and unsafe for concurrent use.
+ SetUnknown(RawFields)
+
+ // IsValid reports whether the message is valid.
+ //
+ // An invalid message is an empty, read-only value.
+ //
+ // An invalid message often corresponds to a nil pointer of the concrete
+ // message type, but the details are implementation dependent.
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+
+ // ProtoMethods returns optional fast-path implementions of various operations.
+ // This method may return nil.
+ //
+ // The returned methods type is identical to
+ // "google.golang.org/protobuf/runtime/protoiface".Methods.
+ // Consult the protoiface package documentation for details.
+ ProtoMethods() *methods
+}
+
+// RawFields is the raw bytes for an ordered sequence of fields.
+// Each field contains both the tag (representing field number and wire type),
+// and also the wire data itself.
+type RawFields []byte
+
+// IsValid reports whether b is syntactically correct wire format.
+func (b RawFields) IsValid() bool {
+ for len(b) > 0 {
+ _, _, n := protowire.ConsumeField(b)
+ if n < 0 {
+ return false
+ }
+ b = b[n:]
+ }
+ return true
+}
+
+// List is a zero-indexed, ordered list.
+// The element Value type is determined by FieldDescriptor.Kind.
+// Providing a Value that is invalid or of an incorrect type panics.
+type List interface {
+ // Len reports the number of entries in the List.
+ // Get, Set, and Truncate panic with out of bound indexes.
+ Len() int
+
+ // Get retrieves the value at the given index.
+ // It never returns an invalid value.
+ Get(int) Value
+
+ // Set stores a value for the given index.
+ // When setting a composite type, it is unspecified whether the set
+ // value aliases the source's memory in any way.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(int, Value)
+
+ // Append appends the provided value to the end of the list.
+ // When appending a composite type, it is unspecified whether the appended
+ // value aliases the source's memory in any way.
+ //
+ // Append is a mutating operation and unsafe for concurrent use.
+ Append(Value)
+
+ // AppendMutable appends a new, empty, mutable message value to the end
+ // of the list and returns it.
+ // It panics if the list does not contain a message type.
+ AppendMutable() Value
+
+ // Truncate truncates the list to a smaller length.
+ //
+ // Truncate is a mutating operation and unsafe for concurrent use.
+ Truncate(int)
+
+ // NewElement returns a new value for a list element.
+ // For enums, this returns the first enum value.
+ // For other scalars, this returns the zero value.
+ // For messages, this returns a new, empty, mutable value.
+ NewElement() Value
+
+ // IsValid reports whether the list is valid.
+ //
+ // An invalid list is an empty, read-only value.
+ //
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+}
+
+// Map is an unordered, associative map.
+// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind.
+// The entry Value type is determined by FieldDescriptor.MapValue.Kind.
+// Providing a MapKey or Value that is invalid or of an incorrect type panics.
+type Map interface {
+ // Len reports the number of elements in the map.
+ Len() int
+
+ // Range iterates over every map entry in an undefined order,
+ // calling f for each key and value encountered.
+ // Range calls f Len times unless f returns false, which stops iteration.
+ // While iterating, mutating operations may only be performed
+ // on the current map key.
+ Range(f func(MapKey, Value) bool)
+
+ // Has reports whether an entry with the given key is in the map.
+ Has(MapKey) bool
+
+ // Clear clears the entry associated with they given key.
+ // The operation does nothing if there is no entry associated with the key.
+ //
+ // Clear is a mutating operation and unsafe for concurrent use.
+ Clear(MapKey)
+
+ // Get retrieves the value for an entry with the given key.
+ // It returns an invalid value for non-existent entries.
+ Get(MapKey) Value
+
+ // Set stores the value for an entry with the given key.
+ // It panics when given a key or value that is invalid or the wrong type.
+ // When setting a composite type, it is unspecified whether the set
+ // value aliases the source's memory in any way.
+ //
+ // Set is a mutating operation and unsafe for concurrent use.
+ Set(MapKey, Value)
+
+ // Mutable retrieves a mutable reference to the entry for the given key.
+ // If no entry exists for the key, it creates a new, empty, mutable value
+ // and stores it as the entry for the key.
+ // It panics if the map value is not a message.
+ Mutable(MapKey) Value
+
+ // NewValue returns a new value assignable as a map value.
+ // For enums, this returns the first enum value.
+ // For other scalars, this returns the zero value.
+ // For messages, this returns a new, empty, mutable value.
+ NewValue() Value
+
+ // IsValid reports whether the map is valid.
+ //
+ // An invalid map is an empty, read-only value.
+ //
+ // An invalid message often corresponds to a nil Go map value,
+ // but the details are implementation dependent.
+ // Validity is not part of the protobuf data model, and may not
+ // be preserved in marshaling or other operations.
+ IsValid() bool
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
new file mode 100644
index 00000000..918e685e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build purego appengine
+
+package protoreflect
+
+import "google.golang.org/protobuf/internal/pragma"
+
+type valueType int
+
+const (
+ nilType valueType = iota
+ boolType
+ int32Type
+ int64Type
+ uint32Type
+ uint64Type
+ float32Type
+ float64Type
+ stringType
+ bytesType
+ enumType
+ ifaceType
+)
+
+// value is a union where only one type can be represented at a time.
+// This uses a distinct field for each type. This is type safe in Go, but
+// occupies more memory than necessary (72B).
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ typ valueType // 8B
+ num uint64 // 8B
+ str string // 16B
+ bin []byte // 24B
+ iface interface{} // 16B
+}
+
+func valueOfString(v string) Value {
+ return Value{typ: stringType, str: v}
+}
+func valueOfBytes(v []byte) Value {
+ return Value{typ: bytesType, bin: v}
+}
+func valueOfIface(v interface{}) Value {
+ return Value{typ: ifaceType, iface: v}
+}
+
+func (v Value) getString() string {
+ return v.str
+}
+func (v Value) getBytes() []byte {
+ return v.bin
+}
+func (v Value) getIface() interface{} {
+ return v.iface
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
new file mode 100644
index 00000000..f334f71b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -0,0 +1,409 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "fmt"
+ "math"
+)
+
+// Value is a union where only one Go type may be set at a time.
+// The Value is used to represent all possible values a field may take.
+// The following shows which Go type is used to represent each proto Kind:
+//
+// ╔════════════╤═════════════════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠════════════╪═════════════════════════════════════╣
+// ║ bool │ BoolKind ║
+// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║
+// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║
+// ║ uint32 │ Uint32Kind, Fixed32Kind ║
+// ║ uint64 │ Uint64Kind, Fixed64Kind ║
+// ║ float32 │ FloatKind ║
+// ║ float64 │ DoubleKind ║
+// ║ string │ StringKind ║
+// ║ []byte │ BytesKind ║
+// ║ EnumNumber │ EnumKind ║
+// ║ Message │ MessageKind, GroupKind ║
+// ╚════════════╧═════════════════════════════════════╝
+//
+// Multiple protobuf Kinds may be represented by a single Go type if the type
+// can losslessly represent the information for the proto kind. For example,
+// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64,
+// but use different integer encoding methods.
+//
+// The List or Map types are used if the field cardinality is repeated.
+// A field is a List if FieldDescriptor.IsList reports true.
+// A field is a Map if FieldDescriptor.IsMap reports true.
+//
+// Converting to/from a Value and a concrete Go value panics on type mismatch.
+// For example, ValueOf("hello").Int() panics because this attempts to
+// retrieve an int64 from a string.
+type Value value
+
+// The protoreflect API uses a custom Value union type instead of interface{}
+// to keep the future open for performance optimizations. Using an interface{}
+// always incurs an allocation for primitives (e.g., int64) since it needs to
+// be boxed on the heap (as interfaces can only contain pointers natively).
+// Instead, we represent the Value union as a flat struct that internally keeps
+// track of which type is set. Using unsafe, the Value union can be reduced
+// down to 24B, which is identical in size to a slice.
+//
+// The latest compiler (Go1.11) currently suffers from some limitations:
+// • With inlining, the compiler should be able to statically prove that
+// only one of these switch cases are taken and inline one specific case.
+// See https://golang.org/issue/22310.
+
+// ValueOf returns a Value initialized with the concrete value stored in v.
+// This panics if the type does not match one of the allowed types in the
+// Value union.
+func ValueOf(v interface{}) Value {
+ switch v := v.(type) {
+ case nil:
+ return Value{}
+ case bool:
+ return ValueOfBool(v)
+ case int32:
+ return ValueOfInt32(v)
+ case int64:
+ return ValueOfInt64(v)
+ case uint32:
+ return ValueOfUint32(v)
+ case uint64:
+ return ValueOfUint64(v)
+ case float32:
+ return ValueOfFloat32(v)
+ case float64:
+ return ValueOfFloat64(v)
+ case string:
+ return ValueOfString(v)
+ case []byte:
+ return ValueOfBytes(v)
+ case EnumNumber:
+ return ValueOfEnum(v)
+ case Message, List, Map:
+ return valueOfIface(v)
+ default:
+ panic(fmt.Sprintf("invalid type: %T", v))
+ }
+}
+
+// ValueOfBool returns a new boolean value.
+func ValueOfBool(v bool) Value {
+ if v {
+ return Value{typ: boolType, num: 1}
+ } else {
+ return Value{typ: boolType, num: 0}
+ }
+}
+
+// ValueOfInt32 returns a new int32 value.
+func ValueOfInt32(v int32) Value {
+ return Value{typ: int32Type, num: uint64(v)}
+}
+
+// ValueOfInt64 returns a new int64 value.
+func ValueOfInt64(v int64) Value {
+ return Value{typ: int64Type, num: uint64(v)}
+}
+
+// ValueOfUint32 returns a new uint32 value.
+func ValueOfUint32(v uint32) Value {
+ return Value{typ: uint32Type, num: uint64(v)}
+}
+
+// ValueOfUint64 returns a new uint64 value.
+func ValueOfUint64(v uint64) Value {
+ return Value{typ: uint64Type, num: v}
+}
+
+// ValueOfFloat32 returns a new float32 value.
+func ValueOfFloat32(v float32) Value {
+ return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))}
+}
+
+// ValueOfFloat64 returns a new float64 value.
+func ValueOfFloat64(v float64) Value {
+ return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))}
+}
+
+// ValueOfString returns a new string value.
+func ValueOfString(v string) Value {
+ return valueOfString(v)
+}
+
+// ValueOfBytes returns a new bytes value.
+func ValueOfBytes(v []byte) Value {
+ return valueOfBytes(v[:len(v):len(v)])
+}
+
+// ValueOfEnum returns a new enum value.
+func ValueOfEnum(v EnumNumber) Value {
+ return Value{typ: enumType, num: uint64(v)}
+}
+
+// ValueOfMessage returns a new Message value.
+func ValueOfMessage(v Message) Value {
+ return valueOfIface(v)
+}
+
+// ValueOfList returns a new List value.
+func ValueOfList(v List) Value {
+ return valueOfIface(v)
+}
+
+// ValueOfMap returns a new Map value.
+func ValueOfMap(v Map) Value {
+ return valueOfIface(v)
+}
+
+// IsValid reports whether v is populated with a value.
+func (v Value) IsValid() bool {
+ return v.typ != nilType
+}
+
+// Interface returns v as an interface{}.
+//
+// Invariant: v == ValueOf(v).Interface()
+func (v Value) Interface() interface{} {
+ switch v.typ {
+ case nilType:
+ return nil
+ case boolType:
+ return v.Bool()
+ case int32Type:
+ return int32(v.Int())
+ case int64Type:
+ return int64(v.Int())
+ case uint32Type:
+ return uint32(v.Uint())
+ case uint64Type:
+ return uint64(v.Uint())
+ case float32Type:
+ return float32(v.Float())
+ case float64Type:
+ return float64(v.Float())
+ case stringType:
+ return v.String()
+ case bytesType:
+ return v.Bytes()
+ case enumType:
+ return v.Enum()
+ default:
+ return v.getIface()
+ }
+}
+
+func (v Value) typeName() string {
+ switch v.typ {
+ case nilType:
+ return "nil"
+ case boolType:
+ return "bool"
+ case int32Type:
+ return "int32"
+ case int64Type:
+ return "int64"
+ case uint32Type:
+ return "uint32"
+ case uint64Type:
+ return "uint64"
+ case float32Type:
+ return "float32"
+ case float64Type:
+ return "float64"
+ case stringType:
+ return "string"
+ case bytesType:
+ return "bytes"
+ case enumType:
+ return "enum"
+ default:
+ switch v := v.getIface().(type) {
+ case Message:
+ return "message"
+ case List:
+ return "list"
+ case Map:
+ return "map"
+ default:
+ return fmt.Sprintf("", v)
+ }
+ }
+}
+
+func (v Value) panicMessage(what string) string {
+ return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
+// Bool returns v as a bool and panics if the type is not a bool.
+func (v Value) Bool() bool {
+ switch v.typ {
+ case boolType:
+ return v.num > 0
+ default:
+ panic(v.panicMessage("bool"))
+ }
+}
+
+// Int returns v as a int64 and panics if the type is not a int32 or int64.
+func (v Value) Int() int64 {
+ switch v.typ {
+ case int32Type, int64Type:
+ return int64(v.num)
+ default:
+ panic(v.panicMessage("int"))
+ }
+}
+
+// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64.
+func (v Value) Uint() uint64 {
+ switch v.typ {
+ case uint32Type, uint64Type:
+ return uint64(v.num)
+ default:
+ panic(v.panicMessage("uint"))
+ }
+}
+
+// Float returns v as a float64 and panics if the type is not a float32 or float64.
+func (v Value) Float() float64 {
+ switch v.typ {
+ case float32Type, float64Type:
+ return math.Float64frombits(uint64(v.num))
+ default:
+ panic(v.panicMessage("float"))
+ }
+}
+
+// String returns v as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (v Value) String() string {
+ switch v.typ {
+ case stringType:
+ return v.getString()
+ default:
+ return fmt.Sprint(v.Interface())
+ }
+}
+
+// Bytes returns v as a []byte and panics if the type is not a []byte.
+func (v Value) Bytes() []byte {
+ switch v.typ {
+ case bytesType:
+ return v.getBytes()
+ default:
+ panic(v.panicMessage("bytes"))
+ }
+}
+
+// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber.
+func (v Value) Enum() EnumNumber {
+ switch v.typ {
+ case enumType:
+ return EnumNumber(v.num)
+ default:
+ panic(v.panicMessage("enum"))
+ }
+}
+
+// Message returns v as a Message and panics if the type is not a Message.
+func (v Value) Message() Message {
+ switch vi := v.getIface().(type) {
+ case Message:
+ return vi
+ default:
+ panic(v.panicMessage("message"))
+ }
+}
+
+// List returns v as a List and panics if the type is not a List.
+func (v Value) List() List {
+ switch vi := v.getIface().(type) {
+ case List:
+ return vi
+ default:
+ panic(v.panicMessage("list"))
+ }
+}
+
+// Map returns v as a Map and panics if the type is not a Map.
+func (v Value) Map() Map {
+ switch vi := v.getIface().(type) {
+ case Map:
+ return vi
+ default:
+ panic(v.panicMessage("map"))
+ }
+}
+
+// MapKey returns v as a MapKey and panics for invalid MapKey types.
+func (v Value) MapKey() MapKey {
+ switch v.typ {
+ case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
+ return MapKey(v)
+ default:
+ panic(v.panicMessage("map key"))
+ }
+}
+
+// MapKey is used to index maps, where the Go type of the MapKey must match
+// the specified key Kind (see MessageDescriptor.IsMapEntry).
+// The following shows what Go type is used to represent each proto Kind:
+//
+// ╔═════════╤═════════════════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═════════╪═════════════════════════════════════╣
+// ║ bool │ BoolKind ║
+// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║
+// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║
+// ║ uint32 │ Uint32Kind, Fixed32Kind ║
+// ║ uint64 │ Uint64Kind, Fixed64Kind ║
+// ║ string │ StringKind ║
+// ╚═════════╧═════════════════════════════════════╝
+//
+// A MapKey is constructed and accessed through a Value:
+// k := ValueOf("hash").MapKey() // convert string to MapKey
+// s := k.String() // convert MapKey to string
+//
+// The MapKey is a strict subset of valid types used in Value;
+// converting a Value to a MapKey with an invalid type panics.
+type MapKey value
+
+// IsValid reports whether k is populated with a value.
+func (k MapKey) IsValid() bool {
+ return Value(k).IsValid()
+}
+
+// Interface returns k as an interface{}.
+func (k MapKey) Interface() interface{} {
+ return Value(k).Interface()
+}
+
+// Bool returns k as a bool and panics if the type is not a bool.
+func (k MapKey) Bool() bool {
+ return Value(k).Bool()
+}
+
+// Int returns k as a int64 and panics if the type is not a int32 or int64.
+func (k MapKey) Int() int64 {
+ return Value(k).Int()
+}
+
+// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64.
+func (k MapKey) Uint() uint64 {
+ return Value(k).Uint()
+}
+
+// String returns k as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (k MapKey) String() string {
+ return Value(k).String()
+}
+
+// Value returns k as a Value.
+func (k MapKey) Value() Value {
+ return Value(k)
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
new file mode 100644
index 00000000..c45debdc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !purego,!appengine
+
+package protoreflect
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+ stringHeader struct {
+ Data unsafe.Pointer
+ Len int
+ }
+ sliceHeader struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+ }
+ ifaceHeader struct {
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+)
+
+var (
+ nilType = typeOf(nil)
+ boolType = typeOf(*new(bool))
+ int32Type = typeOf(*new(int32))
+ int64Type = typeOf(*new(int64))
+ uint32Type = typeOf(*new(uint32))
+ uint64Type = typeOf(*new(uint64))
+ float32Type = typeOf(*new(float32))
+ float64Type = typeOf(*new(float64))
+ stringType = typeOf(*new(string))
+ bytesType = typeOf(*new([]byte))
+ enumType = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t interface{}) unsafe.Pointer {
+ return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24B large on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ // typ stores the type of the value as a pointer to the Go type.
+ typ unsafe.Pointer // 8B
+
+ // ptr stores the data pointer for a String, Bytes, or interface value.
+ ptr unsafe.Pointer // 8B
+
+ // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+ // Enum value as a raw uint64.
+ //
+ // It is also used to store the length of a String or Bytes value;
+ // the capacity is ignored.
+ num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+ p := (*stringHeader)(unsafe.Pointer(&v))
+ return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+ p := (*sliceHeader)(unsafe.Pointer(&v))
+ return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
+}
+func valueOfIface(v interface{}) Value {
+ p := (*ifaceHeader)(unsafe.Pointer(&v))
+ return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() (x string) {
+ *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
+ return x
+}
+func (v Value) getBytes() (x []byte) {
+ *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
+ return x
+}
+func (v Value) getIface() (x interface{}) {
+ *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+ return x
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
new file mode 100644
index 00000000..43f16c61
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -0,0 +1,768 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoregistry provides data structures to register and lookup
+// protobuf descriptor types.
+//
+// The Files registry contains file descriptors and provides the ability
+// to iterate over the files or lookup a specific descriptor within the files.
+// Files only contains protobuf descriptors and has no understanding of Go
+// type information that may be associated with each descriptor.
+//
+// The Types registry contains descriptor types for which there is a known
+// Go type associated with that descriptor. It provides the ability to iterate
+// over the registered types or lookup a type by name.
+package protoregistry
+
+import (
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// ignoreConflict reports whether to ignore a registration conflict
+// given the descriptor being registered and the error.
+// It is a variable so that the behavior is easily overridden in another file.
+var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
+ log.Printf(""+
+ "WARNING: %v\n"+
+ "A future release will panic on registration conflicts. See:\n"+
+ "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+
+ "\n", err)
+ return true
+}
+
+var globalMutex sync.RWMutex
+
+// GlobalFiles is a global registry of file descriptors.
+var GlobalFiles *Files = new(Files)
+
+// GlobalTypes is the registry used by default for type lookups
+// unless a local registry is provided by the user.
+var GlobalTypes *Types = new(Types)
+
+// NotFound is a sentinel error value to indicate that the type was not found.
+//
+// Since registry lookup can happen in the critical performance path, resolvers
+// must return this exact error value, not an error wrapping it.
+var NotFound = errors.New("not found")
+
+// Files is a registry for looking up or iterating over files and the
+// descriptors contained within them.
+// The Find and Range methods are safe for concurrent use.
+type Files struct {
+ // The map of descsByName contains:
+ // EnumDescriptor
+ // EnumValueDescriptor
+ // MessageDescriptor
+ // ExtensionDescriptor
+ // ServiceDescriptor
+ // *packageDescriptor
+ //
+ // Note that files are stored as a slice, since a package may contain
+ // multiple files. Only top-level declarations are registered.
+ // Note that enum values are in the top-level since that are in the same
+ // scope as the parent enum.
+ descsByName map[protoreflect.FullName]interface{}
+ filesByPath map[string]protoreflect.FileDescriptor
+}
+
+type packageDescriptor struct {
+ files []protoreflect.FileDescriptor
+}
+
+// RegisterFile registers the provided file descriptor.
+//
+// If any descriptor within the file conflicts with the descriptor of any
+// previously registered file (e.g., two enums with the same full name),
+// then the file is not registered and an error is returned.
+//
+// It is permitted for multiple files to have the same file path.
+func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
+ if r == GlobalFiles {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if r.descsByName == nil {
+ r.descsByName = map[protoreflect.FullName]interface{}{
+ "": &packageDescriptor{},
+ }
+ r.filesByPath = make(map[string]protoreflect.FileDescriptor)
+ }
+ path := file.Path()
+ if prev := r.filesByPath[path]; prev != nil {
+ err := errors.New("file %q is already registered", file.Path())
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(file, err) {
+ err = nil
+ }
+ return err
+ }
+
+ for name := file.Package(); name != ""; name = name.Parent() {
+ switch prev := r.descsByName[name]; prev.(type) {
+ case nil, *packageDescriptor:
+ default:
+ err := errors.New("file %q has a package name conflict over %v", file.Path(), name)
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(file, err) {
+ err = nil
+ }
+ return err
+ }
+ }
+ var err error
+ var hasConflict bool
+ rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
+ if prev := r.descsByName[d.FullName()]; prev != nil {
+ hasConflict = true
+ err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName())
+ err = amendErrorWithCaller(err, prev, file)
+ if r == GlobalFiles && ignoreConflict(d, err) {
+ err = nil
+ }
+ }
+ })
+ if hasConflict {
+ return err
+ }
+
+ for name := file.Package(); name != ""; name = name.Parent() {
+ if r.descsByName[name] == nil {
+ r.descsByName[name] = &packageDescriptor{}
+ }
+ }
+ p := r.descsByName[file.Package()].(*packageDescriptor)
+ p.files = append(p.files, file)
+ rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) {
+ r.descsByName[d.FullName()] = d
+ })
+ r.filesByPath[path] = file
+ return nil
+}
+
+// FindDescriptorByName looks up a descriptor by the full name.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ prefix := name
+ suffix := nameSuffix("")
+ for prefix != "" {
+ if d, ok := r.descsByName[prefix]; ok {
+ switch d := d.(type) {
+ case protoreflect.EnumDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.EnumValueDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.MessageDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.ExtensionDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ case protoreflect.ServiceDescriptor:
+ if d.FullName() == name {
+ return d, nil
+ }
+ if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name {
+ return d, nil
+ }
+ }
+ return nil, NotFound
+ }
+ prefix = prefix.Parent()
+ suffix = nameSuffix(name[len(prefix)+len("."):])
+ }
+ return nil, NotFound
+}
+
+func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor {
+ name := suffix.Pop()
+ if suffix == "" {
+ if ed := md.Enums().ByName(name); ed != nil {
+ return ed
+ }
+ for i := md.Enums().Len() - 1; i >= 0; i-- {
+ if vd := md.Enums().Get(i).Values().ByName(name); vd != nil {
+ return vd
+ }
+ }
+ if xd := md.Extensions().ByName(name); xd != nil {
+ return xd
+ }
+ if fd := md.Fields().ByName(name); fd != nil {
+ return fd
+ }
+ if od := md.Oneofs().ByName(name); od != nil {
+ return od
+ }
+ }
+ if md := md.Messages().ByName(name); md != nil {
+ if suffix == "" {
+ return md
+ }
+ return findDescriptorInMessage(md, suffix)
+ }
+ return nil
+}
+
+type nameSuffix string
+
+func (s *nameSuffix) Pop() (name protoreflect.Name) {
+ if i := strings.IndexByte(string(*s), '.'); i >= 0 {
+ name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:]
+ } else {
+ name, *s = protoreflect.Name((*s)), ""
+ }
+ return name
+}
+
+// FindFileByPath looks up a file by the path.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if fd, ok := r.filesByPath[path]; ok {
+ return fd, nil
+ }
+ return nil, NotFound
+}
+
+// NumFiles reports the number of registered files.
+func (r *Files) NumFiles() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return len(r.filesByPath)
+}
+
+// RangeFiles iterates over all registered files while f returns true.
+// The iteration order is undefined.
+func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, file := range r.filesByPath {
+ if !f(file) {
+ return
+ }
+ }
+}
+
+// NumFilesByPackage reports the number of registered files in a proto package.
+func (r *Files) NumFilesByPackage(name protoreflect.FullName) int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ p, ok := r.descsByName[name].(*packageDescriptor)
+ if !ok {
+ return 0
+ }
+ return len(p.files)
+}
+
+// RangeFilesByPackage iterates over all registered files in a given proto package
+// while f returns true. The iteration order is undefined.
+func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalFiles {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ p, ok := r.descsByName[name].(*packageDescriptor)
+ if !ok {
+ return
+ }
+ for _, file := range p.files {
+ if !f(file) {
+ return
+ }
+ }
+}
+
+// rangeTopLevelDescriptors iterates over all top-level descriptors in a file
+// which will be directly entered into the registry.
+func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) {
+ eds := fd.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ vds := eds.Get(i).Values()
+ for i := vds.Len() - 1; i >= 0; i-- {
+ f(vds.Get(i))
+ }
+ }
+ mds := fd.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ f(mds.Get(i))
+ }
+ xds := fd.Extensions()
+ for i := xds.Len() - 1; i >= 0; i-- {
+ f(xds.Get(i))
+ }
+ sds := fd.Services()
+ for i := sds.Len() - 1; i >= 0; i-- {
+ f(sds.Get(i))
+ }
+}
+
+// MessageTypeResolver is an interface for looking up messages.
+//
+// A compliant implementation must deterministically return the same type
+// if no error is encountered.
+//
+// The Types type implements this interface.
+type MessageTypeResolver interface {
+ // FindMessageByName looks up a message by its full name.
+ // E.g., "google.protobuf.Any"
+ //
+ // This return (nil, NotFound) if not found.
+ FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error)
+
+ // FindMessageByURL looks up a message by a URL identifier.
+ // See documentation on google.protobuf.Any.type_url for the URL format.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindMessageByURL(url string) (protoreflect.MessageType, error)
+}
+
+// ExtensionTypeResolver is an interface for looking up extensions.
+//
+// A compliant implementation must deterministically return the same type
+// if no error is encountered.
+//
+// The Types type implements this interface.
+type ExtensionTypeResolver interface {
+ // FindExtensionByName looks up a extension field by the field's full name.
+ // Note that this is the full name of the field as determined by
+ // where the extension is declared and is unrelated to the full name of the
+ // message being extended.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+
+ // FindExtensionByNumber looks up a extension field by the field number
+ // within some parent message, identified by full name.
+ //
+ // This returns (nil, NotFound) if not found.
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+}
+
+var (
+ _ MessageTypeResolver = (*Types)(nil)
+ _ ExtensionTypeResolver = (*Types)(nil)
+)
+
+// Types is a registry for looking up or iterating over descriptor types.
+// The Find and Range methods are safe for concurrent use.
+type Types struct {
+ typesByName typesByName
+ extensionsByMessage extensionsByMessage
+
+ numEnums int
+ numMessages int
+ numExtensions int
+}
+
+type (
+ typesByName map[protoreflect.FullName]interface{}
+ extensionsByMessage map[protoreflect.FullName]extensionsByNumber
+ extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType
+)
+
+// RegisterMessage registers the provided message type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterMessage(mt protoreflect.MessageType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ md := mt.Descriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ if err := r.register("message", md, mt); err != nil {
+ return err
+ }
+ r.numMessages++
+ return nil
+}
+
+// RegisterEnum registers the provided enum type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterEnum(et protoreflect.EnumType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ ed := et.Descriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ if err := r.register("enum", ed, et); err != nil {
+ return err
+ }
+ r.numEnums++
+ return nil
+}
+
+// RegisterExtension registers the provided extension type.
+//
+// If a naming conflict occurs, the type is not registered and an error is returned.
+func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
+ // Under rare circumstances getting the descriptor might recursively
+ // examine the registry, so fetch it before locking.
+ //
+ // A known case where this can happen: Fetching the TypeDescriptor for a
+ // legacy ExtensionDesc can consult the global registry.
+ xd := xt.TypeDescriptor()
+
+ if r == GlobalTypes {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+
+ field := xd.Number()
+ message := xd.ContainingMessage().FullName()
+ if prev := r.extensionsByMessage[message][field]; prev != nil {
+ err := errors.New("extension number %d is already registered on message %v", field, message)
+ err = amendErrorWithCaller(err, prev, xt)
+ if !(r == GlobalTypes && ignoreConflict(xd, err)) {
+ return err
+ }
+ }
+
+ if err := r.register("extension", xd, xt); err != nil {
+ return err
+ }
+ if r.extensionsByMessage == nil {
+ r.extensionsByMessage = make(extensionsByMessage)
+ }
+ if r.extensionsByMessage[message] == nil {
+ r.extensionsByMessage[message] = make(extensionsByNumber)
+ }
+ r.extensionsByMessage[message][field] = xt
+ r.numExtensions++
+ return nil
+}
+
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+ name := desc.FullName()
+ prev := r.typesByName[name]
+ if prev != nil {
+ err := errors.New("%v %v is already registered", kind, name)
+ err = amendErrorWithCaller(err, prev, typ)
+ if !(r == GlobalTypes && ignoreConflict(desc, err)) {
+ return err
+ }
+ }
+ if r.typesByName == nil {
+ r.typesByName = make(typesByName)
+ }
+ r.typesByName[name] = typ
+ return nil
+}
+
+// FindEnumByName looks up an enum by its full name.
+// E.g., "google.protobuf.Field.Kind".
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[enum]; v != nil {
+ if et, _ := v.(protoreflect.EnumType); et != nil {
+ return et, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want enum", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindMessageByName looks up a message by its full name.
+// E.g., "google.protobuf.Any"
+//
+// This return (nil, NotFound) if not found.
+func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+ // The full name by itself is a valid URL.
+ return r.FindMessageByURL(string(message))
+}
+
+// FindMessageByURL looks up a message by a URL identifier.
+// See documentation on google.protobuf.Any.type_url for the URL format.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ message := protoreflect.FullName(url)
+ if i := strings.LastIndexByte(url, '/'); i >= 0 {
+ message = message[i+len("/"):]
+ }
+
+ if v := r.typesByName[message]; v != nil {
+ if mt, _ := v.(protoreflect.MessageType); mt != nil {
+ return mt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want message", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindExtensionByName looks up a extension field by the field's full name.
+// Note that this is the full name of the field as determined by
+// where the extension is declared and is unrelated to the full name of the
+// message being extended.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if v := r.typesByName[field]; v != nil {
+ if xt, _ := v.(protoreflect.ExtensionType); xt != nil {
+ return xt, nil
+ }
+ return nil, errors.New("found wrong type: got %v, want extension", typeName(v))
+ }
+ return nil, NotFound
+}
+
+// FindExtensionByNumber looks up a extension field by the field number
+// within some parent message, identified by full name.
+//
+// This returns (nil, NotFound) if not found.
+func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if r == nil {
+ return nil, NotFound
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ if xt, ok := r.extensionsByMessage[message][field]; ok {
+ return xt, nil
+ }
+ return nil, NotFound
+}
+
+// NumEnums reports the number of registered enums.
+func (r *Types) NumEnums() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numEnums
+}
+
+// RangeEnums iterates over all registered enums while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if et, ok := typ.(protoreflect.EnumType); ok {
+ if !f(et) {
+ return
+ }
+ }
+ }
+}
+
+// NumMessages reports the number of registered messages.
+func (r *Types) NumMessages() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numMessages
+}
+
+// RangeMessages iterates over all registered messages while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if mt, ok := typ.(protoreflect.MessageType); ok {
+ if !f(mt) {
+ return
+ }
+ }
+ }
+}
+
+// NumExtensions reports the number of registered extensions.
+func (r *Types) NumExtensions() int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return r.numExtensions
+}
+
+// RangeExtensions iterates over all registered extensions while f returns true.
+// Iteration order is undefined.
+func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, typ := range r.typesByName {
+ if xt, ok := typ.(protoreflect.ExtensionType); ok {
+ if !f(xt) {
+ return
+ }
+ }
+ }
+}
+
+// NumExtensionsByMessage reports the number of registered extensions for
+// a given message type.
+func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int {
+ if r == nil {
+ return 0
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ return len(r.extensionsByMessage[message])
+}
+
+// RangeExtensionsByMessage iterates over all registered extensions filtered
+// by a given message type while f returns true. Iteration order is undefined.
+func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) {
+ if r == nil {
+ return
+ }
+ if r == GlobalTypes {
+ globalMutex.RLock()
+ defer globalMutex.RUnlock()
+ }
+ for _, xt := range r.extensionsByMessage[message] {
+ if !f(xt) {
+ return
+ }
+ }
+}
+
+func typeName(t interface{}) string {
+ switch t.(type) {
+ case protoreflect.EnumType:
+ return "enum"
+ case protoreflect.MessageType:
+ return "message"
+ case protoreflect.ExtensionType:
+ return "extension"
+ default:
+ return fmt.Sprintf("%T", t)
+ }
+}
+
+func amendErrorWithCaller(err error, prev, curr interface{}) error {
+ prevPkg := goPackage(prev)
+ currPkg := goPackage(curr)
+ if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
+ return err
+ }
+ return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg)
+}
+
+func goPackage(v interface{}) string {
+ switch d := v.(type) {
+ case protoreflect.EnumType:
+ v = d.Descriptor()
+ case protoreflect.MessageType:
+ v = d.Descriptor()
+ case protoreflect.ExtensionType:
+ v = d.TypeDescriptor()
+ }
+ if d, ok := v.(protoreflect.Descriptor); ok {
+ v = d.ParentFile()
+ }
+ if d, ok := v.(interface{ GoPackagePath() string }); ok {
+ return d.GoPackagePath()
+ }
+ return ""
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
new file mode 100644
index 00000000..c5872767
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoiface
+
+type MessageV1 interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+type ExtensionRangeV1 struct {
+ Start, End int32 // both inclusive
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
new file mode 100644
index 00000000..32c04f67
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoiface contains types referenced or implemented by messages.
+//
+// WARNING: This package should only be imported by message implementations.
+// The functionality found in this package should be accessed through
+// higher-level abstractions provided by the proto package.
+package protoiface
+
+import (
+ "google.golang.org/protobuf/internal/pragma"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Methods is a set of optional fast-path implementations of various operations.
+type Methods = struct {
+ pragma.NoUnkeyedLiterals
+
+ // Flags indicate support for optional features.
+ Flags SupportFlags
+
+ // Size returns the size in bytes of the wire-format encoding of a message.
+ // Marshal must be provided if a custom Size is provided.
+ Size func(SizeInput) SizeOutput
+
+ // Marshal formats a message in the wire-format encoding to the provided buffer.
+ // Size should be provided if a custom Marshal is provided.
+ // It must not return an error for a partial message.
+ Marshal func(MarshalInput) (MarshalOutput, error)
+
+ // Unmarshal parses the wire-format encoding and merges the result into a message.
+ // It must not reset the target message or return an error for a partial message.
+ Unmarshal func(UnmarshalInput) (UnmarshalOutput, error)
+
+ // Merge merges the contents of a source message into a destination message.
+ Merge func(MergeInput) MergeOutput
+
+ // CheckInitialized returns an error if any required fields in the message are not set.
+ CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+}
+
+// SupportFlags indicate support for optional features.
+type SupportFlags = uint64
+
+const (
+ // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported.
+ SupportMarshalDeterministic SupportFlags = 1 << iota
+
+ // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported.
+ SupportUnmarshalDiscardUnknown
+)
+
+// SizeInput is input to the Size method.
+type SizeInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Flags MarshalInputFlags
+}
+
+// SizeOutput is output from the Size method.
+type SizeOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Size int
+}
+
+// MarshalInput is input to the Marshal method.
+type MarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Buf []byte // output is appended to this buffer
+ Flags MarshalInputFlags
+}
+
+// MarshalOutput is output from the Marshal method.
+type MarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Buf []byte // contains marshaled message
+}
+
+// MarshalInputFlags configure the marshaler.
+// Most flags correspond to fields in proto.MarshalOptions.
+type MarshalInputFlags = uint8
+
+const (
+ MarshalDeterministic MarshalInputFlags = 1 << iota
+ MarshalUseCachedSize
+)
+
+// UnmarshalInput is input to the Unmarshal method.
+type UnmarshalInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+ Buf []byte // input buffer
+ Flags UnmarshalInputFlags
+ Resolver interface {
+ FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error)
+ FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error)
+ }
+}
+
+// UnmarshalOutput is output from the Unmarshal method.
+type UnmarshalOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Flags UnmarshalOutputFlags
+}
+
+// UnmarshalInputFlags configure the unmarshaler.
+// Most flags correspond to fields in proto.UnmarshalOptions.
+type UnmarshalInputFlags = uint8
+
+const (
+ UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+)
+
+// UnmarshalOutputFlags are output from the Unmarshal method.
+type UnmarshalOutputFlags = uint8
+
+const (
+ // UnmarshalInitialized may be set on return if all required fields are known to be set.
+ // If unset, then it does not necessarily indicate that the message is uninitialized,
+ // only that its status could not be confirmed.
+ UnmarshalInitialized UnmarshalOutputFlags = 1 << iota
+)
+
+// MergeInput is input to the Merge method.
+type MergeInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Source protoreflect.Message
+ Destination protoreflect.Message
+}
+
+// MergeOutput is output from the Merge method.
+type MergeOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Flags MergeOutputFlags
+}
+
+// MergeOutputFlags are output from the Merge method.
+type MergeOutputFlags = uint8
+
+const (
+ // MergeComplete reports whether the merge was performed.
+ // If unset, the merger must have made no changes to the destination.
+ MergeComplete MergeOutputFlags = 1 << iota
+)
+
+// CheckInitializedInput is input to the CheckInitialized method.
+type CheckInitializedInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Message protoreflect.Message
+}
+
+// CheckInitializedOutput is output from the CheckInitialized method.
+type CheckInitializedOutput = struct {
+ pragma.NoUnkeyedLiterals
+}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
new file mode 100644
index 00000000..4a1ab7fb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoimpl contains the default implementation for messages
+// generated by protoc-gen-go.
+//
+// WARNING: This package should only ever be imported by generated messages.
+// The compatibility agreement covers nothing except for functionality needed
+// to keep existing generated messages operational. Breakages that occur due
+// to unauthorized usages of this package are not the author's responsibility.
+package protoimpl
+
+import (
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/filetype"
+ "google.golang.org/protobuf/internal/impl"
+)
+
+// UnsafeEnabled specifies whether package unsafe can be used.
+const UnsafeEnabled = impl.UnsafeEnabled
+
+type (
+ // Types used by generated code in init functions.
+ DescBuilder = filedesc.Builder
+ TypeBuilder = filetype.Builder
+
+ // Types used by generated code to implement EnumType, MessageType, and ExtensionType.
+ EnumInfo = impl.EnumInfo
+ MessageInfo = impl.MessageInfo
+ ExtensionInfo = impl.ExtensionInfo
+
+ // Types embedded in generated messages.
+ MessageState = impl.MessageState
+ SizeCache = impl.SizeCache
+ WeakFields = impl.WeakFields
+ UnknownFields = impl.UnknownFields
+ ExtensionFields = impl.ExtensionFields
+ ExtensionFieldV1 = impl.ExtensionField
+
+ Pointer = impl.Pointer
+)
+
+var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
new file mode 100644
index 00000000..ff094e1b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go
@@ -0,0 +1,56 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoimpl
+
+import (
+ "google.golang.org/protobuf/internal/version"
+)
+
+const (
+ // MaxVersion is the maximum supported version for generated .pb.go files.
+ // It is always the current version of the module.
+ MaxVersion = version.Minor
+
+ // GenVersion is the runtime version required by generated .pb.go files.
+ // This is incremented when generated code relies on new functionality
+ // in the runtime.
+ GenVersion = 20
+
+ // MinVersion is the minimum supported version for generated .pb.go files.
+ // This is incremented when the runtime drops support for old code.
+ MinVersion = 0
+)
+
+// EnforceVersion is used by code generated by protoc-gen-go
+// to statically enforce minimum and maximum versions of this package.
+// A compilation failure implies either that:
+// * the runtime package is too old and needs to be updated OR
+// * the generated code is too old and needs to be regenerated.
+//
+// The runtime package can be upgraded by running:
+// go get google.golang.org/protobuf
+//
+// The generated code can be regenerated by running:
+// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES}
+//
+// Example usage by generated code:
+// const (
+// // Verify that this generated code is sufficiently up-to-date.
+// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion)
+// // Verify that runtime/protoimpl is sufficiently up-to-date.
+// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion)
+// )
+//
+// The genVersion is the current minor version used to generated the code.
+// This compile-time check relies on negative integer overflow of a uint
+// being a compilation failure (guaranteed by the Go specification).
+type EnforceVersion uint
+
+// This enforces the following invariant:
+// MinVersion ≤ GenVersion ≤ MaxVersion
+const (
+ _ = EnforceVersion(GenVersion - MinVersion)
+ _ = EnforceVersion(MaxVersion - GenVersion)
+)
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
new file mode 100644
index 00000000..5f9498e4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -0,0 +1,287 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package anypb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": ,
+// "lastName":
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+type Any struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. This string must contain at least
+ // one "/" character. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
+ //
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Any) Reset() {
+ *x = Any{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Any) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Any) ProtoMessage() {}
+
+func (x *Any) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Any.ProtoReflect.Descriptor instead.
+func (*Any) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_any_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Any) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *Any) GetValue() []byte {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_google_protobuf_any_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_any_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
+ 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02,
+ 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e,
+ 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_any_proto_rawDescOnce sync.Once
+ file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+)
+
+func file_google_protobuf_any_proto_rawDescGZIP() []byte {
+ file_google_protobuf_any_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+ })
+ return file_google_protobuf_any_proto_rawDescData
+}
+
+var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_any_proto_goTypes = []interface{}{
+ (*Any)(nil), // 0: google.protobuf.Any
+}
+var file_google_protobuf_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_any_proto_init() }
+func file_google_protobuf_any_proto_init() {
+ if File_google_protobuf_any_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Any); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_any_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_any_proto_depIdxs,
+ MessageInfos: file_google_protobuf_any_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_any_proto = out.File
+ file_google_protobuf_any_proto_rawDesc = nil
+ file_google_protobuf_any_proto_goTypes = nil
+ file_google_protobuf_any_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
new file mode 100644
index 00000000..3997c604
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -0,0 +1,249 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package durationpb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (x *Duration) Reset() {
+ *x = Duration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Duration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Duration) ProtoMessage() {}
+
+func (x *Duration) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Duration.ProtoReflect.Descriptor instead.
+func (*Duration) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Duration) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+
+func (x *Duration) GetNanos() int32 {
+ if x != nil {
+ return x.Nanos
+ }
+ return 0
+}
+
+var File_google_protobuf_duration_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_duration_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
+ 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
+ 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a,
+ 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c,
+ 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_duration_proto_rawDescOnce sync.Once
+ file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+)
+
+func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
+ file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+ })
+ return file_google_protobuf_duration_proto_rawDescData
+}
+
+var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_duration_proto_goTypes = []interface{}{
+ (*Duration)(nil), // 0: google.protobuf.Duration
+}
+var file_google_protobuf_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_duration_proto_init() }
+func file_google_protobuf_duration_proto_init() {
+ if File_google_protobuf_duration_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Duration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_duration_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_duration_proto_depIdxs,
+ MessageInfos: file_google_protobuf_duration_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_duration_proto = out.File
+ file_google_protobuf_duration_proto_rawDesc = nil
+ file_google_protobuf_duration_proto_goTypes = nil
+ file_google_protobuf_duration_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
new file mode 100644
index 00000000..6fe6d42f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -0,0 +1,271 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamppb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// A Timestamp represents a point in time independent of any time zone or local
+// calendar, encoded as a count of seconds and fractions of seconds at
+// nanosecond resolution. The count is relative to an epoch at UTC midnight on
+// January 1, 1970, in the proleptic Gregorian calendar which extends the
+// Gregorian calendar backwards to year one.
+//
+// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+// second table is needed for interpretation, using a [24-hour linear
+// smear](https://developers.google.com/time/smear).
+//
+// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+// restricting to that range, we ensure that we can convert to and from [RFC
+// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard
+// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using
+// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+// the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (x *Timestamp) Reset() {
+ *x = Timestamp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Timestamp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Timestamp) ProtoMessage() {}
+
+func (x *Timestamp) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Timestamp) GetSeconds() int64 {
+ if x != nil {
+ return x.Seconds
+ }
+ return 0
+}
+
+func (x *Timestamp) GetNanos() int32 {
+ if x != nil {
+ return x.Nanos
+ }
+ return 0
+}
+
+var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
+ 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02,
+ 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
+ file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+)
+
+func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
+ file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+ })
+ return file_google_protobuf_timestamp_proto_rawDescData
+}
+
+var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+ (*Timestamp)(nil), // 0: google.protobuf.Timestamp
+}
+var file_google_protobuf_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_timestamp_proto_init() }
+func file_google_protobuf_timestamp_proto_init() {
+ if File_google_protobuf_timestamp_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Timestamp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_timestamp_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs,
+ MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_timestamp_proto = out.File
+ file_google_protobuf_timestamp_proto_rawDesc = nil
+ file_google_protobuf_timestamp_proto_goTypes = nil
+ file_google_protobuf_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
index 1f7e87e6..d2c2308f 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -86,6 +86,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) {
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
}
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 616c9b7e..0aa53875 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,9 +1,28 @@
+# github.com/OneOfOne/xxhash v1.2.8
+github.com/OneOfOne/xxhash
+# github.com/coreos/go-systemd/v22 v22.1.0
+github.com/coreos/go-systemd/v22/dbus
+# github.com/cyphar/filepath-securejoin v0.2.2
+github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
+# github.com/ghodss/yaml v1.0.0
+github.com/ghodss/yaml
+# github.com/gobwas/glob v0.2.3
+github.com/gobwas/glob
+github.com/gobwas/glob/compiler
+github.com/gobwas/glob/match
+github.com/gobwas/glob/syntax
+github.com/gobwas/glob/syntax/ast
+github.com/gobwas/glob/syntax/lexer
+github.com/gobwas/glob/util/runes
+github.com/gobwas/glob/util/strings
+# github.com/godbus/dbus/v5 v5.0.3
+github.com/godbus/dbus/v5
# github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/sortkeys
-# github.com/golang/protobuf v1.3.2
+# github.com/golang/protobuf v1.4.3
github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
@@ -15,25 +34,95 @@ github.com/google/gofuzz
github.com/googleapis/gnostic/OpenAPIv2
github.com/googleapis/gnostic/compiler
github.com/googleapis/gnostic/extensions
-# github.com/json-iterator/go v1.1.8
+# github.com/json-iterator/go v1.1.10
github.com/json-iterator/go
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
-# github.com/opencontainers/runtime-spec v1.0.2 => github.com/kinvolk/runtime-spec v1.0.2-0.20201110202115-2755fc508653
+# github.com/open-policy-agent/opa v0.26.0
+## explicit
+github.com/open-policy-agent/opa/ast
+github.com/open-policy-agent/opa/ast/internal/scanner
+github.com/open-policy-agent/opa/ast/internal/tokens
+github.com/open-policy-agent/opa/ast/location
+github.com/open-policy-agent/opa/bundle
+github.com/open-policy-agent/opa/format
+github.com/open-policy-agent/opa/internal/bundle
+github.com/open-policy-agent/opa/internal/cidr/merge
+github.com/open-policy-agent/opa/internal/compiler/wasm
+github.com/open-policy-agent/opa/internal/compiler/wasm/opa
+github.com/open-policy-agent/opa/internal/deepcopy
+github.com/open-policy-agent/opa/internal/file/archive
+github.com/open-policy-agent/opa/internal/file/url
+github.com/open-policy-agent/opa/internal/ir
+github.com/open-policy-agent/opa/internal/jwx/buffer
+github.com/open-policy-agent/opa/internal/jwx/jwa
+github.com/open-policy-agent/opa/internal/jwx/jwk
+github.com/open-policy-agent/opa/internal/jwx/jws
+github.com/open-policy-agent/opa/internal/jwx/jws/sign
+github.com/open-policy-agent/opa/internal/jwx/jws/verify
+github.com/open-policy-agent/opa/internal/lcss
+github.com/open-policy-agent/opa/internal/leb128
+github.com/open-policy-agent/opa/internal/merge
+github.com/open-policy-agent/opa/internal/planner
+github.com/open-policy-agent/opa/internal/semver
+github.com/open-policy-agent/opa/internal/uuid
+github.com/open-policy-agent/opa/internal/version
+github.com/open-policy-agent/opa/internal/wasm/constant
+github.com/open-policy-agent/opa/internal/wasm/encoding
+github.com/open-policy-agent/opa/internal/wasm/instruction
+github.com/open-policy-agent/opa/internal/wasm/module
+github.com/open-policy-agent/opa/internal/wasm/opcode
+github.com/open-policy-agent/opa/internal/wasm/sdk/internal/wasm
+github.com/open-policy-agent/opa/internal/wasm/sdk/opa
+github.com/open-policy-agent/opa/internal/wasm/sdk/opa/errors
+github.com/open-policy-agent/opa/internal/wasm/types
+github.com/open-policy-agent/opa/keys
+github.com/open-policy-agent/opa/loader
+github.com/open-policy-agent/opa/metrics
+github.com/open-policy-agent/opa/rego
+github.com/open-policy-agent/opa/resolver
+github.com/open-policy-agent/opa/resolver/wasm
+github.com/open-policy-agent/opa/storage
+github.com/open-policy-agent/opa/storage/inmem
+github.com/open-policy-agent/opa/topdown
+github.com/open-policy-agent/opa/topdown/builtins
+github.com/open-policy-agent/opa/topdown/cache
+github.com/open-policy-agent/opa/topdown/copypropagation
+github.com/open-policy-agent/opa/types
+github.com/open-policy-agent/opa/util
+github.com/open-policy-agent/opa/version
+# github.com/opencontainers/runc v0.0.0-00010101000000-000000000000 => github.com/kinvolk/runc v0.1.1-0.20201126131201-5a620a897292
+## explicit
+github.com/opencontainers/runc/libcontainer/cgroups
+github.com/opencontainers/runc/libcontainer/cgroups/fscommon
+github.com/opencontainers/runc/libcontainer/configs
+github.com/opencontainers/runc/libcontainer/seccomp
+github.com/opencontainers/runc/libcontainer/specconv
+github.com/opencontainers/runc/libcontainer/system
+github.com/opencontainers/runc/libcontainer/user
+github.com/opencontainers/runc/libcontainer/utils
+# github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d => github.com/kinvolk/runtime-spec v1.0.2-0.20210309175439-58798e75e980
## explicit
github.com/opencontainers/runtime-spec/specs-go
+# github.com/pkg/errors v0.9.1
+github.com/pkg/errors
+# github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
+github.com/rcrowley/go-metrics
# github.com/seccomp/libseccomp-golang v0.9.1 => github.com/kinvolk/libseccomp-golang v0.9.2-0.20201113182948-883917843313
## explicit
github.com/seccomp/libseccomp-golang
# github.com/sirupsen/logrus v1.7.0
## explicit
github.com/sirupsen/logrus
-# golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
-## explicit
+# github.com/wasmerio/go-ext-wasm v0.3.1
+github.com/wasmerio/go-ext-wasm/wasmer
+# github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b
+github.com/yashtewari/glob-intersection
+# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/crypto/ssh/terminal
-# golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
+# golang.org/x/net v0.0.0-20200927032502-5d4f70055728
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/http/httpguts
@@ -44,12 +133,12 @@ golang.org/x/net/idna
## explicit
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sys v0.0.0-20201101102859-da207088b7d1
+# golang.org/x/sys v0.0.0-20201107080550-4d91cf3a1aaf
## explicit
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/text v0.3.2
+# golang.org/x/text v0.3.3
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
@@ -65,9 +154,40 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
+# google.golang.org/protobuf v1.23.0
+google.golang.org/protobuf/encoding/prototext
+google.golang.org/protobuf/encoding/protowire
+google.golang.org/protobuf/internal/descfmt
+google.golang.org/protobuf/internal/descopts
+google.golang.org/protobuf/internal/detrand
+google.golang.org/protobuf/internal/encoding/defval
+google.golang.org/protobuf/internal/encoding/messageset
+google.golang.org/protobuf/internal/encoding/tag
+google.golang.org/protobuf/internal/encoding/text
+google.golang.org/protobuf/internal/errors
+google.golang.org/protobuf/internal/fieldnum
+google.golang.org/protobuf/internal/fieldsort
+google.golang.org/protobuf/internal/filedesc
+google.golang.org/protobuf/internal/filetype
+google.golang.org/protobuf/internal/flags
+google.golang.org/protobuf/internal/genname
+google.golang.org/protobuf/internal/impl
+google.golang.org/protobuf/internal/mapsort
+google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/set
+google.golang.org/protobuf/internal/strs
+google.golang.org/protobuf/internal/version
+google.golang.org/protobuf/proto
+google.golang.org/protobuf/reflect/protoreflect
+google.golang.org/protobuf/reflect/protoregistry
+google.golang.org/protobuf/runtime/protoiface
+google.golang.org/protobuf/runtime/protoimpl
+google.golang.org/protobuf/types/known/anypb
+google.golang.org/protobuf/types/known/durationpb
+google.golang.org/protobuf/types/known/timestamppb
# gopkg.in/inf.v0 v0.9.1
gopkg.in/inf.v0
-# gopkg.in/yaml.v2 v2.2.8
+# gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2
# k8s.io/api v0.17.4
## explicit
@@ -206,5 +326,6 @@ k8s.io/klog
k8s.io/utils/integer
# sigs.k8s.io/yaml v1.1.0
sigs.k8s.io/yaml
-# github.com/opencontainers/runtime-spec => github.com/kinvolk/runtime-spec v1.0.2-0.20201110202115-2755fc508653
+# github.com/opencontainers/runtime-spec => github.com/kinvolk/runtime-spec v1.0.2-0.20210309175439-58798e75e980
# github.com/seccomp/libseccomp-golang => github.com/kinvolk/libseccomp-golang v0.9.2-0.20201113182948-883917843313
+# github.com/opencontainers/runc => github.com/kinvolk/runc v0.1.1-0.20201126131201-5a620a897292