diff --git a/benchmarks/benchmark_test.go b/benchmarks/benchmark_test.go index 814254c7..dbe3c90e 100644 --- a/benchmarks/benchmark_test.go +++ b/benchmarks/benchmark_test.go @@ -26,7 +26,7 @@ import ( "github.com/ipfs/go-unixfs/importer/balanced" ihelper "github.com/ipfs/go-unixfs/importer/helpers" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" ipldselector "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/ipld/go-ipld-prime/traversal/selector/builder" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" diff --git a/cidset/cidset.go b/cidset/cidset.go index 640e8e90..449a1606 100644 --- a/cidset/cidset.go +++ b/cidset/cidset.go @@ -6,7 +6,7 @@ import ( "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime/fluent" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipfs/go-graphsync/ipldutil" ) diff --git a/dedupkey/dedupkey.go b/dedupkey/dedupkey.go index d1d11e0e..0fd2b4fe 100644 --- a/dedupkey/dedupkey.go +++ b/dedupkey/dedupkey.go @@ -1,7 +1,7 @@ package dedupkey import ( - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipfs/go-graphsync/ipldutil" ) diff --git a/donotsendfirstblocks/donotsendfirstblocks.go b/donotsendfirstblocks/donotsendfirstblocks.go index 629533a2..af6e125f 100644 --- a/donotsendfirstblocks/donotsendfirstblocks.go +++ b/donotsendfirstblocks/donotsendfirstblocks.go @@ -1,7 +1,7 @@ package donotsendfirstblocks import ( - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipfs/go-graphsync/ipldutil" ) diff --git a/go.mod b/go.mod index 63947240..4f3db585 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-log/v2 v2.1.1 github.com/ipfs/go-merkledag v0.3.2 - github.com/ipfs/go-peertaskqueue v0.6.0 + github.com/ipfs/go-peertaskqueue v0.7.1 github.com/ipfs/go-unixfs v0.2.4 github.com/ipld/go-codec-dagpb v1.3.0 github.com/ipld/go-ipld-prime v0.12.3 @@ -35,9 +35,12 @@ require ( github.com/libp2p/go-msgio v0.0.6 github.com/multiformats/go-multiaddr v0.3.1 github.com/multiformats/go-multihash v0.0.15 - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.7.0 github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 - golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + go.opentelemetry.io/otel v1.2.0 + go.opentelemetry.io/otel/sdk v1.2.0 + go.opentelemetry.io/otel/trace v1.2.0 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 google.golang.org/protobuf v1.27.1 ) diff --git a/go.sum b/go.sum index d6ffa038..187bb7d4 100644 --- a/go.sum +++ b/go.sum @@ -99,8 +99,9 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -228,8 +229,8 @@ github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fG github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.6.0 h1:BT1/PuNViVomiz1PnnP5+WmKsTNHrxIDvkZrkj4JhOg= -github.com/ipfs/go-peertaskqueue v0.6.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= @@ -616,8 +617,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg= @@ -650,6 +652,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v1.2.0 h1:YOQDvxO1FayUcT9MIhJhgMyNO1WqoduiyvQHzGN0kUQ= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/trace v1.2.0 h1:Ys3iqbqZhcf28hHzrm5WAquMkDHNZTUkw7KHbuNjej0= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 
h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -708,8 +716,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -732,8 +740,9 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= diff --git a/graphsync.go b/graphsync.go index 23f6f0fc..a23d9b17 100644 --- a/graphsync.go +++ b/graphsync.go @@ -332,6 +332,34 @@ type Stats struct { OutgoingResponses ResponseStats } +// RequestState describes the current general state of a request +type RequestState uint64 + +// RequestStates describe a set of request IDs and their current state +type RequestStates map[RequestID]RequestState + +const ( + // Queued means a request has been received and is queued for processing + Queued RequestState = iota + // Running means a request is actively sending or receiving data + Running + // Paused means a request is paused + Paused +) + +func (rs RequestState) String() string { + switch rs { + case Queued: + return "queued" + case Running: + return "running" + case Paused: + return "paused" + default: + return "unrecognized request state" + } +} + // GraphExchange is a protocol that can exchange IPLD graphs based on a selector type GraphExchange interface { // Request initiates a new GraphSync request to the given peer using the given selector spec. 
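Note on the new graphsync.go API above: RequestState and RequestStates are exported so callers can see where each request sits in its lifecycle. A minimal sketch of consuming them follows; the sample request IDs and the print loop are illustrative only and are not part of this change.

package main

import (
	"fmt"

	"github.com/ipfs/go-graphsync"
)

func main() {
	// Hypothetical snapshot of three outgoing requests and their states.
	states := graphsync.RequestStates{
		graphsync.RequestID(1): graphsync.Running,
		graphsync.RequestID(2): graphsync.Queued,
		graphsync.RequestID(3): graphsync.Paused,
	}
	for id, state := range states {
		// RequestState implements Stringer: "queued", "running" or "paused".
		fmt.Printf("request %d is %s\n", id, state)
	}
}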
diff --git a/impl/graphsync.go b/impl/graphsync.go index fe26fed1..49e5e3f8 100644 --- a/impl/graphsync.go +++ b/impl/graphsync.go @@ -8,6 +8,9 @@ import ( "github.com/ipfs/go-peertaskqueue" ipld "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p-core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "github.com/ipfs/go-graphsync" "github.com/ipfs/go-graphsync/allocator" @@ -16,6 +19,7 @@ import ( "github.com/ipfs/go-graphsync/messagequeue" gsnet "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/peermanager" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/requestmanager" "github.com/ipfs/go-graphsync/requestmanager/asyncloader" "github.com/ipfs/go-graphsync/requestmanager/executor" @@ -304,6 +308,15 @@ func New(parent context.Context, network gsnet.GraphSyncNetwork, // Request initiates a new GraphSync request to the given peer using the given selector spec. func (gs *GraphSync) Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node, extensions ...graphsync.ExtensionData) (<-chan graphsync.ResponseProgress, <-chan error) { + var extNames []string + for _, ext := range extensions { + extNames = append(extNames, string(ext.Name)) + } + ctx, _ = otel.Tracer("graphsync").Start(ctx, "request", trace.WithAttributes( + attribute.String("peerID", p.Pretty()), + attribute.String("root", root.String()), + attribute.StringSlice("extensions", extNames), + )) return gs.requestManager.NewRequest(ctx, p, root, selector, extensions...) } @@ -446,6 +459,20 @@ func (gs *GraphSync) Stats() graphsync.Stats { } } +// PeerState describes the state of graphsync for a given peer +type PeerState struct { + OutgoingState peerstate.PeerState + IncomingState peerstate.PeerState +} + +// PeerState produces insight on the current state of a given peer +func (gs *GraphSync) PeerState(p peer.ID) PeerState { + return PeerState{ + OutgoingState: gs.requestManager.PeerState(p), + IncomingState: gs.responseManager.PeerState(p), + } +} + type graphSyncReceiver GraphSync func (gsr *graphSyncReceiver) graphSync() *GraphSync { @@ -458,8 +485,17 @@ func (gsr *graphSyncReceiver) ReceiveMessage( ctx context.Context, sender peer.ID, incoming gsmsg.GraphSyncMessage) { - gsr.graphSync().responseManager.ProcessRequests(ctx, sender, incoming.Requests()) - gsr.graphSync().requestManager.ProcessResponses(sender, incoming.Responses(), incoming.Blocks()) + + requests := incoming.Requests() + responses := incoming.Responses() + blocks := incoming.Blocks() + + if len(requests) > 0 { + gsr.graphSync().responseManager.ProcessRequests(ctx, sender, requests) + } + if len(responses) > 0 || len(blocks) > 0 { + gsr.graphSync().requestManager.ProcessResponses(sender, responses, blocks) + } } // ReceiveError is part of the network's Receiver interface and handles incoming diff --git a/impl/graphsync_test.go b/impl/graphsync_test.go index 6d0a9ce1..af03aa51 100644 --- a/impl/graphsync_test.go +++ b/impl/graphsync_test.go @@ -30,7 +30,7 @@ import ( ihelper "github.com/ipfs/go-unixfs/importer/helpers" ipld "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/host" @@ -41,13 +41,18 @@ import 
( "github.com/ipfs/go-graphsync" "github.com/ipfs/go-graphsync/cidset" "github.com/ipfs/go-graphsync/donotsendfirstblocks" + "github.com/ipfs/go-graphsync/ipldutil" gsmsg "github.com/ipfs/go-graphsync/message" gsnet "github.com/ipfs/go-graphsync/network" + "github.com/ipfs/go-graphsync/requestmanager/hooks" "github.com/ipfs/go-graphsync/storeutil" + "github.com/ipfs/go-graphsync/taskqueue" "github.com/ipfs/go-graphsync/testutil" ) func TestMakeRequestToNetwork(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 10*time.Second) @@ -84,12 +89,32 @@ func TestMakeRequestToNetwork(t *testing.T) { returnedData, found := receivedRequest.Extension(td.extensionName) require.True(t, found) require.Equal(t, td.extensionData, returnedData, "Failed to encode extension") + + graphSync.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + + // make sure the attributes are what we expect + requestSpans := tracing.FindSpans("request") + peerIdAttr := testutil.AttributeValueInTraceSpan(t, requestSpans[0], "peerID") + require.Equal(t, td.host2.ID().Pretty(), peerIdAttr.AsString()) + rootAttr := testutil.AttributeValueInTraceSpan(t, requestSpans[0], "root") + require.Equal(t, blockChain.TipLink.String(), rootAttr.AsString()) + extensionsAttr := testutil.AttributeValueInTraceSpan(t, requestSpans[0], "extensions") + require.Equal(t, []string{string(td.extensionName)}, extensionsAttr.AsStringSlice()) + requestIdAttr := testutil.AttributeValueInTraceSpan(t, requestSpans[0], "requestID") + require.Equal(t, int64(0), requestIdAttr.AsInt64()) } func TestSendResponseToIncomingRequest(t *testing.T) { // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) r := &receiver{ @@ -153,9 +178,11 @@ func TestSendResponseToIncomingRequest(t *testing.T) { } func TestRejectRequestsByDefault(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -171,12 +198,25 @@ func TestRejectRequestsByDefault(t *testing.T) { testutil.VerifyEmptyResponse(ctx, t, progressChan) testutil.VerifySingleTerminalError(ctx, t, errChan) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // has ContextCancelError exception recorded in the right place + tracing.SingleExceptionEvent(t, "request(0)->executeTask(0)", "ContextCancelError", ipldutil.ContextCancelError{}.Error(), false) } func TestGraphsyncRoundTripRequestBudgetRequestor(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -197,12 +237,25 @@ 
func TestGraphsyncRoundTripRequestBudgetRequestor(t *testing.T) { blockChain.VerifyResponseRange(ctx, progressChan, 0, int(linksToTraverse)) testutil.VerifySingleTerminalError(ctx, t, errChan) require.Len(t, td.blockStore1, int(linksToTraverse), "did not store all blocks") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // has ErrBudgetExceeded exception recorded in the right place + tracing.SingleExceptionEvent(t, "request(0)->executeTask(0)", "ErrBudgetExceeded", "traversal budget exceeded", true) } func TestGraphsyncRoundTripRequestBudgetResponder(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -223,12 +276,26 @@ func TestGraphsyncRoundTripRequestBudgetResponder(t *testing.T) { blockChain.VerifyResponseRange(ctx, progressChan, 0, int(linksToTraverse)) testutil.VerifySingleTerminalError(ctx, t, errChan) require.Len(t, td.blockStore1, int(linksToTraverse), "did not store all blocks") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // has ContextCancelError exception recorded in the right place + // the requester gets a cancel, the responder gets a ErrBudgetExceeded + tracing.SingleExceptionEvent(t, "request(0)->executeTask(0)", "ContextCancelError", ipldutil.ContextCancelError{}.Error(), false) } func TestGraphsyncRoundTrip(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -284,12 +351,23 @@ func TestGraphsyncRoundTrip(t *testing.T) { var finalResponseStatus graphsync.ResponseStatusCode testutil.AssertReceive(ctx, t, finalResponseStatusChan, &finalResponseStatus, "should receive status") require.Equal(t, graphsync.RequestCompletedFull, finalResponseStatus) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestGraphsyncRoundTripPartial(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -333,12 +411,23 @@ func TestGraphsyncRoundTripPartial(t *testing.T) { var finalResponseStatus graphsync.ResponseStatusCode testutil.AssertReceive(ctx, t, finalResponseStatusChan, &finalResponseStatus, "should receive status") require.Equal(t, graphsync.RequestCompletedPartial, finalResponseStatus) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + 
require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestGraphsyncRoundTripIgnoreCids(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -382,9 +471,20 @@ func TestGraphsyncRoundTripIgnoreCids(t *testing.T) { require.Equal(t, blockChainLength, totalSent) require.Equal(t, blockChainLength-set.Len(), totalSentOnWire) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestGraphsyncRoundTripIgnoreNBlocks(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -433,9 +533,20 @@ func TestGraphsyncRoundTripIgnoreNBlocks(t *testing.T) { require.Equal(t, blockChainLength, totalSent) require.Equal(t, blockChainLength-50, totalSentOnWire) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestPauseResume(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Second) @@ -477,6 +588,25 @@ func TestPauseResume(t *testing.T) { timer := time.NewTimer(100 * time.Millisecond) testutil.AssertDoesReceiveFirst(t, timer.C, "should pause request", progressChan) + requestorPeerState := requestor.(*GraphSync).PeerState(td.host2.ID()) + require.Len(t, requestorPeerState.OutgoingState.RequestStates, 1) + require.Len(t, requestorPeerState.IncomingState.RequestStates, 0) + require.Len(t, requestorPeerState.OutgoingState.Active, 1) + require.Contains(t, requestorPeerState.OutgoingState.RequestStates, requestorPeerState.OutgoingState.Active[0]) + require.Len(t, requestorPeerState.OutgoingState.Pending, 0) + require.Len(t, requestorPeerState.IncomingState.Active, 0) + require.Len(t, requestorPeerState.IncomingState.Pending, 0) + require.Len(t, requestorPeerState.OutgoingState.Diagnostics(), 0) + responderPeerState := responder.(*GraphSync).PeerState(td.host1.ID()) + require.Len(t, responderPeerState.IncomingState.RequestStates, 1) + require.Len(t, responderPeerState.OutgoingState.RequestStates, 0) + // no tasks as response is paused by responder + require.Len(t, responderPeerState.IncomingState.Active, 0) + require.Len(t, responderPeerState.IncomingState.Pending, 0) + require.Len(t, responderPeerState.OutgoingState.Active, 0) + require.Len(t, responderPeerState.OutgoingState.Pending, 0) + require.Len(t, responderPeerState.IncomingState.Diagnostics(), 0) + requestID := <-requestIDChan err := responder.UnpauseResponse(td.host1.ID(), requestID) require.NoError(t, err) @@ -485,8 +615,19 @@ func TestPauseResume(t *testing.T) { testutil.VerifyEmptyErrors(ctx, t, errChan) require.Len(t, td.blockStore1, blockChainLength, "did not store all blocks") + 
requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } + func TestPauseResumeRequest(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Second) @@ -531,9 +672,23 @@ func TestPauseResumeRequest(t *testing.T) { blockChain.VerifyRemainder(ctx, progressChan, stopPoint) testutil.VerifyEmptyErrors(ctx, t, errChan) require.Len(t, td.blockStore1, blockChainLength, "did not store all blocks") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->executeTask(1)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // has ErrPaused exception recorded in the right place + tracing.SingleExceptionEvent(t, "request(0)->executeTask(0)", "ErrPaused", hooks.ErrPaused{}.Error(), false) } func TestPauseResumeViaUpdate(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Second) @@ -590,9 +745,20 @@ func TestPauseResumeViaUpdate(t *testing.T) { require.Equal(t, td.extensionResponseData, receivedReponseData, "did not receive correct extension response data") require.Equal(t, td.extensionUpdateData, receivedUpdateData, "did not receive correct extension update data") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestPauseResumeViaUpdateOnBlockHook(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Second) @@ -651,9 +817,20 @@ func TestPauseResumeViaUpdateOnBlockHook(t *testing.T) { require.Equal(t, td.extensionResponseData, receivedReponseData, "did not receive correct extension response data") require.Equal(t, td.extensionUpdateData, receivedUpdateData, "did not receive correct extension update data") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestNetworkDisconnect(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Second) @@ -722,9 +899,22 @@ func TestNetworkDisconnect(t *testing.T) { testutil.AssertReceive(ctx, t, errChan, &err, "should receive an error") require.EqualError(t, err, graphsync.RequestClientCancelledErr{}.Error()) testutil.AssertReceive(ctx, t, receiverError, &err, "should receive an error on receiver side") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + 
"request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // has ContextCancelError exception recorded in the right place + tracing.SingleExceptionEvent(t, "request(0)->executeTask(0)", "ContextCancelError", ipldutil.ContextCancelError{}.Error(), false) } func TestConnectFail(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() ctx, cancel := context.WithTimeout(ctx, 2*time.Second) @@ -757,12 +947,25 @@ func TestConnectFail(t *testing.T) { testutil.AssertReceive(ctx, t, reqNetworkError, &err, "should receive network error") testutil.AssertReceive(ctx, t, errChan, &err, "should receive an error") require.EqualError(t, err, graphsync.RequestClientCancelledErr{}.Error()) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // has ContextCancelError exception recorded in the right place + tracing.SingleExceptionEvent(t, "request(0)->executeTask(0)", "ContextCancelError", ipldutil.ContextCancelError{}.Error(), false) } func TestGraphsyncRoundTripAlternatePersistenceAndNodes(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -822,12 +1025,29 @@ func TestGraphsyncRoundTripAlternatePersistenceAndNodes(t *testing.T) { testutil.VerifyEmptyErrors(ctx, t, errChan) require.Len(t, td.blockStore1, 0, "should store no blocks in normal store") require.Len(t, altStore1, blockChainLength, "did not store all blocks in alternate store") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + // two complete request traces expected + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + "request(1)->newRequest(0)", + "request(1)->executeTask(0)", + "request(1)->terminateRequest(0)", + }, tracing.TracesToStrings()) + // TODO(rvagg): this is randomly either a SkipMe or a ipldutil.ContextCancelError; confirm this is sane + // tracing.SingleExceptionEvent(t, "request(0)->newRequest(0)","request(0)->executeTask(0)", "SkipMe", traversal.SkipMe{}.Error(), true) } func TestGraphsyncRoundTripMultipleAlternatePersistence(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -887,6 +1107,18 @@ func TestGraphsyncRoundTripMultipleAlternatePersistence(t *testing.T) { testutil.VerifyEmptyErrors(ctx, t, errChan2) require.Len(t, altStore1, blockChainLength, "did not store all blocks in alternate store 2") + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + // two complete request traces expected + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + "request(1)->newRequest(0)", + "request(1)->executeTask(0)", + "request(1)->terminateRequest(0)", + }, tracing.TracesToStrings()) } // 
TestRoundTripLargeBlocksSlowNetwork test verifies graphsync continues to work @@ -898,6 +1130,8 @@ func TestGraphsyncRoundTripMultipleAlternatePersistence(t *testing.T) { // backlog of blocks and then sending them in one giant network packet that can't // be decoded on the client side func TestRoundTripLargeBlocksSlowNetwork(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network if testing.Short() { t.Skip() @@ -924,6 +1158,15 @@ func TestRoundTripLargeBlocksSlowNetwork(t *testing.T) { blockChain.VerifyWholeChain(ctx, progressChan) testutil.VerifyEmptyErrors(ctx, t, errChan) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } // What this test does: @@ -939,6 +1182,7 @@ func TestUnixFSFetch(t *testing.T) { if testing.Short() { t.Skip() } + collectTracing := testutil.SetupTracing() const unixfsChunkSize uint64 = 1 << 10 const unixfsLinksPerLevel = 1024 @@ -1044,12 +1288,23 @@ func TestUnixFSFetch(t *testing.T) { // verify original bytes match final bytes! require.Equal(t, origBytes, finalBytes, "should have gotten same bytes written as read but didn't") + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } func TestGraphsyncBlockListeners(t *testing.T) { + collectTracing := testutil.SetupTracing() + // create network ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 2*time.Second) defer cancel() td := newGsTestData(ctx, t) @@ -1124,6 +1379,15 @@ func TestGraphsyncBlockListeners(t *testing.T) { require.Equal(t, blockChainLength, blocksOutgoing) require.Equal(t, blockChainLength, blocksIncoming) require.Equal(t, blockChainLength, blocksSent) + + requestor.(*GraphSync).requestQueue.(*taskqueue.WorkerTaskQueue).WaitForNoActiveTasks() + + tracing := collectTracing(t) + require.ElementsMatch(t, []string{ + "request(0)->newRequest(0)", + "request(0)->executeTask(0)", + "request(0)->terminateRequest(0)", + }, tracing.TracesToStrings()) } type gsTestData struct { diff --git a/ipldutil/ipldutil.go b/ipldutil/ipldutil.go index 744cae8e..e2e575ae 100644 --- a/ipldutil/ipldutil.go +++ b/ipldutil/ipldutil.go @@ -3,23 +3,12 @@ package ipldutil import ( "bytes" - dagpb "github.com/ipld/go-codec-dagpb" ipld "github.com/ipld/go-ipld-prime" "github.com/ipld/go-ipld-prime/codec/dagcbor" _ "github.com/ipld/go-ipld-prime/codec/raw" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" ) -var defaultChooser = func(lnk ipld.Link, lctx ipld.LinkContext) (ipld.NodePrototype, error) { - // We can decode all nodes into basicnode's Any, except for - // dagpb nodes, which must explicitly use the PBNode prototype. 
- if lnk, ok := lnk.(cidlink.Link); ok && lnk.Cid.Prefix().Codec == 0x70 { - return dagpb.Type.PBNode, nil - } - return basicnode.Prototype.Any, nil -} - func EncodeNode(node ipld.Node) ([]byte, error) { var buffer bytes.Buffer err := dagcbor.Encode(node, &buffer) diff --git a/ipldutil/traverser.go b/ipldutil/traverser.go index 68c54d3d..2c3d772d 100644 --- a/ipldutil/traverser.go +++ b/ipldutil/traverser.go @@ -5,8 +5,10 @@ import ( "errors" "io" + dagpb "github.com/ipld/go-codec-dagpb" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal" "github.com/ipld/go-ipld-prime/traversal/selector" ) @@ -19,7 +21,7 @@ can go away */ var defaultLinkSystem = cidlink.DefaultLinkSystem() -var defaultVisitor traversal.AdvVisitFn = func(traversal.Progress, ipld.Node, traversal.VisitReason) error { return nil } +func defaultVisitor(traversal.Progress, ipld.Node, traversal.VisitReason) error { return nil } // ContextCancelError is a sentinel that indicates the passed in context // was cancelled @@ -86,7 +88,7 @@ func (tb TraversalBuilder) Start(parentCtx context.Context) Traverser { root: tb.Root, selector: tb.Selector, visitor: defaultVisitor, - chooser: defaultChooser, + chooser: dagpb.AddSupportToChooser(basicnode.Chooser), linkSystem: tb.LinkSystem, budget: tb.Budget, awaitRequest: make(chan struct{}, 1), diff --git a/ipldutil/traverser_test.go b/ipldutil/traverser_test.go index ac30612f..3a2f463c 100644 --- a/ipldutil/traverser_test.go +++ b/ipldutil/traverser_test.go @@ -11,7 +11,7 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/ipld/go-ipld-prime/traversal/selector/builder" diff --git a/message/message_test.go b/message/message_test.go index 19c101ed..c69a92fd 100644 --- a/message/message_test.go +++ b/message/message_test.go @@ -8,7 +8,7 @@ import ( blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/stretchr/testify/require" diff --git a/messagequeue/messagequeue_test.go b/messagequeue/messagequeue_test.go index 18aa524d..9e8c79a5 100644 --- a/messagequeue/messagequeue_test.go +++ b/messagequeue/messagequeue_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" diff --git a/metadata/metadata_test.go b/metadata/metadata_test.go index ff0f61a6..bc7fdb60 100644 --- a/metadata/metadata_test.go +++ b/metadata/metadata_test.go @@ -8,7 +8,7 @@ import ( "github.com/ipld/go-ipld-prime/codec/dagcbor" "github.com/ipld/go-ipld-prime/fluent" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/stretchr/testify/require" "github.com/ipfs/go-graphsync/testutil" diff --git 
a/network/libp2p_impl_test.go b/network/libp2p_impl_test.go index cfeba7d3..76dc8b56 100644 --- a/network/libp2p_impl_test.go +++ b/network/libp2p_impl_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" diff --git a/peermanager/peermessagemanager_test.go b/peermanager/peermessagemanager_test.go index 028d08b0..03b84ec1 100644 --- a/peermanager/peermessagemanager_test.go +++ b/peermanager/peermessagemanager_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" diff --git a/peerstate/peerstate.go b/peerstate/peerstate.go new file mode 100644 index 00000000..17c4f9d7 --- /dev/null +++ b/peerstate/peerstate.go @@ -0,0 +1,63 @@ +package peerstate + +import ( + "fmt" + + "github.com/ipfs/go-graphsync" +) + +// TaskQueueState describes the set of requests for a given peer in a task queue +type TaskQueueState struct { + Active []graphsync.RequestID + Pending []graphsync.RequestID +} + +// PeerState tracks the overall state of a given peer for either +// incoming or outgoing requests +type PeerState struct { + graphsync.RequestStates + TaskQueueState +} + +// Diagnostics compares request states with the current state of the task queue to identify unexpected +// states or inconsistencies between the tracked task queue and the tracked requests +func (ps PeerState) Diagnostics() map[graphsync.RequestID][]string { + matchedActiveQueue := make(map[graphsync.RequestID]struct{}, len(ps.RequestStates)) + matchedPendingQueue := make(map[graphsync.RequestID]struct{}, len(ps.RequestStates)) + diagnostics := make(map[graphsync.RequestID][]string) + for _, id := range ps.TaskQueueState.Active { + status, ok := ps.RequestStates[id] + if ok { + matchedActiveQueue[id] = struct{}{} + if status != graphsync.Running { + diagnostics[id] = append(diagnostics[id], fmt.Sprintf("expected request with id %d in active task queue to be in running state, but was %s", id, status)) + } + } else { + diagnostics[id] = append(diagnostics[id], fmt.Sprintf("request with id %d in active task queue but appears to have no tracked state", id)) + } + } + for _, id := range ps.TaskQueueState.Pending { + status, ok := ps.RequestStates[id] + if ok { + matchedPendingQueue[id] = struct{}{} + if status != graphsync.Queued { + diagnostics[id] = append(diagnostics[id], fmt.Sprintf("expected request with id %d in pending task queue to be in queued state, but was %s", id, status)) + } + } else { + diagnostics[id] = append(diagnostics[id], fmt.Sprintf("request with id %d in pending task queue but appears to have no tracked state", id)) + } + } + for id, state := range ps.RequestStates { + if state == graphsync.Running { + if _, ok := matchedActiveQueue[id]; !ok { + diagnostics[id] = append(diagnostics[id], fmt.Sprintf("request with id %d in running state is not in the active task queue", id)) + } + } + if state == graphsync.Queued { + if _, ok := matchedPendingQueue[id]; !ok { + diagnostics[id] = append(diagnostics[id], fmt.Sprintf("request with id %d in queued state is not in the pending task queue", id)) + } + } + } + return
diagnostics +} diff --git a/peerstate/peerstate_test.go b/peerstate/peerstate_test.go new file mode 100644 index 00000000..01e94ab0 --- /dev/null +++ b/peerstate/peerstate_test.go @@ -0,0 +1,139 @@ +package peerstate_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/ipfs/go-graphsync" + "github.com/ipfs/go-graphsync/peerstate" + "github.com/stretchr/testify/require" +) + +func TestDiagnostics(t *testing.T) { + requestIDs := make([]graphsync.RequestID, 0, 5) + for i := 0; i < 5; i++ { + requestIDs = append(requestIDs, graphsync.RequestID(rand.Int31())) + } + testCases := map[string]struct { + requestStates graphsync.RequestStates + queueStats peerstate.TaskQueueState + expectedDiagnostics map[graphsync.RequestID][]string + }{ + "all requests and queue match": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[1]: graphsync.Running, + requestIDs[2]: graphsync.Queued, + requestIDs[3]: graphsync.Queued, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0], requestIDs[1]}, + Pending: []graphsync.RequestID{requestIDs[2], requestIDs[3]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{}, + }, + "active task with with incorrect state": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[1]: graphsync.Queued, + requestIDs[2]: graphsync.Queued, + requestIDs[3]: graphsync.Queued, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0], requestIDs[1], requestIDs[4]}, + Pending: []graphsync.RequestID{requestIDs[2], requestIDs[3]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{ + requestIDs[1]: {fmt.Sprintf("expected request with id %d in active task queue to be in running state, but was queued", requestIDs[1]), fmt.Sprintf("request with id %d in queued state is not in the pending task queue", requestIDs[1])}, + requestIDs[4]: {fmt.Sprintf("expected request with id %d in active task queue to be in running state, but was paused", requestIDs[4])}, + }, + }, + "active task with no state": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[2]: graphsync.Queued, + requestIDs[3]: graphsync.Queued, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0], requestIDs[1]}, + Pending: []graphsync.RequestID{requestIDs[2], requestIDs[3]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{ + requestIDs[1]: {fmt.Sprintf("request with id %d in active task queue but appears to have no tracked state", requestIDs[1])}, + }, + }, + "pending task with with incorrect state": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[1]: graphsync.Running, + requestIDs[2]: graphsync.Queued, + requestIDs[3]: graphsync.Running, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0], requestIDs[1]}, + Pending: []graphsync.RequestID{requestIDs[2], requestIDs[3], requestIDs[4]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{ + requestIDs[3]: {fmt.Sprintf("expected request with id %d in pending task queue to be in queued state, but was running", requestIDs[3]), fmt.Sprintf("request with id %d in running state is not in the active task queue", requestIDs[3])}, + requestIDs[4]: {fmt.Sprintf("expected request with id %d 
in pending task queue to be in queued state, but was paused", requestIDs[4])}, + }, + }, + "pending task with no state": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[1]: graphsync.Running, + requestIDs[2]: graphsync.Queued, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0], requestIDs[1]}, + Pending: []graphsync.RequestID{requestIDs[2], requestIDs[3]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{ + requestIDs[3]: {fmt.Sprintf("request with id %d in pending task queue but appears to have no tracked state", requestIDs[3])}, + }, + }, + "request state running with no active task": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[1]: graphsync.Running, + requestIDs[2]: graphsync.Queued, + requestIDs[3]: graphsync.Queued, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0]}, + Pending: []graphsync.RequestID{requestIDs[2], requestIDs[3]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{ + requestIDs[1]: {fmt.Sprintf("request with id %d in running state is not in the active task queue", requestIDs[1])}, + }, + }, + "request state queued with no pending task": { + requestStates: graphsync.RequestStates{ + requestIDs[0]: graphsync.Running, + requestIDs[1]: graphsync.Running, + requestIDs[2]: graphsync.Queued, + requestIDs[3]: graphsync.Queued, + requestIDs[4]: graphsync.Paused, + }, + queueStats: peerstate.TaskQueueState{ + Active: []graphsync.RequestID{requestIDs[0], requestIDs[1]}, + Pending: []graphsync.RequestID{requestIDs[2]}, + }, + expectedDiagnostics: map[graphsync.RequestID][]string{ + requestIDs[3]: {fmt.Sprintf("request with id %d in queued state is not in the pending task queue", requestIDs[3])}, + }, + }, + } + for testCase, data := range testCases { + t.Run(testCase, func(t *testing.T) { + require.Equal(t, data.expectedDiagnostics, peerstate.PeerState{data.requestStates, data.queueStats}.Diagnostics()) + }) + } +} diff --git a/requestmanager/client.go b/requestmanager/client.go index 4e3f2cb0..95081b3d 100644 --- a/requestmanager/client.go +++ b/requestmanager/client.go @@ -16,6 +16,8 @@ import ( "github.com/ipld/go-ipld-prime/traversal" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/libp2p/go-libp2p-core/peer" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/ipfs/go-graphsync" "github.com/ipfs/go-graphsync/ipldutil" @@ -25,6 +27,7 @@ import ( "github.com/ipfs/go-graphsync/metadata" "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/notifications" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/requestmanager/executor" "github.com/ipfs/go-graphsync/requestmanager/hooks" "github.com/ipfs/go-graphsync/requestmanager/types" @@ -42,22 +45,15 @@ const ( defaultPriority = graphsync.Priority(0) ) -type state uint64 - -const ( - queued state = iota - running - paused -) - type inProgressRequestStatus struct { ctx context.Context + span trace.Span startTime time.Time cancelFn func() p peer.ID terminalError error pauseMessages chan struct{} - state state + state graphsync.RequestState lastResponse atomic.Value onTerminated []chan<- error request gsmsg.GraphSyncRequest @@ -174,13 +170,20 @@ func (rm *RequestManager) NewRequest(ctx context.Context, root ipld.Link, selectorNode ipld.Node, extensions 
...graphsync.ExtensionData) (<-chan graphsync.ResponseProgress, <-chan error) { + + span := trace.SpanFromContext(ctx) + if _, err := selector.ParseSelector(selectorNode); err != nil { - return rm.singleErrorResponse(fmt.Errorf("invalid selector spec")) + err := fmt.Errorf("invalid selector spec") + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + defer span.End() + return rm.singleErrorResponse(err) } inProgressRequestChan := make(chan inProgressRequest) - rm.send(&newRequestMessage{p, root, selectorNode, extensions, inProgressRequestChan}, ctx.Done()) + rm.send(&newRequestMessage{span, p, root, selectorNode, extensions, inProgressRequestChan}, ctx.Done()) var receivedInProgressRequest inProgressRequest select { case <-rm.ctx.Done(): @@ -322,7 +325,24 @@ func (rm *RequestManager) GetRequestTask(p peer.ID, task *peertask.Task, request // ReleaseRequestTask releases a task request the requestQueue func (rm *RequestManager) ReleaseRequestTask(p peer.ID, task *peertask.Task, err error) { - rm.send(&releaseRequestTaskMessage{p, task, err}, nil) + done := make(chan struct{}, 1) + rm.send(&releaseRequestTaskMessage{p, task, err, done}, nil) + select { + case <-rm.ctx.Done(): + case <-done: + } +} + +// PeerState gets stats on all outgoing requests for a given peer +func (rm *RequestManager) PeerState(p peer.ID) peerstate.PeerState { + response := make(chan peerstate.PeerState) + rm.send(&peerStateMessage{p, response}, nil) + select { + case <-rm.ctx.Done(): + return peerstate.PeerState{} + case peerState := <-response: + return peerState + } } // SendRequest sends a request to the message queue diff --git a/requestmanager/executor/executor.go b/requestmanager/executor/executor.go index 95b7f23e..88f19979 100644 --- a/requestmanager/executor/executor.go +++ b/requestmanager/executor/executor.go @@ -12,6 +12,9 @@ import ( cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/ipld/go-ipld-prime/traversal" "github.com/libp2p/go-libp2p-core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/ipfs/go-graphsync" "github.com/ipfs/go-graphsync/cidset" @@ -73,11 +76,17 @@ func (e *Executor) ExecuteTask(ctx context.Context, pid peer.ID, task *peertask. log.Info("Empty task on peer request stack") return false } + + _, span := otel.Tracer("graphsync").Start(trace.ContextWithSpan(ctx, requestTask.Span), "executeTask") + defer span.End() + log.Debugw("beginning request execution", "id", requestTask.Request.ID(), "peer", pid.String(), "root_cid", requestTask.Request.Root().String()) err := e.traverse(requestTask) + span.RecordError(err) if err != nil && !ipldutil.IsContextCancelErr(err) { e.manager.SendRequest(requestTask.P, gsmsg.CancelRequest(requestTask.Request.ID())) if !isPausedErr(err) { + span.SetStatus(codes.Error, err.Error()) select { case <-requestTask.Ctx.Done(): case requestTask.InProgressErr <- err: @@ -92,6 +101,7 @@ func (e *Executor) ExecuteTask(ctx context.Context, pid peer.ID, task *peertask. 
// RequestTask are parameters for a single request execution type RequestTask struct { Ctx context.Context + Span trace.Span Request gsmsg.GraphSyncRequest LastResponse *atomic.Value DoNotSendCids *cid.Set diff --git a/requestmanager/hooks/hooks_test.go b/requestmanager/hooks/hooks_test.go index 979057f9..4f008f09 100644 --- a/requestmanager/hooks/hooks_test.go +++ b/requestmanager/hooks/hooks_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/ipld/go-ipld-prime" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector/builder" peer "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" diff --git a/requestmanager/messages.go b/requestmanager/messages.go index e5709ea8..dd5acf3c 100644 --- a/requestmanager/messages.go +++ b/requestmanager/messages.go @@ -5,9 +5,11 @@ import ( "github.com/ipfs/go-peertaskqueue/peertask" "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p-core/peer" + "go.opentelemetry.io/otel/trace" "github.com/ipfs/go-graphsync" gsmsg "github.com/ipfs/go-graphsync/message" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/requestmanager/executor" ) @@ -76,13 +78,19 @@ type releaseRequestTaskMessage struct { p peer.ID task *peertask.Task err error + done chan struct{} } func (trm *releaseRequestTaskMessage) handle(rm *RequestManager) { rm.releaseRequestTask(trm.p, trm.task, trm.err) + select { + case <-rm.ctx.Done(): + case trm.done <- struct{}{}: + } } type newRequestMessage struct { + span trace.Span p peer.ID root ipld.Link selector ipld.Node @@ -93,7 +101,7 @@ type newRequestMessage struct { func (nrm *newRequestMessage) handle(rm *RequestManager) { var ipr inProgressRequest - ipr.request, ipr.incoming, ipr.incomingError = rm.newRequest(nrm.p, nrm.root, nrm.selector, nrm.extensions) + ipr.request, ipr.incoming, ipr.incomingError = rm.newRequest(nrm.span, nrm.p, nrm.root, nrm.selector, nrm.extensions) ipr.requestID = ipr.request.ID() select { @@ -101,3 +109,16 @@ func (nrm *newRequestMessage) handle(rm *RequestManager) { case <-rm.ctx.Done(): } } + +type peerStateMessage struct { + p peer.ID + peerStatsChan chan<- peerstate.PeerState +} + +func (psm *peerStateMessage) handle(rm *RequestManager) { + peerStats := rm.peerStats(psm.p) + select { + case psm.peerStatsChan <- peerStats: + case <-rm.ctx.Done(): + } +} diff --git a/requestmanager/requestmanager_test.go b/requestmanager/requestmanager_test.go index 44e7dc4e..c62d9702 100644 --- a/requestmanager/requestmanager_test.go +++ b/requestmanager/requestmanager_test.go @@ -978,6 +978,32 @@ func TestPauseResumeExternal(t *testing.T) { testutil.VerifyEmptyErrors(ctx, t, returnedErrorChan) } +func TestStats(t *testing.T) { + ctx := context.Background() + td := newTestData(ctx, t) + + requestCtx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + peers := testutil.GeneratePeers(2) + + blockChain2 := testutil.SetupBlockChain(ctx, t, td.persistence, 100, 5) + + _, _ = td.requestManager.NewRequest(requestCtx, peers[0], td.blockChain.TipLink, td.blockChain.Selector()) + _, _ = td.requestManager.NewRequest(requestCtx, peers[0], blockChain2.TipLink, blockChain2.Selector()) + _, _ = td.requestManager.NewRequest(requestCtx, peers[1], td.blockChain.TipLink, td.blockChain.Selector()) + + requestRecords := readNNetworkRequests(requestCtx, t, td.requestRecordChan, 3) + + peerState := td.requestManager.PeerState(peers[0]) + 
require.Len(t, peerState.RequestStates, 2) + require.Equal(t, peerState.RequestStates[requestRecords[0].gsr.ID()], graphsync.Running) + require.Equal(t, peerState.RequestStates[requestRecords[1].gsr.ID()], graphsync.Running) + require.Len(t, peerState.Active, 2) + require.Contains(t, peerState.Active, requestRecords[0].gsr.ID()) + require.Contains(t, peerState.Active, requestRecords[1].gsr.ID()) + require.Len(t, peerState.Pending, 0) +} + type requestRecord struct { gsr gsmsg.GraphSyncRequest p peer.ID diff --git a/requestmanager/server.go b/requestmanager/server.go index 20a5be0c..da7fd0f0 100644 --- a/requestmanager/server.go +++ b/requestmanager/server.go @@ -10,17 +10,23 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-peertaskqueue/peertracker" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/ipld/go-ipld-prime/traversal" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/libp2p/go-libp2p-core/peer" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "github.com/ipfs/go-graphsync" "github.com/ipfs/go-graphsync/cidset" "github.com/ipfs/go-graphsync/dedupkey" "github.com/ipfs/go-graphsync/ipldutil" gsmsg "github.com/ipfs/go-graphsync/message" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/requestmanager/executor" "github.com/ipfs/go-graphsync/requestmanager/hooks" ) @@ -49,14 +55,21 @@ func (rm *RequestManager) cleanupInProcessRequests() { } } -func (rm *RequestManager) newRequest(p peer.ID, root ipld.Link, selector ipld.Node, extensions []graphsync.ExtensionData) (gsmsg.GraphSyncRequest, chan graphsync.ResponseProgress, chan error) { +func (rm *RequestManager) newRequest(parentSpan trace.Span, p peer.ID, root ipld.Link, selector ipld.Node, extensions []graphsync.ExtensionData) (gsmsg.GraphSyncRequest, chan graphsync.ResponseProgress, chan error) { requestID := rm.nextRequestID rm.nextRequestID++ + parentSpan.SetAttributes(attribute.Int("requestID", int(requestID))) + ctx, span := otel.Tracer("graphsync").Start(trace.ContextWithSpan(rm.ctx, parentSpan), "newRequest") + defer span.End() + log.Infow("graphsync request initiated", "request id", requestID, "peer", p, "root", root) request, hooksResult, err := rm.validateRequest(requestID, p, root, selector, extensions) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + defer parentSpan.End() rp, err := rm.singleErrorResponse(err) return request, rp, err } @@ -65,22 +78,26 @@ func (rm *RequestManager) newRequest(p peer.ID, root ipld.Link, selector ipld.No if has { doNotSendCids, err = cidset.DecodeCidSet(doNotSendCidsData) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + defer parentSpan.End() rp, err := rm.singleErrorResponse(err) return request, rp, err } } else { doNotSendCids = cid.NewSet() } - ctx, cancel := context.WithCancel(rm.ctx) + ctx, cancel := context.WithCancel(ctx) requestStatus := &inProgressRequestStatus{ ctx: ctx, + span: parentSpan, startTime: time.Now(), cancelFn: cancel, p: p, pauseMessages: make(chan struct{}, 1), doNotSendCids: doNotSendCids, request: request, - state: queued, + state: graphsync.Queued, nodeStyleChooser: hooksResult.CustomChooser, inProgressChan: make(chan graphsync.ResponseProgress), inProgressErr: make(chan error), @@ 
-138,9 +155,10 @@ func (rm *RequestManager) requestTask(requestID graphsync.RequestID) executor.Re rm.outgoingRequestProcessingListeners.NotifyOutgoingRequestProcessingListeners(ipr.p, ipr.request, inProgressCount) } - ipr.state = running + ipr.state = graphsync.Running return executor.RequestTask{ Ctx: ipr.ctx, + Span: ipr.span, Request: ipr.request, LastResponse: &ipr.lastResponse, DoNotSendCids: ipr.doNotSendCids, @@ -163,6 +181,10 @@ func (rm *RequestManager) getRequestTask(p peer.ID, task *peertask.Task) executo } func (rm *RequestManager) terminateRequest(requestID graphsync.RequestID, ipr *inProgressRequestStatus) { + _, span := otel.Tracer("graphsync").Start(trace.ContextWithSpan(rm.ctx, ipr.span), "terminateRequest") + defer span.End() + defer ipr.span.End() // parent span for this whole request + if ipr.terminalError != nil { select { case ipr.inProgressErr <- ipr.terminalError: @@ -203,7 +225,7 @@ func (rm *RequestManager) releaseRequestTask(p peer.ID, task *peertask.Task, err return } if _, ok := err.(hooks.ErrPaused); ok { - ipr.state = paused + ipr.state = graphsync.Paused return } log.Infow("graphsync request complete", "request id", requestID, "peer", ipr.p, "total time", time.Since(ipr.startTime)) @@ -233,7 +255,7 @@ func (rm *RequestManager) cancelOnError(requestID graphsync.RequestID, ipr *inPr if ipr.terminalError == nil { ipr.terminalError = terminalError } - if ipr.state != running { + if ipr.state != graphsync.Running { rm.terminateRequest(requestID, ipr) } else { ipr.cancelFn() @@ -348,10 +370,10 @@ func (rm *RequestManager) unpause(id graphsync.RequestID, extensions []graphsync if !ok { return graphsync.RequestNotFoundErr{} } - if inProgressRequestStatus.state != paused { + if inProgressRequestStatus.state != graphsync.Paused { return errors.New("request is not paused") } - inProgressRequestStatus.state = queued + inProgressRequestStatus.state = graphsync.Queued inProgressRequestStatus.request = inProgressRequestStatus.request.ReplaceExtensions(extensions) rm.requestQueue.PushTask(inProgressRequestStatus.p, peertask.Task{Topic: id, Priority: math.MaxInt32, Work: 1}) return nil @@ -362,7 +384,7 @@ func (rm *RequestManager) pause(id graphsync.RequestID) error { if !ok { return graphsync.RequestNotFoundErr{} } - if inProgressRequestStatus.state == paused { + if inProgressRequestStatus.state == graphsync.Paused { return errors.New("request is already paused") } select { @@ -371,3 +393,32 @@ func (rm *RequestManager) pause(id graphsync.RequestID) error { } return nil } + +func (rm *RequestManager) peerStats(p peer.ID) peerstate.PeerState { + requestStates := make(graphsync.RequestStates) + for id, ipr := range rm.inProgressRequestStatuses { + if ipr.p == p { + requestStates[id] = graphsync.RequestState(ipr.state) + } + } + peerTopics := rm.requestQueue.PeerTopics(p) + return peerstate.PeerState{RequestStates: requestStates, TaskQueueState: fromPeerTopics(peerTopics)} +} + +func fromPeerTopics(pt *peertracker.PeerTrackerTopics) peerstate.TaskQueueState { + if pt == nil { + return peerstate.TaskQueueState{} + } + active := make([]graphsync.RequestID, 0, len(pt.Active)) + for _, topic := range pt.Active { + active = append(active, topic.(graphsync.RequestID)) + } + pending := make([]graphsync.RequestID, 0, len(pt.Pending)) + for _, topic := range pt.Pending { + pending = append(pending, topic.(graphsync.RequestID)) + } + return peerstate.TaskQueueState{ + Active: active, + Pending: pending, + } +} diff --git a/requestmanager/utils.go b/requestmanager/utils.go index 
a4f0c758..4f875deb 100644 --- a/requestmanager/utils.go +++ b/requestmanager/utils.go @@ -4,6 +4,7 @@ import ( "github.com/ipfs/go-graphsync" gsmsg "github.com/ipfs/go-graphsync/message" "github.com/ipfs/go-graphsync/metadata" + "github.com/ipfs/go-peertaskqueue/peertask" ) func metadataForResponses(responses []gsmsg.GraphSyncResponse) map[graphsync.RequestID]metadata.Metadata { @@ -23,3 +24,8 @@ func metadataForResponses(responses []gsmsg.GraphSyncResponse) map[graphsync.Req } return responseMetadata } + +// RequestIDFromTaskTopic extracts a request ID from a given peer task topic +func RequestIDFromTaskTopic(topic peertask.Topic) graphsync.RequestID { + return topic.(graphsync.RequestID) +} diff --git a/responsemanager/client.go b/responsemanager/client.go index dec66152..de284e3a 100644 --- a/responsemanager/client.go +++ b/responsemanager/client.go @@ -3,6 +3,7 @@ package responsemanager import ( "context" "errors" + "time" logging "github.com/ipfs/go-log/v2" "github.com/ipfs/go-peertaskqueue/peertask" @@ -14,6 +15,7 @@ import ( gsmsg "github.com/ipfs/go-graphsync/message" "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/notifications" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/responsemanager/hooks" "github.com/ipfs/go-graphsync/responsemanager/queryexecutor" "github.com/ipfs/go-graphsync/responsemanager/responseassembler" @@ -26,14 +28,6 @@ import ( var log = logging.Logger("graphsync") -type state uint64 - -const ( - queued state = iota - running - paused -) - type inProgressResponseStatus struct { ctx context.Context cancelFn func() @@ -42,8 +36,9 @@ type inProgressResponseStatus struct { traverser ipldutil.Traverser signals queryexecutor.ResponseSignals updates []gsmsg.GraphSyncRequest - state state + state graphsync.RequestState subscriber *notifications.TopicDataSubscriber + startTime time.Time } type responseKey struct { @@ -224,7 +219,12 @@ func (rm *ResponseManager) GetUpdates(p peer.ID, requestID graphsync.RequestID, // FinishTask marks a task from the task queue as done func (rm *ResponseManager) FinishTask(task *peertask.Task, err error) { - rm.send(&finishTaskRequest{task, err}, nil) + done := make(chan struct{}, 1) + rm.send(&finishTaskRequest{task, err, done}, nil) + select { + case <-rm.ctx.Done(): + case <-done: + } } // CloseWithNetworkError closes a request due to a network error @@ -232,6 +232,18 @@ func (rm *ResponseManager) CloseWithNetworkError(p peer.ID, requestID graphsync. 
rm.send(&errorRequestMessage{p, requestID, queryexecutor.ErrNetworkError, make(chan error, 1)}, nil) } +// PeerState gets current state of the outgoing responses for a given peer +func (rm *ResponseManager) PeerState(p peer.ID) peerstate.PeerState { + response := make(chan peerstate.PeerState) + rm.send(&peerStateMessage{p, response}, nil) + select { + case <-rm.ctx.Done(): + return peerstate.PeerState{} + case peerState := <-response: + return peerState + } +} + func (rm *ResponseManager) send(message responseManagerMessage, done <-chan struct{}) { select { case <-rm.ctx.Done(): diff --git a/responsemanager/hooks/hooks_test.go b/responsemanager/hooks/hooks_test.go index 9ed18bd9..e8a56903 100644 --- a/responsemanager/hooks/hooks_test.go +++ b/responsemanager/hooks/hooks_test.go @@ -8,7 +8,7 @@ import ( "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector/builder" peer "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" diff --git a/responsemanager/messages.go b/responsemanager/messages.go index 65340724..0a05f08e 100644 --- a/responsemanager/messages.go +++ b/responsemanager/messages.go @@ -6,6 +6,7 @@ import ( "github.com/ipfs/go-graphsync" gsmsg "github.com/ipfs/go-graphsync/message" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/responsemanager/queryexecutor" ) @@ -85,10 +86,15 @@ func (rur *responseUpdateRequest) handle(rm *ResponseManager) { type finishTaskRequest struct { task *peertask.Task err error + done chan struct{} } func (ftr *finishTaskRequest) handle(rm *ResponseManager) { rm.finishTask(ftr.task, ftr.err) + select { + case <-rm.ctx.Done(): + case ftr.done <- struct{}{}: + } } type startTaskRequest struct { @@ -108,3 +114,16 @@ func (str *startTaskRequest) handle(rm *ResponseManager) { func (prm *processRequestMessage) handle(rm *ResponseManager) { rm.processRequests(prm.p, prm.requests) } + +type peerStateMessage struct { + p peer.ID + peerStatsChan chan<- peerstate.PeerState +} + +func (psm *peerStateMessage) handle(rm *ResponseManager) { + peerState := rm.peerState(psm.p) + select { + case psm.peerStatsChan <- peerState: + case <-rm.ctx.Done(): + } +} diff --git a/responsemanager/responsemanager_test.go b/responsemanager/responsemanager_test.go index b05582d4..0745df4f 100644 --- a/responsemanager/responsemanager_test.go +++ b/responsemanager/responsemanager_test.go @@ -11,9 +11,10 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-peertaskqueue/peertracker" ipld "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/libp2p/go-libp2p-core/peer" "github.com/stretchr/testify/require" @@ -162,6 +163,38 @@ func TestEarlyCancellation(t *testing.T) { td.assertNoResponses() td.connManager.RefuteProtected(t, td.p) } + +func TestStats(t *testing.T) { + td := newTestData(t) + defer td.cancel() + // we're not testing the queryexecutor or taskqueue here, we're testing + // that request state is tracked correctly inside the responsemanager + // itself, so we won't let responses get far enough to actually start + // running + responseManager := 
td.nullTaskQueueResponseManager() + td.requestHooks.Register(selectorvalidator.SelectorValidator(100)) + responseManager.Startup() + responseManager.ProcessRequests(td.ctx, td.p, td.requests) + p2 := testutil.GeneratePeers(1)[0] + responseManager.ProcessRequests(td.ctx, p2, td.requests) + peerState := responseManager.PeerState(td.p) + require.Len(t, peerState.RequestStates, 1) + require.Equal(t, peerState.RequestStates[td.requestID], graphsync.Queued) + require.Len(t, peerState.Pending, 1) + require.Equal(t, peerState.Pending[0], td.requestID) + require.Len(t, peerState.Active, 0) + // no inconsistencies + require.Len(t, peerState.Diagnostics(), 0) + peerState = responseManager.PeerState(p2) + require.Len(t, peerState.RequestStates, 1) + require.Equal(t, peerState.RequestStates[td.requestID], graphsync.Queued) + require.Len(t, peerState.Pending, 1) + require.Equal(t, peerState.Pending[0], td.requestID) + require.Len(t, peerState.Active, 0) + // no inconsistencies + require.Len(t, peerState.Diagnostics(), 0) + +} func TestMissingContent(t *testing.T) { t.Run("missing root block", func(t *testing.T) { td := newTestData(t) @@ -1090,7 +1123,7 @@ func (td *testData) newResponseManager() *ResponseManager { } func (td *testData) nullTaskQueueResponseManager() *ResponseManager { - ntq := nullTaskQueue{} + ntq := nullTaskQueue{tasksQueued: make(map[peer.ID][]peertask.Topic)} rm := New(td.ctx, td.persistence, td.responseAssembler, td.requestQueuedHooks, td.requestHooks, td.updateHooks, td.completedListeners, td.cancelledListeners, td.blockSentListeners, td.networkErrorListeners, 6, td.connManager, 0, ntq) return rm } @@ -1111,6 +1144,7 @@ func (td *testData) newQueryExecutor(manager queryexecutor.Manager) *queryexecut func (td *testData) assertPausedRequest() { var pausedRequest pausedRequest testutil.AssertReceive(td.ctx, td.t, td.pausedRequests, &pausedRequest, "should pause request") + td.taskqueue.WaitForNoActiveTasks() } func (td *testData) getAllBlocks() []blocks.Block { @@ -1147,6 +1181,7 @@ func (td *testData) assertCompleteRequestWith(expectedCode graphsync.ResponseSta var status graphsync.ResponseStatusCode testutil.AssertReceive(td.ctx, td.t, td.completedResponseStatuses, &status, "should receive status") require.Equal(td.t, expectedCode, status) + td.taskqueue.WaitForNoActiveTasks() } func (td *testData) assertOnlyCompleteProcessingWith(expectedCode graphsync.ResponseStatusCode) { @@ -1260,11 +1295,19 @@ func (td *testData) assertHasNetworkErrors(err error) { require.EqualError(td.t, receivedErr, err.Error()) } -type nullTaskQueue struct{} +type nullTaskQueue struct { + tasksQueued map[peer.ID][]peertask.Topic +} + +func (ntq nullTaskQueue) PushTask(p peer.ID, task peertask.Task) { + ntq.tasksQueued[p] = append(ntq.tasksQueued[p], task.Topic) +} -func (ntq nullTaskQueue) PushTask(p peer.ID, task peertask.Task) {} func (ntq nullTaskQueue) TaskDone(p peer.ID, task *peertask.Task) {} func (ntq nullTaskQueue) Remove(t peertask.Topic, p peer.ID) {} func (ntq nullTaskQueue) Stats() graphsync.RequestStats { return graphsync.RequestStats{} } +func (ntq nullTaskQueue) PeerTopics(p peer.ID) *peertracker.PeerTrackerTopics { + return &peertracker.PeerTrackerTopics{Pending: ntq.tasksQueued[p]} +} var _ taskqueue.TaskQueue = nullTaskQueue{} diff --git a/responsemanager/server.go b/responsemanager/server.go index bfcfa04f..d754b423 100644 --- a/responsemanager/server.go +++ b/responsemanager/server.go @@ -4,14 +4,17 @@ import ( "context" "errors" "math" + "time" 
"github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-peertaskqueue/peertracker" "github.com/libp2p/go-libp2p-core/peer" "github.com/ipfs/go-graphsync" "github.com/ipfs/go-graphsync/ipldutil" gsmsg "github.com/ipfs/go-graphsync/message" "github.com/ipfs/go-graphsync/notifications" + "github.com/ipfs/go-graphsync/peerstate" "github.com/ipfs/go-graphsync/responsemanager/hooks" "github.com/ipfs/go-graphsync/responsemanager/queryexecutor" "github.com/ipfs/go-graphsync/responsemanager/responseassembler" @@ -45,7 +48,7 @@ func (rm *ResponseManager) processUpdate(key responseKey, update gsmsg.GraphSync log.Warnf("received update for non existent request, peer %s, request ID %d", key.p.Pretty(), key.requestID) return } - if response.state != paused { + if response.state != graphsync.Paused { response.updates = append(response.updates, update) select { case response.signals.UpdateSignal <- struct{}{}: @@ -83,10 +86,10 @@ func (rm *ResponseManager) unpauseRequest(p peer.ID, requestID graphsync.Request if !ok { return errors.New("could not find request") } - if inProgressResponse.state != paused { + if inProgressResponse.state != graphsync.Paused { return errors.New("request is not paused") } - inProgressResponse.state = queued + inProgressResponse.state = graphsync.Queued if len(extensions) > 0 { _ = rm.responseAssembler.Transaction(p, requestID, func(rb responseassembler.ResponseBuilder) error { for _, extension := range extensions { @@ -107,7 +110,7 @@ func (rm *ResponseManager) abortRequest(p peer.ID, requestID graphsync.RequestID return errors.New("could not find request") } - if response.state != running { + if response.state != graphsync.Running { _ = rm.responseAssembler.Transaction(p, requestID, func(rb responseassembler.ResponseBuilder) error { if ipldutil.IsContextCancelErr(err) { rm.connManager.Unprotect(p, requestID.Tag()) @@ -155,6 +158,11 @@ func (rm *ResponseManager) processRequests(p peer.ID, requests []gsmsg.GraphSync networkErrorListeners: rm.networkErrorListeners, connManager: rm.connManager, }) + log.Infow("graphsync request initiated", "request id", request.ID(), "peer", p, "root", request.Root()) + ipr, ok := rm.inProgressResponses[key] + if ok && ipr.state == graphsync.Running { + log.Warnf("there is an identical request already in progress", "request id", request.ID(), "peer", p) + } rm.inProgressResponses[key] = &inProgressResponseStatus{ @@ -167,7 +175,8 @@ func (rm *ResponseManager) processRequests(p peer.ID, requests []gsmsg.GraphSync UpdateSignal: make(chan struct{}, 1), ErrSignal: make(chan error, 1), }, - state: queued, + state: graphsync.Queued, + startTime: time.Now(), } // TODO: Use a better work estimation metric. 
@@ -180,6 +189,8 @@ func (rm *ResponseManager) taskDataForKey(key responseKey) queryexecutor.Respons if !hasResponse { return queryexecutor.ResponseTask{Empty: true} } + log.Infow("graphsync response processing begins", "request id", key.requestID, "peer", key.p, "total time", time.Since(response.startTime)) + if response.loader == nil || response.traverser == nil { loader, traverser, isPaused, err := (&queryPreparer{rm.requestHooks, rm.responseAssembler, rm.linkSystem, rm.maxLinksPerRequest}).prepareQuery(response.ctx, key.p, response.request, response.signals, response.subscriber) if err != nil { @@ -190,11 +201,11 @@ func (rm *ResponseManager) taskDataForKey(key responseKey) queryexecutor.Respons response.loader = loader response.traverser = traverser if isPaused { - response.state = paused + response.state = graphsync.Paused return queryexecutor.ResponseTask{Empty: true} } } - response.state = running + response.state = graphsync.Running return queryexecutor.ResponseTask{ Ctx: response.ctx, Empty: false, @@ -212,6 +223,7 @@ func (rm *ResponseManager) startTask(task *peertask.Task) queryexecutor.Response if taskData.Empty { rm.responseQueue.TaskDone(key.p, task) } + return taskData } @@ -223,9 +235,11 @@ func (rm *ResponseManager) finishTask(task *peertask.Task, err error) { return } if _, ok := err.(hooks.ErrPaused); ok { - response.state = paused + response.state = graphsync.Paused return } + log.Infow("graphsync response processing complete (messages still sending)", "request id", key.requestID, "peer", key.p, "total time", time.Since(response.startTime)) + if err != nil { log.Infof("response failed: %w", err) } @@ -249,7 +263,7 @@ func (rm *ResponseManager) pauseRequest(p peer.ID, requestID graphsync.RequestID if !ok { return errors.New("could not find request") } - if inProgressResponse.state == paused { + if inProgressResponse.state == graphsync.Paused { return errors.New("request is already paused") } select { @@ -258,3 +272,32 @@ func (rm *ResponseManager) pauseRequest(p peer.ID, requestID graphsync.RequestID } return nil } + +func (rm *ResponseManager) peerState(p peer.ID) peerstate.PeerState { + requestStates := make(graphsync.RequestStates) + for key, ipr := range rm.inProgressResponses { + if key.p == p { + requestStates[key.requestID] = ipr.state + } + } + peerTopics := rm.responseQueue.PeerTopics(p) + return peerstate.PeerState{RequestStates: requestStates, TaskQueueState: fromPeerTopics(peerTopics)} +} + +func fromPeerTopics(pt *peertracker.PeerTrackerTopics) peerstate.TaskQueueState { + if pt == nil { + return peerstate.TaskQueueState{} + } + active := make([]graphsync.RequestID, 0, len(pt.Active)) + for _, topic := range pt.Active { + active = append(active, topic.(responseKey).requestID) + } + pending := make([]graphsync.RequestID, 0, len(pt.Pending)) + for _, topic := range pt.Pending { + pending = append(pending, topic.(responseKey).requestID) + } + return peerstate.TaskQueueState{ + Active: active, + Pending: pending, + } +} diff --git a/selectorvalidator/selectorvalidator.go b/selectorvalidator/selectorvalidator.go index 0953714d..ed352a46 100644 --- a/selectorvalidator/selectorvalidator.go +++ b/selectorvalidator/selectorvalidator.go @@ -4,7 +4,7 @@ import ( "errors" ipld "github.com/ipld/go-ipld-prime" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal" "github.com/ipld/go-ipld-prime/traversal/selector" 
"github.com/ipld/go-ipld-prime/traversal/selector/builder" diff --git a/selectorvalidator/selectorvalidator_test.go b/selectorvalidator/selectorvalidator_test.go index 6baaf69a..cd513637 100644 --- a/selectorvalidator/selectorvalidator_test.go +++ b/selectorvalidator/selectorvalidator_test.go @@ -4,7 +4,7 @@ import ( "testing" ipld "github.com/ipld/go-ipld-prime" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/stretchr/testify/require" diff --git a/taskqueue/taskqueue.go b/taskqueue/taskqueue.go index 67810ca3..7a667a29 100644 --- a/taskqueue/taskqueue.go +++ b/taskqueue/taskqueue.go @@ -2,10 +2,12 @@ package taskqueue import ( "context" + "sync" "time" "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" + "github.com/ipfs/go-peertaskqueue/peertracker" peer "github.com/libp2p/go-libp2p-core/peer" "github.com/ipfs/go-graphsync" @@ -23,16 +25,19 @@ type TaskQueue interface { TaskDone(p peer.ID, task *peertask.Task) Remove(t peertask.Topic, p peer.ID) Stats() graphsync.RequestStats + PeerTopics(p peer.ID) *peertracker.PeerTrackerTopics } -// TaskQueue is a wrapper around peertaskqueue.PeerTaskQueue that manages running workers +// WorkerTaskQueue is a wrapper around peertaskqueue.PeerTaskQueue that manages running workers // that pop tasks and execute them type WorkerTaskQueue struct { - ctx context.Context - cancelFn func() - peerTaskQueue *peertaskqueue.PeerTaskQueue - workSignal chan struct{} - ticker *time.Ticker + *peertaskqueue.PeerTaskQueue + ctx context.Context + cancelFn func() + workSignal chan struct{} + noTaskCond *sync.Cond + ticker *time.Ticker + activeTasks int32 } // NewTaskQueue initializes a new queue @@ -41,15 +46,16 @@ func NewTaskQueue(ctx context.Context) *WorkerTaskQueue { return &WorkerTaskQueue{ ctx: ctx, cancelFn: cancelFn, - peerTaskQueue: peertaskqueue.New(), + PeerTaskQueue: peertaskqueue.New(), workSignal: make(chan struct{}, 1), + noTaskCond: sync.NewCond(&sync.Mutex{}), ticker: time.NewTicker(thawSpeed), } } // PushTask pushes a new task on to the queue func (tq *WorkerTaskQueue) PushTask(p peer.ID, task peertask.Task) { - tq.peerTaskQueue.PushTasks(p, task) + tq.PeerTaskQueue.PushTasks(p, task) select { case tq.workSignal <- struct{}{}: default: @@ -58,12 +64,12 @@ func (tq *WorkerTaskQueue) PushTask(p peer.ID, task peertask.Task) { // TaskDone marks a task as completed so further tasks can be executed func (tq *WorkerTaskQueue) TaskDone(p peer.ID, task *peertask.Task) { - tq.peerTaskQueue.TasksDone(p, task) + tq.PeerTaskQueue.TasksDone(p, task) } // Stats returns statistics about a task queue func (tq *WorkerTaskQueue) Stats() graphsync.RequestStats { - ptqstats := tq.peerTaskQueue.Stats() + ptqstats := tq.PeerTaskQueue.Stats() return graphsync.RequestStats{ TotalPeers: uint64(ptqstats.NumPeers), Active: uint64(ptqstats.NumActive), @@ -71,11 +77,6 @@ func (tq *WorkerTaskQueue) Stats() graphsync.RequestStats { } } -// Remove removes a task from the execution queue -func (tq *WorkerTaskQueue) Remove(topic peertask.Topic, p peer.ID) { - tq.peerTaskQueue.Remove(topic, p) -} - // Startup runs the given number of task workers with the given executor func (tq *WorkerTaskQueue) Startup(workerCount uint64, executor Executor) { for i := uint64(0); i < workerCount; i++ { @@ -88,23 +89,40 @@ func (tq *WorkerTaskQueue) Shutdown() { 
tq.cancelFn() } +func (tq *WorkerTaskQueue) WaitForNoActiveTasks() { + tq.noTaskCond.L.Lock() + for tq.activeTasks > 0 { + tq.noTaskCond.Wait() + } + tq.noTaskCond.L.Unlock() +} + func (tq *WorkerTaskQueue) worker(executor Executor) { targetWork := 1 for { - pid, tasks, _ := tq.peerTaskQueue.PopTasks(targetWork) + pid, tasks, _ := tq.PeerTaskQueue.PopTasks(targetWork) for len(tasks) == 0 { select { case <-tq.ctx.Done(): return case <-tq.workSignal: - pid, tasks, _ = tq.peerTaskQueue.PopTasks(targetWork) + pid, tasks, _ = tq.PeerTaskQueue.PopTasks(targetWork) case <-tq.ticker.C: - tq.peerTaskQueue.ThawRound() - pid, tasks, _ = tq.peerTaskQueue.PopTasks(targetWork) + tq.PeerTaskQueue.ThawRound() + pid, tasks, _ = tq.PeerTaskQueue.PopTasks(targetWork) } } for _, task := range tasks { + tq.noTaskCond.L.Lock() + tq.activeTasks = tq.activeTasks + 1 + tq.noTaskCond.L.Unlock() terminate := executor.ExecuteTask(tq.ctx, pid, task) + tq.noTaskCond.L.Lock() + tq.activeTasks = tq.activeTasks - 1 + if tq.activeTasks == 0 { + tq.noTaskCond.Broadcast() + } + tq.noTaskCond.L.Unlock() if terminate { return } diff --git a/testutil/chaintypes/doc.go b/testutil/chaintypes/doc.go new file mode 100644 index 00000000..9225c6ff --- /dev/null +++ b/testutil/chaintypes/doc.go @@ -0,0 +1,3 @@ +//go:generate go run gen.go + +package chaintypes diff --git a/testutil/chaintypes/gen/main.go b/testutil/chaintypes/gen.go similarity index 94% rename from testutil/chaintypes/gen/main.go rename to testutil/chaintypes/gen.go index d2786400..43fef6cd 100644 --- a/testutil/chaintypes/gen/main.go +++ b/testutil/chaintypes/gen.go @@ -1,14 +1,14 @@ +//go:build ignore +// +build ignore + package main import ( - "os/exec" - "github.com/ipld/go-ipld-prime/schema" gengo "github.com/ipld/go-ipld-prime/schema/gen/go" ) func main() { - ts := schema.TypeSystem{} ts.Init() adjCfg := &gengo.AdjunctCfg{} @@ -28,5 +28,4 @@ func main() { schema.SpawnStructRepresentationMap(nil), )) gengo.Generate(".", pkgName, ts, adjCfg) - _ = exec.Command("go", "fmt").Run() } diff --git a/testutil/chaintypes/ipldsch_minima.go b/testutil/chaintypes/ipldsch_minima.go index 3de57a01..6b6bb4e9 100644 --- a/testutil/chaintypes/ipldsch_minima.go +++ b/testutil/chaintypes/ipldsch_minima.go @@ -5,7 +5,7 @@ package chaintypes import ( "fmt" - "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/datamodel" "github.com/ipld/go-ipld-prime/schema" ) @@ -36,16 +36,16 @@ type _ErrorThunkAssembler struct { e error } -func (ea _ErrorThunkAssembler) BeginMap(_ int64) (ipld.MapAssembler, error) { return nil, ea.e } -func (ea _ErrorThunkAssembler) BeginList(_ int64) (ipld.ListAssembler, error) { return nil, ea.e } -func (ea _ErrorThunkAssembler) AssignNull() error { return ea.e } -func (ea _ErrorThunkAssembler) AssignBool(bool) error { return ea.e } -func (ea _ErrorThunkAssembler) AssignInt(int64) error { return ea.e } -func (ea _ErrorThunkAssembler) AssignFloat(float64) error { return ea.e } -func (ea _ErrorThunkAssembler) AssignString(string) error { return ea.e } -func (ea _ErrorThunkAssembler) AssignBytes([]byte) error { return ea.e } -func (ea _ErrorThunkAssembler) AssignLink(ipld.Link) error { return ea.e } -func (ea _ErrorThunkAssembler) AssignNode(ipld.Node) error { return ea.e } -func (ea _ErrorThunkAssembler) Prototype() ipld.NodePrototype { +func (ea _ErrorThunkAssembler) BeginMap(_ int64) (datamodel.MapAssembler, error) { return nil, ea.e } +func (ea _ErrorThunkAssembler) BeginList(_ int64) (datamodel.ListAssembler, error) { return nil, 
ea.e } +func (ea _ErrorThunkAssembler) AssignNull() error { return ea.e } +func (ea _ErrorThunkAssembler) AssignBool(bool) error { return ea.e } +func (ea _ErrorThunkAssembler) AssignInt(int64) error { return ea.e } +func (ea _ErrorThunkAssembler) AssignFloat(float64) error { return ea.e } +func (ea _ErrorThunkAssembler) AssignString(string) error { return ea.e } +func (ea _ErrorThunkAssembler) AssignBytes([]byte) error { return ea.e } +func (ea _ErrorThunkAssembler) AssignLink(datamodel.Link) error { return ea.e } +func (ea _ErrorThunkAssembler) AssignNode(datamodel.Node) error { return ea.e } +func (ea _ErrorThunkAssembler) Prototype() datamodel.NodePrototype { panic(fmt.Errorf("cannot get prototype from error-carrying assembler: already derailed with error: %w", ea.e)) } diff --git a/testutil/chaintypes/ipldsch_satisfaction.go b/testutil/chaintypes/ipldsch_satisfaction.go index 336d75a1..bbae16b6 100644 --- a/testutil/chaintypes/ipldsch_satisfaction.go +++ b/testutil/chaintypes/ipldsch_satisfaction.go @@ -3,7 +3,7 @@ package chaintypes // Code generated by go-ipld-prime gengo. DO NOT EDIT. import ( - ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/datamodel" "github.com/ipld/go-ipld-prime/node/mixins" "github.com/ipld/go-ipld-prime/schema" ) @@ -30,12 +30,12 @@ func (m MaybeBlock) IsAbsent() bool { func (m MaybeBlock) Exists() bool { return m.m == schema.Maybe_Value } -func (m MaybeBlock) AsNode() ipld.Node { +func (m MaybeBlock) AsNode() datamodel.Node { switch m.m { case schema.Maybe_Absent: - return ipld.Absent + return datamodel.Absent case schema.Maybe_Null: - return ipld.Null + return datamodel.Null case schema.Maybe_Value: return m.v default: @@ -53,36 +53,36 @@ var ( fieldName__Block_Parents = _String{"Parents"} fieldName__Block_Messages = _String{"Messages"} ) -var _ ipld.Node = (Block)(&_Block{}) +var _ datamodel.Node = (Block)(&_Block{}) var _ schema.TypedNode = (Block)(&_Block{}) -func (Block) Kind() ipld.Kind { - return ipld.Kind_Map +func (Block) Kind() datamodel.Kind { + return datamodel.Kind_Map } -func (n Block) LookupByString(key string) (ipld.Node, error) { +func (n Block) LookupByString(key string) (datamodel.Node, error) { switch key { case "Parents": return &n.Parents, nil case "Messages": return &n.Messages, nil default: - return nil, schema.ErrNoSuchField{Type: nil /*TODO*/, Field: ipld.PathSegmentOfString(key)} + return nil, schema.ErrNoSuchField{Type: nil /*TODO*/, Field: datamodel.PathSegmentOfString(key)} } } -func (n Block) LookupByNode(key ipld.Node) (ipld.Node, error) { +func (n Block) LookupByNode(key datamodel.Node) (datamodel.Node, error) { ks, err := key.AsString() if err != nil { return nil, err } return n.LookupByString(ks) } -func (Block) LookupByIndex(idx int64) (ipld.Node, error) { +func (Block) LookupByIndex(idx int64) (datamodel.Node, error) { return mixins.Map{TypeName: "chaintypes.Block"}.LookupByIndex(0) } -func (n Block) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (n Block) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { return n.LookupByString(seg.String()) } -func (n Block) MapIterator() ipld.MapIterator { +func (n Block) MapIterator() datamodel.MapIterator { return &_Block__MapItr{n, 0} } @@ -91,9 +91,9 @@ type _Block__MapItr struct { idx int } -func (itr *_Block__MapItr) Next() (k ipld.Node, v ipld.Node, _ error) { +func (itr *_Block__MapItr) Next() (k datamodel.Node, v datamodel.Node, _ error) { if itr.idx >= 2 { - return nil, nil, ipld.ErrIteratorOverread{} + 
return nil, nil, datamodel.ErrIteratorOverread{} } switch itr.idx { case 0: @@ -112,7 +112,7 @@ func (itr *_Block__MapItr) Done() bool { return itr.idx >= 2 } -func (Block) ListIterator() ipld.ListIterator { +func (Block) ListIterator() datamodel.ListIterator { return nil } func (Block) Length() int64 { @@ -139,16 +139,16 @@ func (Block) AsString() (string, error) { func (Block) AsBytes() ([]byte, error) { return mixins.Map{TypeName: "chaintypes.Block"}.AsBytes() } -func (Block) AsLink() (ipld.Link, error) { +func (Block) AsLink() (datamodel.Link, error) { return mixins.Map{TypeName: "chaintypes.Block"}.AsLink() } -func (Block) Prototype() ipld.NodePrototype { +func (Block) Prototype() datamodel.NodePrototype { return _Block__Prototype{} } type _Block__Prototype struct{} -func (_Block__Prototype) NewBuilder() ipld.NodeBuilder { +func (_Block__Prototype) NewBuilder() datamodel.NodeBuilder { var nb _Block__Builder nb.Reset() return &nb @@ -158,7 +158,7 @@ type _Block__Builder struct { _Block__Assembler } -func (nb *_Block__Builder) Build() ipld.Node { +func (nb *_Block__Builder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -195,7 +195,7 @@ var ( fieldBits__Block_sufficient = 0 + 1<<0 + 1<<1 ) -func (na *_Block__Assembler) BeginMap(int64) (ipld.MapAssembler, error) { +func (na *_Block__Assembler) BeginMap(int64) (datamodel.MapAssembler, error) { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -208,7 +208,7 @@ func (na *_Block__Assembler) BeginMap(int64) (ipld.MapAssembler, error) { } return na, nil } -func (_Block__Assembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_Block__Assembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.MapAssembler{TypeName: "chaintypes.Block"}.BeginList(0) } func (na *_Block__Assembler) AssignNull() error { @@ -240,10 +240,10 @@ func (_Block__Assembler) AssignString(string) error { func (_Block__Assembler) AssignBytes([]byte) error { return mixins.MapAssembler{TypeName: "chaintypes.Block"}.AssignBytes(nil) } -func (_Block__Assembler) AssignLink(ipld.Link) error { +func (_Block__Assembler) AssignLink(datamodel.Link) error { return mixins.MapAssembler{TypeName: "chaintypes.Block"}.AssignLink(nil) } -func (na *_Block__Assembler) AssignNode(v ipld.Node) error { +func (na *_Block__Assembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -263,8 +263,8 @@ func (na *_Block__Assembler) AssignNode(v ipld.Node) error { *na.m = schema.Maybe_Value return nil } - if v.Kind() != ipld.Kind_Map { - return ipld.ErrWrongKind{TypeName: "chaintypes.Block", MethodName: "AssignNode", AppropriateKind: ipld.KindSet_JustMap, ActualKind: v.Kind()} + if v.Kind() != datamodel.Kind_Map { + return datamodel.ErrWrongKind{TypeName: "chaintypes.Block", MethodName: "AssignNode", AppropriateKind: datamodel.KindSet_JustMap, ActualKind: v.Kind()} } itr := v.MapIterator() for !itr.Done() { @@ -281,7 +281,7 @@ func (na *_Block__Assembler) AssignNode(v ipld.Node) error { } return na.Finish() } -func (_Block__Assembler) Prototype() ipld.NodePrototype { +func (_Block__Assembler) Prototype() datamodel.NodePrototype { return _Block__Prototype{} } func (ma *_Block__Assembler) valueFinishTidy() bool { @@ -310,7 +310,7 @@ func (ma *_Block__Assembler) valueFinishTidy() bool { panic("unreachable") } } -func (ma *_Block__Assembler) 
AssembleEntry(k string) (ipld.NodeAssembler, error) { +func (ma *_Block__Assembler) AssembleEntry(k string) (datamodel.NodeAssembler, error) { switch ma.state { case maState_initial: // carry on @@ -328,7 +328,7 @@ func (ma *_Block__Assembler) AssembleEntry(k string) (ipld.NodeAssembler, error) switch k { case "Parents": if ma.s&fieldBit__Block_Parents != 0 { - return nil, ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Parents} + return nil, datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Parents} } ma.s += fieldBit__Block_Parents ma.state = maState_midValue @@ -338,7 +338,7 @@ func (ma *_Block__Assembler) AssembleEntry(k string) (ipld.NodeAssembler, error) return &ma.ca_Parents, nil case "Messages": if ma.s&fieldBit__Block_Messages != 0 { - return nil, ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Messages} + return nil, datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Messages} } ma.s += fieldBit__Block_Messages ma.state = maState_midValue @@ -347,9 +347,9 @@ func (ma *_Block__Assembler) AssembleEntry(k string) (ipld.NodeAssembler, error) ma.ca_Messages.m = &ma.cm return &ma.ca_Messages, nil } - return nil, ipld.ErrInvalidKey{TypeName: "chaintypes.Block", Key: &_String{k}} + return nil, schema.ErrInvalidKey{TypeName: "chaintypes.Block", Key: &_String{k}} } -func (ma *_Block__Assembler) AssembleKey() ipld.NodeAssembler { +func (ma *_Block__Assembler) AssembleKey() datamodel.NodeAssembler { switch ma.state { case maState_initial: // carry on @@ -367,7 +367,7 @@ func (ma *_Block__Assembler) AssembleKey() ipld.NodeAssembler { ma.state = maState_midKey return (*_Block__KeyAssembler)(ma) } -func (ma *_Block__Assembler) AssembleValue() ipld.NodeAssembler { +func (ma *_Block__Assembler) AssembleValue() datamodel.NodeAssembler { switch ma.state { case maState_initial: panic("invalid state: AssembleValue cannot be called when no key is primed") @@ -410,7 +410,7 @@ func (ma *_Block__Assembler) Finish() error { panic("invalid state: Finish cannot be called on an assembler that's already finished") } if ma.s&fieldBits__Block_sufficient != fieldBits__Block_sufficient { - err := ipld.ErrMissingRequiredField{Missing: make([]string, 0)} + err := schema.ErrMissingRequiredField{Missing: make([]string, 0)} if ma.s&fieldBit__Block_Parents == 0 { err.Missing = append(err.Missing, "Parents") } @@ -423,19 +423,19 @@ func (ma *_Block__Assembler) Finish() error { *ma.m = schema.Maybe_Value return nil } -func (ma *_Block__Assembler) KeyPrototype() ipld.NodePrototype { +func (ma *_Block__Assembler) KeyPrototype() datamodel.NodePrototype { return _String__Prototype{} } -func (ma *_Block__Assembler) ValuePrototype(k string) ipld.NodePrototype { +func (ma *_Block__Assembler) ValuePrototype(k string) datamodel.NodePrototype { panic("todo structbuilder mapassembler valueprototype") } type _Block__KeyAssembler _Block__Assembler -func (_Block__KeyAssembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Block__KeyAssembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.StringAssembler{TypeName: "chaintypes.Block.KeyAssembler"}.BeginMap(0) } -func (_Block__KeyAssembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_Block__KeyAssembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.StringAssembler{TypeName: "chaintypes.Block.KeyAssembler"}.BeginList(0) } func (na *_Block__KeyAssembler) AssignNull() error { @@ -457,7 +457,7 @@ func (ka *_Block__KeyAssembler) AssignString(k string) error { switch k { case "Parents": if 
ka.s&fieldBit__Block_Parents != 0 { - return ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Parents} + return datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Parents} } ka.s += fieldBit__Block_Parents ka.state = maState_expectValue @@ -465,36 +465,36 @@ func (ka *_Block__KeyAssembler) AssignString(k string) error { return nil case "Messages": if ka.s&fieldBit__Block_Messages != 0 { - return ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Messages} + return datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Messages} } ka.s += fieldBit__Block_Messages ka.state = maState_expectValue ka.f = 1 return nil default: - return ipld.ErrInvalidKey{TypeName: "chaintypes.Block", Key: &_String{k}} + return schema.ErrInvalidKey{TypeName: "chaintypes.Block", Key: &_String{k}} } } func (_Block__KeyAssembler) AssignBytes([]byte) error { return mixins.StringAssembler{TypeName: "chaintypes.Block.KeyAssembler"}.AssignBytes(nil) } -func (_Block__KeyAssembler) AssignLink(ipld.Link) error { +func (_Block__KeyAssembler) AssignLink(datamodel.Link) error { return mixins.StringAssembler{TypeName: "chaintypes.Block.KeyAssembler"}.AssignLink(nil) } -func (ka *_Block__KeyAssembler) AssignNode(v ipld.Node) error { +func (ka *_Block__KeyAssembler) AssignNode(v datamodel.Node) error { if v2, err := v.AsString(); err != nil { return err } else { return ka.AssignString(v2) } } -func (_Block__KeyAssembler) Prototype() ipld.NodePrototype { +func (_Block__KeyAssembler) Prototype() datamodel.NodePrototype { return _String__Prototype{} } func (Block) Type() schema.Type { return nil /*TODO:typelit*/ } -func (n Block) Representation() ipld.Node { +func (n Block) Representation() datamodel.Node { return (*_Block__Repr)(n) } @@ -504,35 +504,35 @@ var ( fieldName__Block_Parents_serial = _String{"Parents"} fieldName__Block_Messages_serial = _String{"Messages"} ) -var _ ipld.Node = &_Block__Repr{} +var _ datamodel.Node = &_Block__Repr{} -func (_Block__Repr) Kind() ipld.Kind { - return ipld.Kind_Map +func (_Block__Repr) Kind() datamodel.Kind { + return datamodel.Kind_Map } -func (n *_Block__Repr) LookupByString(key string) (ipld.Node, error) { +func (n *_Block__Repr) LookupByString(key string) (datamodel.Node, error) { switch key { case "Parents": return n.Parents.Representation(), nil case "Messages": return n.Messages.Representation(), nil default: - return nil, schema.ErrNoSuchField{Type: nil /*TODO*/, Field: ipld.PathSegmentOfString(key)} + return nil, schema.ErrNoSuchField{Type: nil /*TODO*/, Field: datamodel.PathSegmentOfString(key)} } } -func (n *_Block__Repr) LookupByNode(key ipld.Node) (ipld.Node, error) { +func (n *_Block__Repr) LookupByNode(key datamodel.Node) (datamodel.Node, error) { ks, err := key.AsString() if err != nil { return nil, err } return n.LookupByString(ks) } -func (_Block__Repr) LookupByIndex(idx int64) (ipld.Node, error) { +func (_Block__Repr) LookupByIndex(idx int64) (datamodel.Node, error) { return mixins.Map{TypeName: "chaintypes.Block.Repr"}.LookupByIndex(0) } -func (n _Block__Repr) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (n _Block__Repr) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { return n.LookupByString(seg.String()) } -func (n *_Block__Repr) MapIterator() ipld.MapIterator { +func (n *_Block__Repr) MapIterator() datamodel.MapIterator { return &_Block__ReprMapItr{n, 0} } @@ -541,9 +541,9 @@ type _Block__ReprMapItr struct { idx int } -func (itr *_Block__ReprMapItr) Next() (k ipld.Node, v ipld.Node, _ error) { +func (itr *_Block__ReprMapItr) Next() (k 
datamodel.Node, v datamodel.Node, _ error) { if itr.idx >= 2 { - return nil, nil, ipld.ErrIteratorOverread{} + return nil, nil, datamodel.ErrIteratorOverread{} } switch itr.idx { case 0: @@ -561,7 +561,7 @@ func (itr *_Block__ReprMapItr) Next() (k ipld.Node, v ipld.Node, _ error) { func (itr *_Block__ReprMapItr) Done() bool { return itr.idx >= 2 } -func (_Block__Repr) ListIterator() ipld.ListIterator { +func (_Block__Repr) ListIterator() datamodel.ListIterator { return nil } func (rn *_Block__Repr) Length() int64 { @@ -589,16 +589,16 @@ func (_Block__Repr) AsString() (string, error) { func (_Block__Repr) AsBytes() ([]byte, error) { return mixins.Map{TypeName: "chaintypes.Block.Repr"}.AsBytes() } -func (_Block__Repr) AsLink() (ipld.Link, error) { +func (_Block__Repr) AsLink() (datamodel.Link, error) { return mixins.Map{TypeName: "chaintypes.Block.Repr"}.AsLink() } -func (_Block__Repr) Prototype() ipld.NodePrototype { +func (_Block__Repr) Prototype() datamodel.NodePrototype { return _Block__ReprPrototype{} } type _Block__ReprPrototype struct{} -func (_Block__ReprPrototype) NewBuilder() ipld.NodeBuilder { +func (_Block__ReprPrototype) NewBuilder() datamodel.NodeBuilder { var nb _Block__ReprBuilder nb.Reset() return &nb @@ -608,7 +608,7 @@ type _Block__ReprBuilder struct { _Block__ReprAssembler } -func (nb *_Block__ReprBuilder) Build() ipld.Node { +func (nb *_Block__ReprBuilder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -638,7 +638,7 @@ func (na *_Block__ReprAssembler) reset() { na.ca_Parents.reset() na.ca_Messages.reset() } -func (na *_Block__ReprAssembler) BeginMap(int64) (ipld.MapAssembler, error) { +func (na *_Block__ReprAssembler) BeginMap(int64) (datamodel.MapAssembler, error) { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -651,7 +651,7 @@ func (na *_Block__ReprAssembler) BeginMap(int64) (ipld.MapAssembler, error) { } return na, nil } -func (_Block__ReprAssembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_Block__ReprAssembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.MapAssembler{TypeName: "chaintypes.Block.Repr"}.BeginList(0) } func (na *_Block__ReprAssembler) AssignNull() error { @@ -683,10 +683,10 @@ func (_Block__ReprAssembler) AssignString(string) error { func (_Block__ReprAssembler) AssignBytes([]byte) error { return mixins.MapAssembler{TypeName: "chaintypes.Block.Repr"}.AssignBytes(nil) } -func (_Block__ReprAssembler) AssignLink(ipld.Link) error { +func (_Block__ReprAssembler) AssignLink(datamodel.Link) error { return mixins.MapAssembler{TypeName: "chaintypes.Block.Repr"}.AssignLink(nil) } -func (na *_Block__ReprAssembler) AssignNode(v ipld.Node) error { +func (na *_Block__ReprAssembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -706,8 +706,8 @@ func (na *_Block__ReprAssembler) AssignNode(v ipld.Node) error { *na.m = schema.Maybe_Value return nil } - if v.Kind() != ipld.Kind_Map { - return ipld.ErrWrongKind{TypeName: "chaintypes.Block.Repr", MethodName: "AssignNode", AppropriateKind: ipld.KindSet_JustMap, ActualKind: v.Kind()} + if v.Kind() != datamodel.Kind_Map { + return datamodel.ErrWrongKind{TypeName: "chaintypes.Block.Repr", MethodName: "AssignNode", AppropriateKind: datamodel.KindSet_JustMap, ActualKind: v.Kind()} } itr := v.MapIterator() for !itr.Done() { @@ -724,7 +724,7 @@ func (na 
*_Block__ReprAssembler) AssignNode(v ipld.Node) error { } return na.Finish() } -func (_Block__ReprAssembler) Prototype() ipld.NodePrototype { +func (_Block__ReprAssembler) Prototype() datamodel.NodePrototype { return _Block__ReprPrototype{} } func (ma *_Block__ReprAssembler) valueFinishTidy() bool { @@ -751,7 +751,7 @@ func (ma *_Block__ReprAssembler) valueFinishTidy() bool { panic("unreachable") } } -func (ma *_Block__ReprAssembler) AssembleEntry(k string) (ipld.NodeAssembler, error) { +func (ma *_Block__ReprAssembler) AssembleEntry(k string) (datamodel.NodeAssembler, error) { switch ma.state { case maState_initial: // carry on @@ -769,7 +769,7 @@ func (ma *_Block__ReprAssembler) AssembleEntry(k string) (ipld.NodeAssembler, er switch k { case "Parents": if ma.s&fieldBit__Block_Parents != 0 { - return nil, ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Parents_serial} + return nil, datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Parents_serial} } ma.s += fieldBit__Block_Parents ma.state = maState_midValue @@ -779,7 +779,7 @@ func (ma *_Block__ReprAssembler) AssembleEntry(k string) (ipld.NodeAssembler, er return &ma.ca_Parents, nil case "Messages": if ma.s&fieldBit__Block_Messages != 0 { - return nil, ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Messages_serial} + return nil, datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Messages_serial} } ma.s += fieldBit__Block_Messages ma.state = maState_midValue @@ -789,9 +789,9 @@ func (ma *_Block__ReprAssembler) AssembleEntry(k string) (ipld.NodeAssembler, er return &ma.ca_Messages, nil default: } - return nil, ipld.ErrInvalidKey{TypeName: "chaintypes.Block.Repr", Key: &_String{k}} + return nil, schema.ErrInvalidKey{TypeName: "chaintypes.Block.Repr", Key: &_String{k}} } -func (ma *_Block__ReprAssembler) AssembleKey() ipld.NodeAssembler { +func (ma *_Block__ReprAssembler) AssembleKey() datamodel.NodeAssembler { switch ma.state { case maState_initial: // carry on @@ -809,7 +809,7 @@ func (ma *_Block__ReprAssembler) AssembleKey() ipld.NodeAssembler { ma.state = maState_midKey return (*_Block__ReprKeyAssembler)(ma) } -func (ma *_Block__ReprAssembler) AssembleValue() ipld.NodeAssembler { +func (ma *_Block__ReprAssembler) AssembleValue() datamodel.NodeAssembler { switch ma.state { case maState_initial: panic("invalid state: AssembleValue cannot be called when no key is primed") @@ -852,7 +852,7 @@ func (ma *_Block__ReprAssembler) Finish() error { panic("invalid state: Finish cannot be called on an assembler that's already finished") } if ma.s&fieldBits__Block_sufficient != fieldBits__Block_sufficient { - err := ipld.ErrMissingRequiredField{Missing: make([]string, 0)} + err := schema.ErrMissingRequiredField{Missing: make([]string, 0)} if ma.s&fieldBit__Block_Parents == 0 { err.Missing = append(err.Missing, "Parents") } @@ -865,19 +865,19 @@ func (ma *_Block__ReprAssembler) Finish() error { *ma.m = schema.Maybe_Value return nil } -func (ma *_Block__ReprAssembler) KeyPrototype() ipld.NodePrototype { +func (ma *_Block__ReprAssembler) KeyPrototype() datamodel.NodePrototype { return _String__Prototype{} } -func (ma *_Block__ReprAssembler) ValuePrototype(k string) ipld.NodePrototype { +func (ma *_Block__ReprAssembler) ValuePrototype(k string) datamodel.NodePrototype { panic("todo structbuilder mapassembler repr valueprototype") } type _Block__ReprKeyAssembler _Block__ReprAssembler -func (_Block__ReprKeyAssembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Block__ReprKeyAssembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { 
return mixins.StringAssembler{TypeName: "chaintypes.Block.Repr.KeyAssembler"}.BeginMap(0) } -func (_Block__ReprKeyAssembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_Block__ReprKeyAssembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.StringAssembler{TypeName: "chaintypes.Block.Repr.KeyAssembler"}.BeginList(0) } func (na *_Block__ReprKeyAssembler) AssignNull() error { @@ -899,7 +899,7 @@ func (ka *_Block__ReprKeyAssembler) AssignString(k string) error { switch k { case "Parents": if ka.s&fieldBit__Block_Parents != 0 { - return ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Parents_serial} + return datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Parents_serial} } ka.s += fieldBit__Block_Parents ka.state = maState_expectValue @@ -907,29 +907,29 @@ func (ka *_Block__ReprKeyAssembler) AssignString(k string) error { return nil case "Messages": if ka.s&fieldBit__Block_Messages != 0 { - return ipld.ErrRepeatedMapKey{Key: &fieldName__Block_Messages_serial} + return datamodel.ErrRepeatedMapKey{Key: &fieldName__Block_Messages_serial} } ka.s += fieldBit__Block_Messages ka.state = maState_expectValue ka.f = 1 return nil } - return ipld.ErrInvalidKey{TypeName: "chaintypes.Block.Repr", Key: &_String{k}} + return schema.ErrInvalidKey{TypeName: "chaintypes.Block.Repr", Key: &_String{k}} } func (_Block__ReprKeyAssembler) AssignBytes([]byte) error { return mixins.StringAssembler{TypeName: "chaintypes.Block.Repr.KeyAssembler"}.AssignBytes(nil) } -func (_Block__ReprKeyAssembler) AssignLink(ipld.Link) error { +func (_Block__ReprKeyAssembler) AssignLink(datamodel.Link) error { return mixins.StringAssembler{TypeName: "chaintypes.Block.Repr.KeyAssembler"}.AssignLink(nil) } -func (ka *_Block__ReprKeyAssembler) AssignNode(v ipld.Node) error { +func (ka *_Block__ReprKeyAssembler) AssignNode(v datamodel.Node) error { if v2, err := v.AsString(); err != nil { return err } else { return ka.AssignString(v2) } } -func (_Block__ReprKeyAssembler) Prototype() ipld.NodePrototype { +func (_Block__ReprKeyAssembler) Prototype() datamodel.NodePrototype { return _String__Prototype{} } @@ -956,12 +956,12 @@ func (m MaybeBytes) IsAbsent() bool { func (m MaybeBytes) Exists() bool { return m.m == schema.Maybe_Value } -func (m MaybeBytes) AsNode() ipld.Node { +func (m MaybeBytes) AsNode() datamodel.Node { switch m.m { case schema.Maybe_Absent: - return ipld.Absent + return datamodel.Absent case schema.Maybe_Null: - return ipld.Null + return datamodel.Null case schema.Maybe_Value: return &m.v default: @@ -975,28 +975,28 @@ func (m MaybeBytes) Must() Bytes { return &m.v } -var _ ipld.Node = (Bytes)(&_Bytes{}) +var _ datamodel.Node = (Bytes)(&_Bytes{}) var _ schema.TypedNode = (Bytes)(&_Bytes{}) -func (Bytes) Kind() ipld.Kind { - return ipld.Kind_Bytes +func (Bytes) Kind() datamodel.Kind { + return datamodel.Kind_Bytes } -func (Bytes) LookupByString(string) (ipld.Node, error) { +func (Bytes) LookupByString(string) (datamodel.Node, error) { return mixins.Bytes{TypeName: "chaintypes.Bytes"}.LookupByString("") } -func (Bytes) LookupByNode(ipld.Node) (ipld.Node, error) { +func (Bytes) LookupByNode(datamodel.Node) (datamodel.Node, error) { return mixins.Bytes{TypeName: "chaintypes.Bytes"}.LookupByNode(nil) } -func (Bytes) LookupByIndex(idx int64) (ipld.Node, error) { +func (Bytes) LookupByIndex(idx int64) (datamodel.Node, error) { return mixins.Bytes{TypeName: "chaintypes.Bytes"}.LookupByIndex(0) } -func (Bytes) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (Bytes) 
LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { return mixins.Bytes{TypeName: "chaintypes.Bytes"}.LookupBySegment(seg) } -func (Bytes) MapIterator() ipld.MapIterator { +func (Bytes) MapIterator() datamodel.MapIterator { return nil } -func (Bytes) ListIterator() ipld.ListIterator { +func (Bytes) ListIterator() datamodel.ListIterator { return nil } func (Bytes) Length() int64 { @@ -1023,16 +1023,16 @@ func (Bytes) AsString() (string, error) { func (n Bytes) AsBytes() ([]byte, error) { return n.x, nil } -func (Bytes) AsLink() (ipld.Link, error) { +func (Bytes) AsLink() (datamodel.Link, error) { return mixins.Bytes{TypeName: "chaintypes.Bytes"}.AsLink() } -func (Bytes) Prototype() ipld.NodePrototype { +func (Bytes) Prototype() datamodel.NodePrototype { return _Bytes__Prototype{} } type _Bytes__Prototype struct{} -func (_Bytes__Prototype) NewBuilder() ipld.NodeBuilder { +func (_Bytes__Prototype) NewBuilder() datamodel.NodeBuilder { var nb _Bytes__Builder nb.Reset() return &nb @@ -1042,7 +1042,7 @@ type _Bytes__Builder struct { _Bytes__Assembler } -func (nb *_Bytes__Builder) Build() ipld.Node { +func (nb *_Bytes__Builder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -1060,10 +1060,10 @@ type _Bytes__Assembler struct { } func (na *_Bytes__Assembler) reset() {} -func (_Bytes__Assembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Bytes__Assembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.BytesAssembler{TypeName: "chaintypes.Bytes"}.BeginMap(0) } -func (_Bytes__Assembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_Bytes__Assembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.BytesAssembler{TypeName: "chaintypes.Bytes"}.BeginList(0) } func (na *_Bytes__Assembler) AssignNull() error { @@ -1099,10 +1099,10 @@ func (na *_Bytes__Assembler) AssignBytes(v []byte) error { *na.m = schema.Maybe_Value return nil } -func (_Bytes__Assembler) AssignLink(ipld.Link) error { +func (_Bytes__Assembler) AssignLink(datamodel.Link) error { return mixins.BytesAssembler{TypeName: "chaintypes.Bytes"}.AssignLink(nil) } -func (na *_Bytes__Assembler) AssignNode(v ipld.Node) error { +func (na *_Bytes__Assembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -1121,27 +1121,27 @@ func (na *_Bytes__Assembler) AssignNode(v ipld.Node) error { return na.AssignBytes(v2) } } -func (_Bytes__Assembler) Prototype() ipld.NodePrototype { +func (_Bytes__Assembler) Prototype() datamodel.NodePrototype { return _Bytes__Prototype{} } func (Bytes) Type() schema.Type { return nil /*TODO:typelit*/ } -func (n Bytes) Representation() ipld.Node { +func (n Bytes) Representation() datamodel.Node { return (*_Bytes__Repr)(n) } type _Bytes__Repr = _Bytes -var _ ipld.Node = &_Bytes__Repr{} +var _ datamodel.Node = &_Bytes__Repr{} type _Bytes__ReprPrototype = _Bytes__Prototype type _Bytes__ReprAssembler = _Bytes__Assembler -func (n Link) Link() ipld.Link { +func (n Link) Link() datamodel.Link { return n.x } -func (_Link__Prototype) FromLink(v ipld.Link) (Link, error) { +func (_Link__Prototype) FromLink(v datamodel.Link) (Link, error) { n := _Link{v} return &n, nil } @@ -1161,12 +1161,12 @@ func (m MaybeLink) IsAbsent() bool { func (m MaybeLink) Exists() bool { return m.m == schema.Maybe_Value } -func (m MaybeLink) AsNode() ipld.Node { +func (m MaybeLink) AsNode() datamodel.Node { switch m.m { case 
schema.Maybe_Absent: - return ipld.Absent + return datamodel.Absent case schema.Maybe_Null: - return ipld.Null + return datamodel.Null case schema.Maybe_Value: return &m.v default: @@ -1180,28 +1180,28 @@ func (m MaybeLink) Must() Link { return &m.v } -var _ ipld.Node = (Link)(&_Link{}) +var _ datamodel.Node = (Link)(&_Link{}) var _ schema.TypedNode = (Link)(&_Link{}) -func (Link) Kind() ipld.Kind { - return ipld.Kind_Link +func (Link) Kind() datamodel.Kind { + return datamodel.Kind_Link } -func (Link) LookupByString(string) (ipld.Node, error) { +func (Link) LookupByString(string) (datamodel.Node, error) { return mixins.Link{TypeName: "chaintypes.Link"}.LookupByString("") } -func (Link) LookupByNode(ipld.Node) (ipld.Node, error) { +func (Link) LookupByNode(datamodel.Node) (datamodel.Node, error) { return mixins.Link{TypeName: "chaintypes.Link"}.LookupByNode(nil) } -func (Link) LookupByIndex(idx int64) (ipld.Node, error) { +func (Link) LookupByIndex(idx int64) (datamodel.Node, error) { return mixins.Link{TypeName: "chaintypes.Link"}.LookupByIndex(0) } -func (Link) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (Link) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { return mixins.Link{TypeName: "chaintypes.Link"}.LookupBySegment(seg) } -func (Link) MapIterator() ipld.MapIterator { +func (Link) MapIterator() datamodel.MapIterator { return nil } -func (Link) ListIterator() ipld.ListIterator { +func (Link) ListIterator() datamodel.ListIterator { return nil } func (Link) Length() int64 { @@ -1228,16 +1228,16 @@ func (Link) AsString() (string, error) { func (Link) AsBytes() ([]byte, error) { return mixins.Link{TypeName: "chaintypes.Link"}.AsBytes() } -func (n Link) AsLink() (ipld.Link, error) { +func (n Link) AsLink() (datamodel.Link, error) { return n.x, nil } -func (Link) Prototype() ipld.NodePrototype { +func (Link) Prototype() datamodel.NodePrototype { return _Link__Prototype{} } type _Link__Prototype struct{} -func (_Link__Prototype) NewBuilder() ipld.NodeBuilder { +func (_Link__Prototype) NewBuilder() datamodel.NodeBuilder { var nb _Link__Builder nb.Reset() return &nb @@ -1247,7 +1247,7 @@ type _Link__Builder struct { _Link__Assembler } -func (nb *_Link__Builder) Build() ipld.Node { +func (nb *_Link__Builder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -1265,10 +1265,10 @@ type _Link__Assembler struct { } func (na *_Link__Assembler) reset() {} -func (_Link__Assembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Link__Assembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.LinkAssembler{TypeName: "chaintypes.Link"}.BeginMap(0) } -func (_Link__Assembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_Link__Assembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.LinkAssembler{TypeName: "chaintypes.Link"}.BeginList(0) } func (na *_Link__Assembler) AssignNull() error { @@ -1298,7 +1298,7 @@ func (_Link__Assembler) AssignString(string) error { func (_Link__Assembler) AssignBytes([]byte) error { return mixins.LinkAssembler{TypeName: "chaintypes.Link"}.AssignBytes(nil) } -func (na *_Link__Assembler) AssignLink(v ipld.Link) error { +func (na *_Link__Assembler) AssignLink(v datamodel.Link) error { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -1307,7 +1307,7 @@ func (na 
*_Link__Assembler) AssignLink(v ipld.Link) error { *na.m = schema.Maybe_Value return nil } -func (na *_Link__Assembler) AssignNode(v ipld.Node) error { +func (na *_Link__Assembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -1326,19 +1326,19 @@ func (na *_Link__Assembler) AssignNode(v ipld.Node) error { return na.AssignLink(v2) } } -func (_Link__Assembler) Prototype() ipld.NodePrototype { +func (_Link__Assembler) Prototype() datamodel.NodePrototype { return _Link__Prototype{} } func (Link) Type() schema.Type { return nil /*TODO:typelit*/ } -func (n Link) Representation() ipld.Node { +func (n Link) Representation() datamodel.Node { return (*_Link__Repr)(n) } type _Link__Repr = _Link -var _ ipld.Node = &_Link__Repr{} +var _ datamodel.Node = &_Link__Repr{} type _Link__ReprPrototype = _Link__Prototype type _Link__ReprAssembler = _Link__Assembler @@ -1400,12 +1400,12 @@ func (m MaybeMessages) IsAbsent() bool { func (m MaybeMessages) Exists() bool { return m.m == schema.Maybe_Value } -func (m MaybeMessages) AsNode() ipld.Node { +func (m MaybeMessages) AsNode() datamodel.Node { switch m.m { case schema.Maybe_Absent: - return ipld.Absent + return datamodel.Absent case schema.Maybe_Null: - return ipld.Null + return datamodel.Null case schema.Maybe_Value: return &m.v default: @@ -1419,40 +1419,40 @@ func (m MaybeMessages) Must() Messages { return &m.v } -var _ ipld.Node = (Messages)(&_Messages{}) +var _ datamodel.Node = (Messages)(&_Messages{}) var _ schema.TypedNode = (Messages)(&_Messages{}) -func (Messages) Kind() ipld.Kind { - return ipld.Kind_List +func (Messages) Kind() datamodel.Kind { + return datamodel.Kind_List } -func (Messages) LookupByString(string) (ipld.Node, error) { +func (Messages) LookupByString(string) (datamodel.Node, error) { return mixins.List{TypeName: "chaintypes.Messages"}.LookupByString("") } -func (n Messages) LookupByNode(k ipld.Node) (ipld.Node, error) { +func (n Messages) LookupByNode(k datamodel.Node) (datamodel.Node, error) { idx, err := k.AsInt() if err != nil { return nil, err } return n.LookupByIndex(idx) } -func (n Messages) LookupByIndex(idx int64) (ipld.Node, error) { +func (n Messages) LookupByIndex(idx int64) (datamodel.Node, error) { if n.Length() <= idx { - return nil, ipld.ErrNotExists{Segment: ipld.PathSegmentOfInt(idx)} + return nil, datamodel.ErrNotExists{Segment: datamodel.PathSegmentOfInt(idx)} } v := &n.x[idx] return v, nil } -func (n Messages) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (n Messages) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { i, err := seg.Index() if err != nil { - return nil, ipld.ErrInvalidSegmentForList{TypeName: "chaintypes.Messages", TroubleSegment: seg, Reason: err} + return nil, datamodel.ErrInvalidSegmentForList{TypeName: "chaintypes.Messages", TroubleSegment: seg, Reason: err} } return n.LookupByIndex(i) } -func (Messages) MapIterator() ipld.MapIterator { +func (Messages) MapIterator() datamodel.MapIterator { return nil } -func (n Messages) ListIterator() ipld.ListIterator { +func (n Messages) ListIterator() datamodel.ListIterator { return &_Messages__ListItr{n, 0} } @@ -1461,9 +1461,9 @@ type _Messages__ListItr struct { idx int } -func (itr *_Messages__ListItr) Next() (idx int64, v ipld.Node, _ error) { +func (itr *_Messages__ListItr) Next() (idx int64, v datamodel.Node, _ error) { if itr.idx >= len(itr.n.x) { - return -1, nil, ipld.ErrIteratorOverread{} + return -1, nil, datamodel.ErrIteratorOverread{} } idx = int64(itr.idx) x := 
&itr.n.x[itr.idx] @@ -1499,16 +1499,16 @@ func (Messages) AsString() (string, error) { func (Messages) AsBytes() ([]byte, error) { return mixins.List{TypeName: "chaintypes.Messages"}.AsBytes() } -func (Messages) AsLink() (ipld.Link, error) { +func (Messages) AsLink() (datamodel.Link, error) { return mixins.List{TypeName: "chaintypes.Messages"}.AsLink() } -func (Messages) Prototype() ipld.NodePrototype { +func (Messages) Prototype() datamodel.NodePrototype { return _Messages__Prototype{} } type _Messages__Prototype struct{} -func (_Messages__Prototype) NewBuilder() ipld.NodeBuilder { +func (_Messages__Prototype) NewBuilder() datamodel.NodeBuilder { var nb _Messages__Builder nb.Reset() return &nb @@ -1518,7 +1518,7 @@ type _Messages__Builder struct { _Messages__Assembler } -func (nb *_Messages__Builder) Build() ipld.Node { +func (nb *_Messages__Builder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -1543,10 +1543,10 @@ func (na *_Messages__Assembler) reset() { na.state = laState_initial na.va.reset() } -func (_Messages__Assembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Messages__Assembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.ListAssembler{TypeName: "chaintypes.Messages"}.BeginMap(0) } -func (na *_Messages__Assembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (na *_Messages__Assembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -1591,10 +1591,10 @@ func (_Messages__Assembler) AssignString(string) error { func (_Messages__Assembler) AssignBytes([]byte) error { return mixins.ListAssembler{TypeName: "chaintypes.Messages"}.AssignBytes(nil) } -func (_Messages__Assembler) AssignLink(ipld.Link) error { +func (_Messages__Assembler) AssignLink(datamodel.Link) error { return mixins.ListAssembler{TypeName: "chaintypes.Messages"}.AssignLink(nil) } -func (na *_Messages__Assembler) AssignNode(v ipld.Node) error { +func (na *_Messages__Assembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -1609,8 +1609,8 @@ func (na *_Messages__Assembler) AssignNode(v ipld.Node) error { *na.m = schema.Maybe_Value return nil } - if v.Kind() != ipld.Kind_List { - return ipld.ErrWrongKind{TypeName: "chaintypes.Messages", MethodName: "AssignNode", AppropriateKind: ipld.KindSet_JustList, ActualKind: v.Kind()} + if v.Kind() != datamodel.Kind_List { + return datamodel.ErrWrongKind{TypeName: "chaintypes.Messages", MethodName: "AssignNode", AppropriateKind: datamodel.KindSet_JustList, ActualKind: v.Kind()} } itr := v.ListIterator() for !itr.Done() { @@ -1624,7 +1624,7 @@ func (na *_Messages__Assembler) AssignNode(v ipld.Node) error { } return na.Finish() } -func (_Messages__Assembler) Prototype() ipld.NodePrototype { +func (_Messages__Assembler) Prototype() datamodel.NodePrototype { return _Messages__Prototype{} } func (la *_Messages__Assembler) valueFinishTidy() bool { @@ -1639,7 +1639,7 @@ func (la *_Messages__Assembler) valueFinishTidy() bool { return false } } -func (la *_Messages__Assembler) AssembleValue() ipld.NodeAssembler { +func (la *_Messages__Assembler) AssembleValue() datamodel.NodeAssembler { switch la.state { case laState_initial: // carry on @@ -1672,59 +1672,59 @@ func (la *_Messages__Assembler) Finish() error { *la.m = schema.Maybe_Value return nil } 
-func (la *_Messages__Assembler) ValuePrototype(_ int64) ipld.NodePrototype { +func (la *_Messages__Assembler) ValuePrototype(_ int64) datamodel.NodePrototype { return _Bytes__Prototype{} } func (Messages) Type() schema.Type { return nil /*TODO:typelit*/ } -func (n Messages) Representation() ipld.Node { +func (n Messages) Representation() datamodel.Node { return (*_Messages__Repr)(n) } type _Messages__Repr _Messages -var _ ipld.Node = &_Messages__Repr{} +var _ datamodel.Node = &_Messages__Repr{} -func (_Messages__Repr) Kind() ipld.Kind { - return ipld.Kind_List +func (_Messages__Repr) Kind() datamodel.Kind { + return datamodel.Kind_List } -func (_Messages__Repr) LookupByString(string) (ipld.Node, error) { +func (_Messages__Repr) LookupByString(string) (datamodel.Node, error) { return mixins.List{TypeName: "chaintypes.Messages.Repr"}.LookupByString("") } -func (nr *_Messages__Repr) LookupByNode(k ipld.Node) (ipld.Node, error) { +func (nr *_Messages__Repr) LookupByNode(k datamodel.Node) (datamodel.Node, error) { v, err := (Messages)(nr).LookupByNode(k) - if err != nil || v == ipld.Null { + if err != nil || v == datamodel.Null { return v, err } return v.(Bytes).Representation(), nil } -func (nr *_Messages__Repr) LookupByIndex(idx int64) (ipld.Node, error) { +func (nr *_Messages__Repr) LookupByIndex(idx int64) (datamodel.Node, error) { v, err := (Messages)(nr).LookupByIndex(idx) - if err != nil || v == ipld.Null { + if err != nil || v == datamodel.Null { return v, err } return v.(Bytes).Representation(), nil } -func (n _Messages__Repr) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (n _Messages__Repr) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { i, err := seg.Index() if err != nil { - return nil, ipld.ErrInvalidSegmentForList{TypeName: "chaintypes.Messages.Repr", TroubleSegment: seg, Reason: err} + return nil, datamodel.ErrInvalidSegmentForList{TypeName: "chaintypes.Messages.Repr", TroubleSegment: seg, Reason: err} } return n.LookupByIndex(i) } -func (_Messages__Repr) MapIterator() ipld.MapIterator { +func (_Messages__Repr) MapIterator() datamodel.MapIterator { return nil } -func (nr *_Messages__Repr) ListIterator() ipld.ListIterator { +func (nr *_Messages__Repr) ListIterator() datamodel.ListIterator { return &_Messages__ReprListItr{(Messages)(nr), 0} } type _Messages__ReprListItr _Messages__ListItr -func (itr *_Messages__ReprListItr) Next() (idx int64, v ipld.Node, err error) { +func (itr *_Messages__ReprListItr) Next() (idx int64, v datamodel.Node, err error) { idx, v, err = (*_Messages__ListItr)(itr).Next() - if err != nil || v == ipld.Null { + if err != nil || v == datamodel.Null { return } return idx, v.(Bytes).Representation(), nil @@ -1757,16 +1757,16 @@ func (_Messages__Repr) AsString() (string, error) { func (_Messages__Repr) AsBytes() ([]byte, error) { return mixins.List{TypeName: "chaintypes.Messages.Repr"}.AsBytes() } -func (_Messages__Repr) AsLink() (ipld.Link, error) { +func (_Messages__Repr) AsLink() (datamodel.Link, error) { return mixins.List{TypeName: "chaintypes.Messages.Repr"}.AsLink() } -func (_Messages__Repr) Prototype() ipld.NodePrototype { +func (_Messages__Repr) Prototype() datamodel.NodePrototype { return _Messages__ReprPrototype{} } type _Messages__ReprPrototype struct{} -func (_Messages__ReprPrototype) NewBuilder() ipld.NodeBuilder { +func (_Messages__ReprPrototype) NewBuilder() datamodel.NodeBuilder { var nb _Messages__ReprBuilder nb.Reset() return &nb @@ -1776,7 +1776,7 @@ type _Messages__ReprBuilder struct { 
_Messages__ReprAssembler } -func (nb *_Messages__ReprBuilder) Build() ipld.Node { +func (nb *_Messages__ReprBuilder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -1801,10 +1801,10 @@ func (na *_Messages__ReprAssembler) reset() { na.state = laState_initial na.va.reset() } -func (_Messages__ReprAssembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Messages__ReprAssembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.ListAssembler{TypeName: "chaintypes.Messages.Repr"}.BeginMap(0) } -func (na *_Messages__ReprAssembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (na *_Messages__ReprAssembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -1849,10 +1849,10 @@ func (_Messages__ReprAssembler) AssignString(string) error { func (_Messages__ReprAssembler) AssignBytes([]byte) error { return mixins.ListAssembler{TypeName: "chaintypes.Messages.Repr"}.AssignBytes(nil) } -func (_Messages__ReprAssembler) AssignLink(ipld.Link) error { +func (_Messages__ReprAssembler) AssignLink(datamodel.Link) error { return mixins.ListAssembler{TypeName: "chaintypes.Messages.Repr"}.AssignLink(nil) } -func (na *_Messages__ReprAssembler) AssignNode(v ipld.Node) error { +func (na *_Messages__ReprAssembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -1867,8 +1867,8 @@ func (na *_Messages__ReprAssembler) AssignNode(v ipld.Node) error { *na.m = schema.Maybe_Value return nil } - if v.Kind() != ipld.Kind_List { - return ipld.ErrWrongKind{TypeName: "chaintypes.Messages.Repr", MethodName: "AssignNode", AppropriateKind: ipld.KindSet_JustList, ActualKind: v.Kind()} + if v.Kind() != datamodel.Kind_List { + return datamodel.ErrWrongKind{TypeName: "chaintypes.Messages.Repr", MethodName: "AssignNode", AppropriateKind: datamodel.KindSet_JustList, ActualKind: v.Kind()} } itr := v.ListIterator() for !itr.Done() { @@ -1882,7 +1882,7 @@ func (na *_Messages__ReprAssembler) AssignNode(v ipld.Node) error { } return na.Finish() } -func (_Messages__ReprAssembler) Prototype() ipld.NodePrototype { +func (_Messages__ReprAssembler) Prototype() datamodel.NodePrototype { return _Messages__ReprPrototype{} } func (la *_Messages__ReprAssembler) valueFinishTidy() bool { @@ -1897,7 +1897,7 @@ func (la *_Messages__ReprAssembler) valueFinishTidy() bool { return false } } -func (la *_Messages__ReprAssembler) AssembleValue() ipld.NodeAssembler { +func (la *_Messages__ReprAssembler) AssembleValue() datamodel.NodeAssembler { switch la.state { case laState_initial: // carry on @@ -1930,7 +1930,7 @@ func (la *_Messages__ReprAssembler) Finish() error { *la.m = schema.Maybe_Value return nil } -func (la *_Messages__ReprAssembler) ValuePrototype(_ int64) ipld.NodePrototype { +func (la *_Messages__ReprAssembler) ValuePrototype(_ int64) datamodel.NodePrototype { return _Bytes__ReprPrototype{} } @@ -1991,12 +1991,12 @@ func (m MaybeParents) IsAbsent() bool { func (m MaybeParents) Exists() bool { return m.m == schema.Maybe_Value } -func (m MaybeParents) AsNode() ipld.Node { +func (m MaybeParents) AsNode() datamodel.Node { switch m.m { case schema.Maybe_Absent: - return ipld.Absent + return datamodel.Absent case schema.Maybe_Null: - return ipld.Null + return datamodel.Null case schema.Maybe_Value: return &m.v default: @@ 
-2010,40 +2010,40 @@ func (m MaybeParents) Must() Parents { return &m.v } -var _ ipld.Node = (Parents)(&_Parents{}) +var _ datamodel.Node = (Parents)(&_Parents{}) var _ schema.TypedNode = (Parents)(&_Parents{}) -func (Parents) Kind() ipld.Kind { - return ipld.Kind_List +func (Parents) Kind() datamodel.Kind { + return datamodel.Kind_List } -func (Parents) LookupByString(string) (ipld.Node, error) { +func (Parents) LookupByString(string) (datamodel.Node, error) { return mixins.List{TypeName: "chaintypes.Parents"}.LookupByString("") } -func (n Parents) LookupByNode(k ipld.Node) (ipld.Node, error) { +func (n Parents) LookupByNode(k datamodel.Node) (datamodel.Node, error) { idx, err := k.AsInt() if err != nil { return nil, err } return n.LookupByIndex(idx) } -func (n Parents) LookupByIndex(idx int64) (ipld.Node, error) { +func (n Parents) LookupByIndex(idx int64) (datamodel.Node, error) { if n.Length() <= idx { - return nil, ipld.ErrNotExists{Segment: ipld.PathSegmentOfInt(idx)} + return nil, datamodel.ErrNotExists{Segment: datamodel.PathSegmentOfInt(idx)} } v := &n.x[idx] return v, nil } -func (n Parents) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (n Parents) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { i, err := seg.Index() if err != nil { - return nil, ipld.ErrInvalidSegmentForList{TypeName: "chaintypes.Parents", TroubleSegment: seg, Reason: err} + return nil, datamodel.ErrInvalidSegmentForList{TypeName: "chaintypes.Parents", TroubleSegment: seg, Reason: err} } return n.LookupByIndex(i) } -func (Parents) MapIterator() ipld.MapIterator { +func (Parents) MapIterator() datamodel.MapIterator { return nil } -func (n Parents) ListIterator() ipld.ListIterator { +func (n Parents) ListIterator() datamodel.ListIterator { return &_Parents__ListItr{n, 0} } @@ -2052,9 +2052,9 @@ type _Parents__ListItr struct { idx int } -func (itr *_Parents__ListItr) Next() (idx int64, v ipld.Node, _ error) { +func (itr *_Parents__ListItr) Next() (idx int64, v datamodel.Node, _ error) { if itr.idx >= len(itr.n.x) { - return -1, nil, ipld.ErrIteratorOverread{} + return -1, nil, datamodel.ErrIteratorOverread{} } idx = int64(itr.idx) x := &itr.n.x[itr.idx] @@ -2090,16 +2090,16 @@ func (Parents) AsString() (string, error) { func (Parents) AsBytes() ([]byte, error) { return mixins.List{TypeName: "chaintypes.Parents"}.AsBytes() } -func (Parents) AsLink() (ipld.Link, error) { +func (Parents) AsLink() (datamodel.Link, error) { return mixins.List{TypeName: "chaintypes.Parents"}.AsLink() } -func (Parents) Prototype() ipld.NodePrototype { +func (Parents) Prototype() datamodel.NodePrototype { return _Parents__Prototype{} } type _Parents__Prototype struct{} -func (_Parents__Prototype) NewBuilder() ipld.NodeBuilder { +func (_Parents__Prototype) NewBuilder() datamodel.NodeBuilder { var nb _Parents__Builder nb.Reset() return &nb @@ -2109,7 +2109,7 @@ type _Parents__Builder struct { _Parents__Assembler } -func (nb *_Parents__Builder) Build() ipld.Node { +func (nb *_Parents__Builder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -2134,10 +2134,10 @@ func (na *_Parents__Assembler) reset() { na.state = laState_initial na.va.reset() } -func (_Parents__Assembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Parents__Assembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.ListAssembler{TypeName: "chaintypes.Parents"}.BeginMap(0) } -func (na 
*_Parents__Assembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (na *_Parents__Assembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -2182,10 +2182,10 @@ func (_Parents__Assembler) AssignString(string) error { func (_Parents__Assembler) AssignBytes([]byte) error { return mixins.ListAssembler{TypeName: "chaintypes.Parents"}.AssignBytes(nil) } -func (_Parents__Assembler) AssignLink(ipld.Link) error { +func (_Parents__Assembler) AssignLink(datamodel.Link) error { return mixins.ListAssembler{TypeName: "chaintypes.Parents"}.AssignLink(nil) } -func (na *_Parents__Assembler) AssignNode(v ipld.Node) error { +func (na *_Parents__Assembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -2200,8 +2200,8 @@ func (na *_Parents__Assembler) AssignNode(v ipld.Node) error { *na.m = schema.Maybe_Value return nil } - if v.Kind() != ipld.Kind_List { - return ipld.ErrWrongKind{TypeName: "chaintypes.Parents", MethodName: "AssignNode", AppropriateKind: ipld.KindSet_JustList, ActualKind: v.Kind()} + if v.Kind() != datamodel.Kind_List { + return datamodel.ErrWrongKind{TypeName: "chaintypes.Parents", MethodName: "AssignNode", AppropriateKind: datamodel.KindSet_JustList, ActualKind: v.Kind()} } itr := v.ListIterator() for !itr.Done() { @@ -2215,7 +2215,7 @@ func (na *_Parents__Assembler) AssignNode(v ipld.Node) error { } return na.Finish() } -func (_Parents__Assembler) Prototype() ipld.NodePrototype { +func (_Parents__Assembler) Prototype() datamodel.NodePrototype { return _Parents__Prototype{} } func (la *_Parents__Assembler) valueFinishTidy() bool { @@ -2230,7 +2230,7 @@ func (la *_Parents__Assembler) valueFinishTidy() bool { return false } } -func (la *_Parents__Assembler) AssembleValue() ipld.NodeAssembler { +func (la *_Parents__Assembler) AssembleValue() datamodel.NodeAssembler { switch la.state { case laState_initial: // carry on @@ -2263,59 +2263,59 @@ func (la *_Parents__Assembler) Finish() error { *la.m = schema.Maybe_Value return nil } -func (la *_Parents__Assembler) ValuePrototype(_ int64) ipld.NodePrototype { +func (la *_Parents__Assembler) ValuePrototype(_ int64) datamodel.NodePrototype { return _Link__Prototype{} } func (Parents) Type() schema.Type { return nil /*TODO:typelit*/ } -func (n Parents) Representation() ipld.Node { +func (n Parents) Representation() datamodel.Node { return (*_Parents__Repr)(n) } type _Parents__Repr _Parents -var _ ipld.Node = &_Parents__Repr{} +var _ datamodel.Node = &_Parents__Repr{} -func (_Parents__Repr) Kind() ipld.Kind { - return ipld.Kind_List +func (_Parents__Repr) Kind() datamodel.Kind { + return datamodel.Kind_List } -func (_Parents__Repr) LookupByString(string) (ipld.Node, error) { +func (_Parents__Repr) LookupByString(string) (datamodel.Node, error) { return mixins.List{TypeName: "chaintypes.Parents.Repr"}.LookupByString("") } -func (nr *_Parents__Repr) LookupByNode(k ipld.Node) (ipld.Node, error) { +func (nr *_Parents__Repr) LookupByNode(k datamodel.Node) (datamodel.Node, error) { v, err := (Parents)(nr).LookupByNode(k) - if err != nil || v == ipld.Null { + if err != nil || v == datamodel.Null { return v, err } return v.(Link).Representation(), nil } -func (nr *_Parents__Repr) LookupByIndex(idx int64) (ipld.Node, error) { +func (nr *_Parents__Repr) LookupByIndex(idx int64) (datamodel.Node, error) { v, err := (Parents)(nr).LookupByIndex(idx) - if err != nil 
|| v == ipld.Null { + if err != nil || v == datamodel.Null { return v, err } return v.(Link).Representation(), nil } -func (n _Parents__Repr) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (n _Parents__Repr) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { i, err := seg.Index() if err != nil { - return nil, ipld.ErrInvalidSegmentForList{TypeName: "chaintypes.Parents.Repr", TroubleSegment: seg, Reason: err} + return nil, datamodel.ErrInvalidSegmentForList{TypeName: "chaintypes.Parents.Repr", TroubleSegment: seg, Reason: err} } return n.LookupByIndex(i) } -func (_Parents__Repr) MapIterator() ipld.MapIterator { +func (_Parents__Repr) MapIterator() datamodel.MapIterator { return nil } -func (nr *_Parents__Repr) ListIterator() ipld.ListIterator { +func (nr *_Parents__Repr) ListIterator() datamodel.ListIterator { return &_Parents__ReprListItr{(Parents)(nr), 0} } type _Parents__ReprListItr _Parents__ListItr -func (itr *_Parents__ReprListItr) Next() (idx int64, v ipld.Node, err error) { +func (itr *_Parents__ReprListItr) Next() (idx int64, v datamodel.Node, err error) { idx, v, err = (*_Parents__ListItr)(itr).Next() - if err != nil || v == ipld.Null { + if err != nil || v == datamodel.Null { return } return idx, v.(Link).Representation(), nil @@ -2348,16 +2348,16 @@ func (_Parents__Repr) AsString() (string, error) { func (_Parents__Repr) AsBytes() ([]byte, error) { return mixins.List{TypeName: "chaintypes.Parents.Repr"}.AsBytes() } -func (_Parents__Repr) AsLink() (ipld.Link, error) { +func (_Parents__Repr) AsLink() (datamodel.Link, error) { return mixins.List{TypeName: "chaintypes.Parents.Repr"}.AsLink() } -func (_Parents__Repr) Prototype() ipld.NodePrototype { +func (_Parents__Repr) Prototype() datamodel.NodePrototype { return _Parents__ReprPrototype{} } type _Parents__ReprPrototype struct{} -func (_Parents__ReprPrototype) NewBuilder() ipld.NodeBuilder { +func (_Parents__ReprPrototype) NewBuilder() datamodel.NodeBuilder { var nb _Parents__ReprBuilder nb.Reset() return &nb @@ -2367,7 +2367,7 @@ type _Parents__ReprBuilder struct { _Parents__ReprAssembler } -func (nb *_Parents__ReprBuilder) Build() ipld.Node { +func (nb *_Parents__ReprBuilder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -2392,10 +2392,10 @@ func (na *_Parents__ReprAssembler) reset() { na.state = laState_initial na.va.reset() } -func (_Parents__ReprAssembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_Parents__ReprAssembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.ListAssembler{TypeName: "chaintypes.Parents.Repr"}.BeginMap(0) } -func (na *_Parents__ReprAssembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (na *_Parents__ReprAssembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { switch *na.m { case schema.Maybe_Value, schema.Maybe_Null: panic("invalid state: cannot assign into assembler that's already finished") @@ -2440,10 +2440,10 @@ func (_Parents__ReprAssembler) AssignString(string) error { func (_Parents__ReprAssembler) AssignBytes([]byte) error { return mixins.ListAssembler{TypeName: "chaintypes.Parents.Repr"}.AssignBytes(nil) } -func (_Parents__ReprAssembler) AssignLink(ipld.Link) error { +func (_Parents__ReprAssembler) AssignLink(datamodel.Link) error { return mixins.ListAssembler{TypeName: "chaintypes.Parents.Repr"}.AssignLink(nil) } -func (na *_Parents__ReprAssembler) AssignNode(v ipld.Node) error { +func 
(na *_Parents__ReprAssembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -2458,8 +2458,8 @@ func (na *_Parents__ReprAssembler) AssignNode(v ipld.Node) error { *na.m = schema.Maybe_Value return nil } - if v.Kind() != ipld.Kind_List { - return ipld.ErrWrongKind{TypeName: "chaintypes.Parents.Repr", MethodName: "AssignNode", AppropriateKind: ipld.KindSet_JustList, ActualKind: v.Kind()} + if v.Kind() != datamodel.Kind_List { + return datamodel.ErrWrongKind{TypeName: "chaintypes.Parents.Repr", MethodName: "AssignNode", AppropriateKind: datamodel.KindSet_JustList, ActualKind: v.Kind()} } itr := v.ListIterator() for !itr.Done() { @@ -2473,7 +2473,7 @@ func (na *_Parents__ReprAssembler) AssignNode(v ipld.Node) error { } return na.Finish() } -func (_Parents__ReprAssembler) Prototype() ipld.NodePrototype { +func (_Parents__ReprAssembler) Prototype() datamodel.NodePrototype { return _Parents__ReprPrototype{} } func (la *_Parents__ReprAssembler) valueFinishTidy() bool { @@ -2488,7 +2488,7 @@ func (la *_Parents__ReprAssembler) valueFinishTidy() bool { return false } } -func (la *_Parents__ReprAssembler) AssembleValue() ipld.NodeAssembler { +func (la *_Parents__ReprAssembler) AssembleValue() datamodel.NodeAssembler { switch la.state { case laState_initial: // carry on @@ -2521,7 +2521,7 @@ func (la *_Parents__ReprAssembler) Finish() error { *la.m = schema.Maybe_Value return nil } -func (la *_Parents__ReprAssembler) ValuePrototype(_ int64) ipld.NodePrototype { +func (la *_Parents__ReprAssembler) ValuePrototype(_ int64) datamodel.NodePrototype { return _Link__ReprPrototype{} } @@ -2552,12 +2552,12 @@ func (m MaybeString) IsAbsent() bool { func (m MaybeString) Exists() bool { return m.m == schema.Maybe_Value } -func (m MaybeString) AsNode() ipld.Node { +func (m MaybeString) AsNode() datamodel.Node { switch m.m { case schema.Maybe_Absent: - return ipld.Absent + return datamodel.Absent case schema.Maybe_Null: - return ipld.Null + return datamodel.Null case schema.Maybe_Value: return &m.v default: @@ -2571,28 +2571,28 @@ func (m MaybeString) Must() String { return &m.v } -var _ ipld.Node = (String)(&_String{}) +var _ datamodel.Node = (String)(&_String{}) var _ schema.TypedNode = (String)(&_String{}) -func (String) Kind() ipld.Kind { - return ipld.Kind_String +func (String) Kind() datamodel.Kind { + return datamodel.Kind_String } -func (String) LookupByString(string) (ipld.Node, error) { +func (String) LookupByString(string) (datamodel.Node, error) { return mixins.String{TypeName: "chaintypes.String"}.LookupByString("") } -func (String) LookupByNode(ipld.Node) (ipld.Node, error) { +func (String) LookupByNode(datamodel.Node) (datamodel.Node, error) { return mixins.String{TypeName: "chaintypes.String"}.LookupByNode(nil) } -func (String) LookupByIndex(idx int64) (ipld.Node, error) { +func (String) LookupByIndex(idx int64) (datamodel.Node, error) { return mixins.String{TypeName: "chaintypes.String"}.LookupByIndex(0) } -func (String) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { +func (String) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) { return mixins.String{TypeName: "chaintypes.String"}.LookupBySegment(seg) } -func (String) MapIterator() ipld.MapIterator { +func (String) MapIterator() datamodel.MapIterator { return nil } -func (String) ListIterator() ipld.ListIterator { +func (String) ListIterator() datamodel.ListIterator { return nil } func (String) Length() int64 { @@ -2619,16 +2619,16 @@ func (n String) AsString() (string, error) { func 
(String) AsBytes() ([]byte, error) { return mixins.String{TypeName: "chaintypes.String"}.AsBytes() } -func (String) AsLink() (ipld.Link, error) { +func (String) AsLink() (datamodel.Link, error) { return mixins.String{TypeName: "chaintypes.String"}.AsLink() } -func (String) Prototype() ipld.NodePrototype { +func (String) Prototype() datamodel.NodePrototype { return _String__Prototype{} } type _String__Prototype struct{} -func (_String__Prototype) NewBuilder() ipld.NodeBuilder { +func (_String__Prototype) NewBuilder() datamodel.NodeBuilder { var nb _String__Builder nb.Reset() return &nb @@ -2638,7 +2638,7 @@ type _String__Builder struct { _String__Assembler } -func (nb *_String__Builder) Build() ipld.Node { +func (nb *_String__Builder) Build() datamodel.Node { if *nb.m != schema.Maybe_Value { panic("invalid state: cannot call Build on an assembler that's not finished") } @@ -2656,10 +2656,10 @@ type _String__Assembler struct { } func (na *_String__Assembler) reset() {} -func (_String__Assembler) BeginMap(sizeHint int64) (ipld.MapAssembler, error) { +func (_String__Assembler) BeginMap(sizeHint int64) (datamodel.MapAssembler, error) { return mixins.StringAssembler{TypeName: "chaintypes.String"}.BeginMap(0) } -func (_String__Assembler) BeginList(sizeHint int64) (ipld.ListAssembler, error) { +func (_String__Assembler) BeginList(sizeHint int64) (datamodel.ListAssembler, error) { return mixins.StringAssembler{TypeName: "chaintypes.String"}.BeginList(0) } func (na *_String__Assembler) AssignNull() error { @@ -2695,10 +2695,10 @@ func (na *_String__Assembler) AssignString(v string) error { func (_String__Assembler) AssignBytes([]byte) error { return mixins.StringAssembler{TypeName: "chaintypes.String"}.AssignBytes(nil) } -func (_String__Assembler) AssignLink(ipld.Link) error { +func (_String__Assembler) AssignLink(datamodel.Link) error { return mixins.StringAssembler{TypeName: "chaintypes.String"}.AssignLink(nil) } -func (na *_String__Assembler) AssignNode(v ipld.Node) error { +func (na *_String__Assembler) AssignNode(v datamodel.Node) error { if v.IsNull() { return na.AssignNull() } @@ -2717,19 +2717,19 @@ func (na *_String__Assembler) AssignNode(v ipld.Node) error { return na.AssignString(v2) } } -func (_String__Assembler) Prototype() ipld.NodePrototype { +func (_String__Assembler) Prototype() datamodel.NodePrototype { return _String__Prototype{} } func (String) Type() schema.Type { return nil /*TODO:typelit*/ } -func (n String) Representation() ipld.Node { +func (n String) Representation() datamodel.Node { return (*_String__Repr)(n) } type _String__Repr = _String -var _ ipld.Node = &_String__Repr{} +var _ datamodel.Node = &_String__Repr{} type _String__ReprPrototype = _String__Prototype type _String__ReprAssembler = _String__Assembler diff --git a/testutil/chaintypes/ipldsch_types.go b/testutil/chaintypes/ipldsch_types.go index ff97b75d..bd2f9408 100644 --- a/testutil/chaintypes/ipldsch_types.go +++ b/testutil/chaintypes/ipldsch_types.go @@ -3,10 +3,10 @@ package chaintypes // Code generated by go-ipld-prime gengo. DO NOT EDIT. import ( - ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/datamodel" ) -var _ ipld.Node = nil // suppress errors when this dependency is not referenced +var _ datamodel.Node = nil // suppress errors when this dependency is not referenced // Type is a struct embeding a NodePrototype/Type for every Node implementation in this package. // One of its major uses is to start the construction of a value. 
// You can use it like this: @@ -36,7 +36,7 @@ type typeSlab struct { // --- type definitions follow --- -// Block matches the IPLD Schema type "Block". It has Struct type-kind, and may be interrogated like map kind. +// Block matches the IPLD Schema type "Block". It has struct type-kind, and may be interrogated like map kind. type Block = *_Block type _Block struct { Parents _Parents @@ -49,7 +49,7 @@ type _Bytes struct{ x []byte } // Link matches the IPLD Schema type "Link". It has link kind. type Link = *_Link -type _Link struct{ x ipld.Link } +type _Link struct{ x datamodel.Link } // Messages matches the IPLD Schema type "Messages". It has list kind. type Messages = *_Messages diff --git a/testutil/test_ipld_tree.go b/testutil/test_ipld_tree.go index 2855ec0f..f49feb94 100644 --- a/testutil/test_ipld_tree.go +++ b/testutil/test_ipld_tree.go @@ -9,7 +9,7 @@ import ( _ "github.com/ipld/go-ipld-prime/codec/dagjson" "github.com/ipld/go-ipld-prime/fluent" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" ) // TestIPLDTree is a set of IPLD Data that forms a tree spread across some blocks diff --git a/testutil/testchain.go b/testutil/testchain.go index 5d00ae4a..f6e15deb 100644 --- a/testutil/testchain.go +++ b/testutil/testchain.go @@ -9,7 +9,7 @@ import ( cid "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/ipld/go-ipld-prime/traversal/selector/builder" mh "github.com/multiformats/go-multihash" diff --git a/testutil/testnodes.go b/testutil/testnodes.go index 194bbccf..3e825b18 100644 --- a/testutil/testnodes.go +++ b/testutil/testnodes.go @@ -2,7 +2,7 @@ package testutil import ( ipld "github.com/ipld/go-ipld-prime" - basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/node/basicnode" "github.com/ipld/go-ipld-prime/traversal/selector" "github.com/ipld/go-ipld-prime/traversal/selector/builder" ) diff --git a/testutil/tracing.go b/testutil/tracing.go new file mode 100644 index 00000000..428c00dc --- /dev/null +++ b/testutil/tracing.go @@ -0,0 +1,205 @@ +package testutil + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" +) + +var _ trace.SpanExporter = &Collector{} + +// Collector can be used as a trace batcher to provide traces to; it collects +// individual spans and then extracts useful data out of them for test assertions +type Collector struct { + Spans tracetest.SpanStubs +} + +// ExportSpans receives the ReadOnlySpans from the batch provider +func (c *Collector) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + c.Spans = tracetest.SpanStubsFromReadOnlySpans(spans) + return nil +} + +// Shutdown is a no-op; we don't need to do anything fancy +func (c *Collector) Shutdown(ctx context.Context) error { + return nil +} + +// FindSpans returns a list of spans by their name +func (c Collector) FindSpans(name string) tracetest.SpanStubs { + var found = tracetest.SpanStubs{} + for _, s := range c.Spans { + if s.Name == name { + found =
append(found, s) + } + } + return found +} + +// TracesToStrings returns an array of all traces represented as strings, with each +// span in the trace identified by name and its number (within the parent span) +// in parens, separated by a '->'. e.g. `"foo(0)->bar(0)","foo(0)->bar(1)"` +func (c Collector) TracesToStrings() []string { + return c.tracesToString("", c.FindParentSpans(), "", func(_ tracetest.SpanStub) {}) +} + +func (c Collector) tracesToString(trace string, spans tracetest.SpanStubs, matchString string, matchCb func(tracetest.SpanStub)) []string { + var traces []string + counts := make(map[string]int) // count the span children by name + for _, span := range spans { + nc := counts[span.Name] + counts[span.Name] = nc + 1 + t := fmt.Sprintf("%v(%d)", span.Name, nc) + if trace != "" { + t = fmt.Sprintf("%v->%v", trace, t) + } + if t == matchString { + matchCb(span) + } + children := c.FindSpansWithParent(span) + if len(children) > 0 { + traces = append(traces, c.tracesToString(t, children, matchString, matchCb)...) + } else { + traces = append(traces, t) + } + } + return traces +} + +// FindSpanByTraceString is similar to FindSpans but returns a single span +// identified by its trace string as described in TracesToStrings. Note that +// this string can also be a prefix of a complete trace, e.g. just `"foo(0)"` +// without any children, to fetch the parent span. +func (c Collector) FindSpanByTraceString(trace string) tracetest.SpanStub { + var found tracetest.SpanStub + c.tracesToString("", c.FindParentSpans(), trace, func(span tracetest.SpanStub) { + if found.Name != "" { + panic("found more than one span with the same trace string") + } + found = span + }) + return found +} + +// FindParentSpans finds spans that have no parents; they are at the top of any +// stack. +func (c Collector) FindParentSpans() tracetest.SpanStubs { + var found = tracetest.SpanStubs{} + for _, s := range c.Spans { + if s.Parent.SpanID() == [8]byte{} { + found = append(found, s) + } + } + return found +} + +// FindSpansWithParent finds spans that are children of the provided span. +func (c Collector) FindSpansWithParent(stub tracetest.SpanStub) tracetest.SpanStubs { + var found = tracetest.SpanStubs{} + for _, s := range c.Spans { + if s.Parent.SpanID() == stub.SpanContext.SpanID() { + found = append(found, s) + } + } + return found +} + +// SingleExceptionEvent is a test helper that asserts that a span, identified by a +// trace string (see TracesToStrings), contains a single exception, identified by +// the type (regexp) and message (regexp). If errorCode is true, then we also assert +// that the span has an error status code, with the same message (regexp). +func (c Collector) SingleExceptionEvent(t *testing.T, trace string, typeRe string, messageRe string, errorCode bool) { + t.Helper() + + // assert that the expected exception is recorded in the right place + et := c.FindSpanByTraceString(trace) + require.Len(t, et.Events, 1, "expected one event in span %v", trace) + ex := EventAsException(t, EventInTraceSpan(t, et, "exception")) + require.Regexp(t, typeRe, ex.Type) + require.Regexp(t, messageRe, ex.Message) + if errorCode { + require.Equal(t, codes.Error, et.Status.Code) + require.Regexp(t, messageRe, et.Status.Description) + } +} + +// SetupTracing returns a test helper that will collect all spans within +// a Collector. The returned helper function should be called at the point in +// a test where the spans are ready to be analyzed.
Any spans not properly +// completed at that point won't be represented in the Collector. +func SetupTracing() func(t *testing.T) *Collector { + collector := &Collector{} + tp := trace.NewTracerProvider(trace.WithBatcher(collector)) + otel.SetTracerProvider(tp) + + collect := func(t *testing.T) *Collector { + t.Helper() + + require.NoError(t, tp.Shutdown(context.Background())) + return collector + } + + return collect +} + +// AttributeValueInTraceSpan is a test helper that asserts that a span +// contains an attribute with the name provided, and returns the value of +// that attribute for further inspection. +func AttributeValueInTraceSpan(t *testing.T, stub tracetest.SpanStub, attributeName string) attribute.Value { + t.Helper() + + for _, attr := range stub.Attributes { + if attr.Key == attribute.Key(attributeName) { + return attr.Value + } + } + require.Fail(t, "did not find expected attribute %v on trace span %v", attributeName, stub.Name) + return attribute.Value{} +} + +// EventInTraceSpan is a test helper that asserts that a span +// contains an event with the name provided, and returns that event +// for further inspection. +func EventInTraceSpan(t *testing.T, stub tracetest.SpanStub, eventName string) trace.Event { + t.Helper() + + for _, evt := range stub.Events { + if evt.Name == eventName { + return evt + } + } + require.Fail(t, "did not find expected event %v on trace span %v", eventName, stub.Name) + return trace.Event{} +} + +// ExceptionEvent is a simplified string-form representation of an exception event +type ExceptionEvent struct { + Type string + Message string +} + +// EventAsException is a test helper that converts a trace event to an ExceptionEvent +// for easier inspection. +func EventAsException(t *testing.T, evt trace.Event) ExceptionEvent { + t.Helper() + + var typ string + var msg string + for _, attr := range evt.Attributes { + if attr.Key == attribute.Key("exception.type") { + typ = attr.Value.AsString() + } else if attr.Key == attribute.Key("exception.message") { + msg = attr.Value.AsString() + } + } + require.NotEmpty(t, typ, "expected non-empty exception.type attribute for %v", evt.Name) + require.NotEmpty(t, msg, "expected non-empty exception.message attribute for %v", evt.Name) + return ExceptionEvent{Type: typ, Message: msg} +}
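
For reference, below is a minimal sketch of how the new tracing helpers are meant to compose in a test. It is not part of the patch: the test name, the "example" tracer name, and the "request"/"verifyBlock" span names are invented for illustration, and a real go-graphsync test would get its spans from the code under test rather than emitting them by hand.

package testutil_test

import (
	"context"
	"testing"

	"github.com/ipfs/go-graphsync/testutil"
	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/otel"
)

func TestTracingHelpersSketch(t *testing.T) {
	// install a tracer provider whose batcher feeds the Collector;
	// the returned func flushes it and hands the Collector back
	collect := testutil.SetupTracing()

	// emit a tiny parent/child span pair so the sketch is self-contained
	ctx, parent := otel.Tracer("example").Start(context.Background(), "request")
	_, child := otel.Tracer("example").Start(ctx, "verifyBlock")
	child.End()
	parent.End()

	// shut the provider down and collect everything that was exported
	collector := collect(t)

	// each trace renders as "parent(n)->child(n)", per TracesToStrings
	require.Contains(t, collector.TracesToStrings(), "request(0)->verifyBlock(0)")

	// individual spans can be pulled out by trace string for closer inspection
	span := collector.FindSpanByTraceString("request(0)")
	require.Equal(t, "request", span.Name)
}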