chore: remove wrong duplicate word #3785

Merged
merged 1 commit on Nov 15, 2022

2 changes: 1 addition & 1 deletion cmd/osnadmin/main_test.go
@@ -677,7 +677,7 @@ var _ = Describe("osnadmin", func() {
ordererCACert = filepath.Join(tempDir, "server-ca+intermediate-ca.pem")
})

It("uses the channel participation API to list all application and and the system channel (when it exists)", func() {
It("uses the channel participation API to list all application and the system channel (when it exists)", func() {
args := []string{
"channel",
"list",
2 changes: 1 addition & 1 deletion common/channelconfig/api.go
@@ -67,7 +67,7 @@ type Channel interface {
// Merkle tree to compute the BlockData hash
BlockDataHashingStructureWidth() uint32

- // OrdererAddresses returns the list of valid orderer addresses to connect to to invoke Broadcast/Deliver
+ // OrdererAddresses returns the list of valid orderer addresses to connect to invoke Broadcast/Deliver
OrdererAddresses() []string

// Capabilities defines the capabilities for a channel
4 changes: 2 additions & 2 deletions common/channelconfig/channel.go
@@ -52,7 +52,7 @@ type ChannelValues interface {
// Merkle tree to compute the BlockData hash
BlockDataHashingStructureWidth() uint32

- // OrdererAddresses returns the list of valid orderer addresses to connect to to invoke Broadcast/Deliver
+ // OrdererAddresses returns the list of valid orderer addresses to connect to invoke Broadcast/Deliver
OrdererAddresses() []string
}

@@ -151,7 +151,7 @@ func (cc *ChannelConfig) BlockDataHashingStructureWidth() uint32 {
return cc.protos.BlockDataHashingStructure.Width
}

- // OrdererAddresses returns the list of valid orderer addresses to connect to to invoke Broadcast/Deliver
+ // OrdererAddresses returns the list of valid orderer addresses to connect to invoke Broadcast/Deliver
func (cc *ChannelConfig) OrdererAddresses() []string {
return cc.protos.OrdererAddresses.Addresses
}
2 changes: 1 addition & 1 deletion common/channelconfig/standardvalues.go
@@ -55,7 +55,7 @@ func NewStandardValues(protosStructs ...interface{}) (*StandardValues, error) {

// Deserialize looks up the backing Values proto of the given name, unmarshals the given bytes
// to populate the backing message structure, and returns a referenced to the retained deserialized
- // message (or an error, either because the key did not exist, or there was an an error unmarshalling
+ // message (or an error, either because the key did not exist, or there was an error unmarshalling
func (sv *StandardValues) Deserialize(key string, value []byte) (proto.Message, error) {
msg, ok := sv.lookup[key]
if !ok {
2 changes: 1 addition & 1 deletion common/channelconfig/util.go
@@ -162,7 +162,7 @@ func MSPValue(mspDef *mspprotos.MSPConfig) *StandardConfigValue {
}
}

- // CapabilitiesValue returns the config definition for a a set of capabilities.
+ // CapabilitiesValue returns the config definition for a set of capabilities.
// It is a value for the /Channel/Orderer, Channel/Application/, and /Channel groups.
func CapabilitiesValue(capabilities map[string]bool) *StandardConfigValue {
c := &cb.Capabilities{
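For context on the hunk above: CapabilitiesValue takes a map of capability names to enabled flags and wraps the resulting cb.Capabilities message in a config value. A minimal usage sketch, not part of this change, assuming the exported channelconfig package path and the StandardConfigValue.Key accessor:

```go
package main

import (
	"fmt"

	"github.com/hyperledger/fabric/common/channelconfig"
)

func main() {
	// Build the config value for a single enabled capability; entries mapped
	// to false are treated as not required.
	val := channelconfig.CapabilitiesValue(map[string]bool{"V2_0": true})

	// Key reports where this value belongs in the config group's Values map.
	fmt.Println(val.Key())
}
```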
2 changes: 1 addition & 1 deletion common/metrics/cmd/gendoc/main.go
@@ -17,7 +17,7 @@ import (
"golang.org/x/tools/go/packages"
)

- // Gendoc can be used used to discover the metrics options declared at the
+ // Gendoc can be used to discover the metrics options declared at the
// package level in the fabric tree and output a table that can be used in the
// documentation.

2 changes: 1 addition & 1 deletion core/chaincode/handler_test.go
@@ -1027,7 +1027,7 @@ var _ = Describe("Handler", func() {
// ensure that the access cache is used
})

It("returns the the response message from GetPrivateData", func() {
It("returns the response message from GetPrivateData", func() {
fakeCollectionStore.RetrieveReadWritePermissionReturns(true, false, nil) // to
resp, err := handler.HandleGetState(incomingMessage, txContext)
Expect(err).NotTo(HaveOccurred())
2 changes: 1 addition & 1 deletion core/chaincode/lifecycle/cache.go
@@ -136,7 +136,7 @@ func NewCache(resources *Resources, myOrgMSPID string, metadataManager MetadataH
}

// InitializeLocalChaincodes should be called once after cache creation (timing doesn't matter,
- // though already installed chaincodes will not be invokable until it it completes). Ideally,
+ // though already installed chaincodes will not be invokable until it completes). Ideally,
// this would be part of the constructor, but, we cannot rely on the chaincode store being created
// before the cache is created.
func (c *Cache) InitializeLocalChaincodes() error {
2 changes: 1 addition & 1 deletion core/chaincode/transaction_context_test.go
@@ -119,7 +119,7 @@ var _ = Describe("TransactionContext", func() {
})

Describe("CleanupQueryContext", func() {
It("removes references to the the iterator and results", func() {
It("removes references to the iterator and results", func() {
transactionContext.InitializeQueryContext("query-id", resultsIterator)
transactionContext.CleanupQueryContext("query-id")

2 changes: 1 addition & 1 deletion core/endorser/msgvalidation.go
@@ -37,7 +37,7 @@ func (up *UnpackedProposal) TxID() string {
return up.ChannelHeader.TxId
}

- // UnpackProposal creates an an *UnpackedProposal which is guaranteed to have
+ // UnpackProposal creates an *UnpackedProposal which is guaranteed to have
// no zero-ed fields or it returns an error.
func UnpackProposal(signedProp *peer.SignedProposal) (*UnpackedProposal, error) {
prop, err := protoutil.UnmarshalProposal(signedProp.ProposalBytes)
@@ -1358,7 +1358,7 @@ func validateUpgradeWithCollection(t *testing.T, V1_2Validation bool) {

ccver = "3"

- // Test 4: valid collection config config and peer in V1_2Validation mode --> success
+ // Test 4: valid collection config and peer in V1_2Validation mode --> success
ccp = &peer.CollectionConfigPackage{Config: []*peer.CollectionConfig{coll1, coll2, coll3}}
ccpBytes, err = proto.Marshal(ccp)
require.NoError(t, err)
@@ -1404,7 +1404,7 @@ func validateUpgradeWithCollection(t *testing.T, V1_2Validation bool) {

ccver = "3"

- // Test 4: valid collection config config and peer in V1_2Validation mode --> success
+ // Test 4: valid collection config and peer in V1_2Validation mode --> success
ccp = &peer.CollectionConfigPackage{Config: []*peer.CollectionConfig{coll1, coll2, coll3}}
ccpBytes, err = proto.Marshal(ccp)
require.NoError(t, err)
@@ -65,7 +65,7 @@ func buildExpirySchedule(
// i.e., when these private data key and it's hashed-keys are going to be expired
// Note that the 'hashedUpdateKeys' may be superset of the pvtUpdates. This is because,
// the peer may not receive all the private data either because the peer is not eligible for certain private data
- // or because we allow proceeding with the missing private data data
+ // or because we allow proceeding with the missing private data
for pvtUpdateKey, vv := range pvtUpdates.ToCompositeKeyMap() {
keyHash := util.ComputeStringHash(pvtUpdateKey.Key)
hashedCompisiteKey := privacyenabledstate.HashedCompositeKey{
@@ -831,7 +831,7 @@ func (dbclient *couchDatabase) readDoc(id string) (*couchDoc, string, error) {
// readDocRange method provides function to a range of documents based on the start and end keys
// startKey and endKey can also be empty strings. If startKey and endKey are empty, all documents are returned
// This function provides a limit option to specify the max number of entries and is supplied by config.
- // Skip is reserved for possible future future use.
+ // Skip is reserved for possible future use.
func (dbclient *couchDatabase) readDocRange(startKey, endKey string, limit int32) ([]*queryResult, string, error) {
dbName := dbclient.dbName
couchdbLogger.Debugf("[%s] Entering ReadDocRange() startKey=%s, endKey=%s", dbName, startKey, endKey)
2 changes: 1 addition & 1 deletion core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go
@@ -200,7 +200,7 @@ func (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAnd
// (1) constructs the unique pvt data from the passed reconciledPvtdata
// (2) acquire a lock on oldBlockCommit
// (3) checks for stale pvtData by comparing [version, valueHash] and removes stale data
- // (4) creates update batch from the the non-stale pvtData
+ // (4) creates update batch from the non-stale pvtData
// (5) update the BTL bookkeeping managed by the purge manager and update expiring keys.
// (6) commit the non-stale pvt data to the stateDB
// This function assumes that the passed input contains only transactions that had been
2 changes: 1 addition & 1 deletion core/ledger/util/util.go
@@ -36,7 +36,7 @@ func GetSortedKeys(m interface{}) []string {
}

// GetValuesBySortedKeys returns the values of the map (mapPtr) in the list (listPtr) in the sorted order of key of the map
- // This function assumes that the mapPtr is a pointer to a map and listPtr is is a pointer to a list. Further type of keys of the
+ // This function assumes that the mapPtr is a pointer to a map and listPtr is a pointer to a list. Further type of keys of the
// map are assumed to be string and the types of the values of the maps and the list are same
func GetValuesBySortedKeys(mapPtr interface{}, listPtr interface{}) {
mapVal := reflect.ValueOf(mapPtr).Elem()
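As a side note on the function documented above: GetValuesBySortedKeys copies the map's values into the supplied list in the sorted order of the map's string keys. A small illustrative sketch, assuming the core/ledger/util import path; it is not part of this change:

```go
package main

import (
	"fmt"

	"github.com/hyperledger/fabric/core/ledger/util"
)

func main() {
	m := map[string]string{"b": "second", "a": "first", "c": "third"}
	var l []string

	// Both arguments are pointers, as the doc comment requires: a pointer to
	// the map and a pointer to the list; values are appended in key-sorted order.
	util.GetValuesBySortedKeys(&m, &l)

	fmt.Println(l) // expected: [first second third]
}
```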
2 changes: 1 addition & 1 deletion core/scc/cscc/configure_test.go
@@ -131,7 +131,7 @@ func TestConfigerInvokeInvalidParameters(t *testing.T) {
t,
int32(shim.OK),
res.Status,
"invoke invoke expected wrong function name provided",
"invoke expected wrong function name provided",
)
require.Equal(t, "Requested function fooFunction not found.", res.Message)

2 changes: 1 addition & 1 deletion core/scc/lscc/lscc.go
@@ -542,7 +542,7 @@ func (lscc *SCC) getCCCode(ccname string, cdbytes []byte) (*pb.ChaincodeDeployme

// this is the big test and the reason every launch should go through
// getChaincode call. We validate the chaincode entry against the
- // the chaincode in FS
+ // chaincode in FS
if err = ccpack.ValidateCC(cd); err != nil {
return nil, nil, InvalidCCOnFSError(err.Error())
}
2 changes: 1 addition & 1 deletion discovery/endorsement/endorsement_test.go
@@ -453,7 +453,7 @@ func TestPeersForEndorsement(t *testing.T) {
// 2 principal combinations: p0 and p6, or p12 alone.
// The collection has p0, p6, and p12 in it.
// The chaincode EP is (p0 and p6) or p12.
- // However, the the chaincode has a collection level EP that requires p6 and p12.
+ // However, the chaincode has a collection level EP that requires p6 and p12.
// Thus, the only combination that can satisfy would be p6 and p12.
collectionOrgs := []*msp.MSPPrincipal{
peerRole("p0"),
2 changes: 1 addition & 1 deletion gossip/comm/crypto_test.go
@@ -114,7 +114,7 @@ func TestCertificateExtraction(t *testing.T) {
require.Equal(t, clientCertHash, srv.remoteCertHash, "Server side and client hash aren't equal")
}

- // GenerateCertificatesOrPanic generates a a random pair of public and private keys
+ // GenerateCertificatesOrPanic generates a random pair of public and private keys
// and return TLS certificate.
func GenerateCertificatesOrPanic() tls.Certificate {
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
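The helper touched above lives in the gossip/comm package's test code, so it is only reachable from tests in that package. A hedged sketch of how such a test might use it, assuming the in-package test convention and a hypothetical test name; not part of this change:

```go
package comm

import (
	"crypto/tls"
	"testing"
)

// TestGeneratedCertificateUsable is a hypothetical test illustrating the helper.
func TestGeneratedCertificateUsable(t *testing.T) {
	// GenerateCertificatesOrPanic returns a freshly generated key pair wrapped
	// in a tls.Certificate, ready for a test server's or client's tls.Config.
	cert := GenerateCertificatesOrPanic()

	cfg := &tls.Config{Certificates: []tls.Certificate{cert}}
	if len(cfg.Certificates) != 1 {
		t.Fatal("expected exactly one certificate in the TLS config")
	}
}
```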
2 changes: 1 addition & 1 deletion gossip/discovery/discovery_test.go
@@ -1469,7 +1469,7 @@ func TestMsgStoreExpirationWithMembershipMessages(t *testing.T) {
defer instances[index].discoveryImpl().lock.RUnlock()
require.Empty(t, instances[index].discoveryImpl().aliveLastTS, fmt.Sprint(step, " Data from alive msg still exists in aliveLastTS of discovery inst ", index))
require.Empty(t, instances[index].discoveryImpl().deadLastTS, fmt.Sprint(step, " Data from alive msg still exists in deadLastTS of discovery inst ", index))
- require.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still still contains data related to Alive msg: discovery inst ", index))
+ require.Empty(t, instances[index].discoveryImpl().id2Member, fmt.Sprint(step, " id2Member mapping still contains data related to Alive msg: discovery inst ", index))
require.Empty(t, instances[index].discoveryImpl().msgStore.Get(), fmt.Sprint(step, " Expired Alive msg still stored in store of discovery inst ", index))
require.Zero(t, instances[index].discoveryImpl().aliveMembership.Size(), fmt.Sprint(step, " Alive membership list is not empty, discovery instance", index))
require.Zero(t, instances[index].discoveryImpl().deadMembership.Size(), fmt.Sprint(step, " Dead membership list is not empty, discovery instance", index))
2 changes: 1 addition & 1 deletion gossip/gossip/gossip_impl.go
@@ -434,7 +434,7 @@ func (g *Node) sendGossipBatch(a []interface{}) {
// For efficiency, we first isolate all the messages that have the same routing policy
// and send them together, and only after that move to the next group of messages.
// i.e: we send all blocks of channel C to the same group of peers,
- // and send all StateInfo messages to the same group of peers, etc. etc.
+ // and send all StateInfo messages to the same group of peers, etc.
// When we send blocks, we send only to peers that advertised themselves in the channel.
// When we send StateInfo messages, we send to peers in the channel.
// When we send messages that are marked to be sent only within the org, we send all of these messages
2 changes: 1 addition & 1 deletion integration/chaincode/kvexecutor/chaincode.go
@@ -17,7 +17,7 @@ import (
)

// KVExecutor is a chaincode implementation that takes a KVData array as read parameter
- // and a a KVData array as write parameter, and then calls GetXXX/PutXXX methods to read and write
+ // and a KVData array as write parameter, and then calls GetXXX/PutXXX methods to read and write
// state/collection data. Both input params should be marshalled json data and then base64 encoded.
type KVExecutor struct{}

2 changes: 1 addition & 1 deletion internal/pkg/peer/orderers/connection_test.go
@@ -318,7 +318,7 @@ var _ = Describe("Connection", func() {
})
})

When("an update modifies the global endpoints but does does not affect the org endpoints", func() {
When("an update modifies the global endpoints but does not affect the org endpoints", func() {
BeforeEach(func() {
cs.Update(nil, map[string]orderers.OrdererOrg{
"org1": org1,
4 changes: 2 additions & 2 deletions msp/nodeous_test.go
@@ -316,9 +316,9 @@ func TestLoad142MSPWithInvalidAdminConfiguration(t *testing.T) {

func TestAdminInAdmincertsWith143MSP(t *testing.T) {
// testdata/nodeouadminclient enables NodeOU classification and contains in the admincerts folder
- // a certificate classified as client. This test checks that that identity is considered an admin anyway.
+ // a certificate classified as client. This test checks that identity is considered an admin anyway.
// testdata/nodeouadminclient2 enables NodeOU classification and contains in the admincerts folder
- // a certificate classified as client. This test checks that that identity is considered an admin anyway.
+ // a certificate classified as client. This test checks that identity is considered an admin anyway.
// Notice that the configuration used is one that is usually expected for MSP version < 1.4.3 which
// only define peer and client OU.
testFolders := []string{"testdata/nodeouadminclient", "testdata/nodeouadminclient2"}
2 changes: 1 addition & 1 deletion orderer/common/bootstrap/bootstrap.go
@@ -14,7 +14,7 @@ type Helper interface {
GenesisBlock() *ab.Block
}

- // Replacer provides the ability to to replace the current genesis block used
+ // Replacer provides the ability to replace the current genesis block used
// for bootstrapping with the supplied block. It is used during consensus-type
// migration in order to replace the original genesis block used for
// bootstrapping with the latest config block of the system channel, which
2 changes: 1 addition & 1 deletion orderer/common/cluster/replication_test.go
@@ -935,7 +935,7 @@ func testBlockPullerFromConfig(t *testing.T, blockVerifiers []cluster.BlockVerif
validBlock := &common.Block{}
require.NoError(t, proto.Unmarshal(blockBytes, validBlock))

- // And inject into it a 127.0.0.1 orderer endpoint endpoint and a new TLS CA certificate.
+ // And inject into it a 127.0.0.1 orderer endpoint and a new TLS CA certificate.
injectTLSCACert(t, validBlock, caCert)
injectGlobalOrdererEndpoint(t, validBlock, osn.srv.Address())
validBlock.Header.DataHash = protoutil.BlockDataHash(validBlock.Data)
2 changes: 1 addition & 1 deletion orderer/common/msgprocessor/maintenancefilter.go
@@ -154,7 +154,7 @@ func (mf *MaintenanceFilter) inspect(configEnvelope *cb.ConfigEnvelope, ordererC
return nil
}

- // ensureConsensusTypeChangeOnly checks that the only change is the the Channel/Orderer group, and within that,
+ // ensureConsensusTypeChangeOnly checks that the only change is the Channel/Orderer group, and within that,
// only to the ConsensusType value.
func (mf *MaintenanceFilter) ensureConsensusTypeChangeOnly(configEnvelope *cb.ConfigEnvelope) error {
configUpdateEnv, err := protoutil.EnvelopeToConfigUpdate(configEnvelope.LastUpdate)
2 changes: 1 addition & 1 deletion orderer/common/server/etcdraft_test.go
@@ -202,7 +202,7 @@ func testEtcdRaftOSNSuccess(gt *GomegaWithT, configPath, configtxgen, orderer, c
// Consensus.EvictionSuspicion is not specified in orderer.yaml, so let's ensure
// it is really configured autonomously via the etcdraft chain itself.
gt.Eventually(ordererProcess.Err, time.Minute).Should(gbytes.Say("EvictionSuspicion not set, defaulting to 10m"))
- // Wait until the the node starts up and elects itself as a single leader in a single node cluster.
+ // Wait until the node starts up and elects itself as a single leader in a single node cluster.
gt.Eventually(ordererProcess.Err, time.Minute).Should(gbytes.Say("Beginning to serve requests"))
gt.Eventually(ordererProcess.Err, time.Minute).Should(gbytes.Say("becomeLeader"))
}
4 changes: 2 additions & 2 deletions pkg/tx/interfaces.go
@@ -45,7 +45,7 @@ type ProcessorCreator interface {
//
// The intent is to support different transaction types via interface Processor such as pure endorser transactions,
// pure post-order transactions, and a mixed transaction - e.g., a transaction that combines an endorser transaction and
- // and a post-order transaction (say, a token transaction).
+ // a post-order transaction (say, a token transaction).
//
// Below is the detail description of the semantics of the function `Process`
// In order to process a transaction on a committing peer, we first evaluate the simulated readwrite set of the transaction
@@ -119,7 +119,7 @@ type ReadHinter interface {
}

// Reprocessor is an optional interface that a `Processor` is encouraged to implement if a
- // a significant large number of transactions of the corresponding type are expected to be present and
+ // significant large number of transactions of the corresponding type are expected to be present and
// validation of the transaction is significantly resource consuming (e.g., signature matching/crypto operations)
// as compare to manipulating the state.
// The main context in which the function in this interface is to be invoked is to rebuild the ledger constructs such as