Skip to content

Commit 12a6a9c

Browse files
lsgrep authored and mariameda committed
eth: fix typos (ethereum#16414)
1 parent 20c7578 commit 12a6a9c

File tree

11 files changed

+29
-29
lines changed

11 files changed

+29
-29
lines changed

eth/backend.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ type Ethereum struct {
6363
chainConfig *params.ChainConfig
6464

6565
// Channel for shutting down the service
66-
shutdownChan chan bool // Channel for shutting down the ethereum
66+
shutdownChan chan bool // Channel for shutting down the Ethereum
6767
stopDbUpgrade func() error // stop chain db sequential key upgrade
6868

6969
// Handlers
@@ -352,7 +352,7 @@ func (s *Ethereum) StartMining(local bool) error {
352352
if local {
353353
// If local (CPU) mining is started, we can disable the transaction rejection
354354
// mechanism introduced to speed sync times. CPU mining on mainnet is ludicrous
355-
// so noone will ever hit this path, whereas marking sync done on CPU mining
355+
// so none will ever hit this path, whereas marking sync done on CPU mining
356356
// will ensure that private networks work in single miner mode too.
357357
atomic.StoreUint32(&s.protocolManager.acceptTxs, 1)
358358
}

eth/db_upgrade.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
6262
failed error
6363
)
6464
for failed == nil && it.Next() {
65-
// Skip any entries that don't look like old transaction meta entires (<hash>0x01)
65+
// Skip any entries that don't look like old transaction meta entries (<hash>0x01)
6666
key := it.Key()
6767
if len(key) != common.HashLength+1 || key[common.HashLength] != 0x01 {
6868
continue
@@ -86,7 +86,7 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
8686
}
8787
}
8888
// Convert the old metadata to a new lookup entry, delete duplicate data
89-
if failed = db.Put(append([]byte("l"), hash...), it.Value()); failed == nil { // Write the new looku entry
89+
if failed = db.Put(append([]byte("l"), hash...), it.Value()); failed == nil { // Write the new lookup entry
9090
if failed = db.Delete(hash); failed == nil { // Delete the duplicate transaction data
9191
if failed = db.Delete(append([]byte("receipts-"), hash...)); failed == nil { // Delete the duplicate receipt data
9292
if failed = db.Delete(key); failed != nil { // Delete the old transaction metadata

eth/downloader/downloader.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ var (
4747

4848
MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation
4949
rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
50-
rttMaxEstimate = 20 * time.Second // Maximum rount-trip time to target for download requests
50+
rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
5151
rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
5252
ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
5353
ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
@@ -884,7 +884,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
884884
// immediately to the header processor to keep the rest of the pipeline full even
885885
// in the case of header stalls.
886886
//
887-
// The method returs the entire filled skeleton and also the number of headers
887+
// The method returns the entire filled skeleton and also the number of headers
888888
// already forwarded for processing.
889889
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
890890
log.Debug("Filling up skeleton", "from", from)
@@ -1377,7 +1377,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
13771377
pivot = height - uint64(fsMinFullBlocks)
13781378
}
13791379
// To cater for moving pivot points, track the pivot block and subsequently
1380-
// accumulated download results separatey.
1380+
// accumulated download results separately.
13811381
var (
13821382
oldPivot *fetchResult // Locked in pivot block, might change eventually
13831383
oldTail []*fetchResult // Downloaded content after the pivot
@@ -1615,7 +1615,7 @@ func (d *Downloader) qosReduceConfidence() {
16151615
//
16161616
// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that
16171617
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
1618-
// be adapted to, but smaller ones are preffered (stabler download stream).
1618+
// be adapted to, but smaller ones are preferred (stabler download stream).
16191619
func (d *Downloader) requestRTT() time.Duration {
16201620
return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
16211621
}

eth/downloader/downloader_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,7 @@ func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentRec
159159
// Create the common suffix
160160
hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
161161

162-
// Create the forks, making the second heavyer if non balanced forks were requested
162+
// Create the forks, making the second heavier if non balanced forks were requested
163163
hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
164164
hashes1 = append(hashes1, hashes[1:]...)
165165

eth/downloader/fakepeer.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ import (
2727

2828
// FakePeer is a mock downloader peer that operates on a local database instance
2929
// instead of being an actual live node. It's useful for testing and to implement
30-
// sync commands from an xisting local database.
30+
// sync commands from an existing local database.
3131
type FakePeer struct {
3232
id string
3333
db ethdb.Database
@@ -48,7 +48,7 @@ func (p *FakePeer) Head() (common.Hash, *big.Int) {
4848
}
4949

5050
// RequestHeadersByHash implements downloader.Peer, returning a batch of headers
51-
// defined by the origin hash and the associaed query parameters.
51+
// defined by the origin hash and the associated query parameters.
5252
func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
5353
var (
5454
headers []*types.Header
@@ -92,7 +92,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
9292
}
9393

9494
// RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
95-
// defined by the origin number and the associaed query parameters.
95+
// defined by the origin number and the associated query parameters.
9696
func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
9797
var (
9898
headers []*types.Header

eth/downloader/peer.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -551,7 +551,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
551551
// medianRTT returns the median RTT of the peerset, considering only the tuning
552552
// peers if there are more peers available.
553553
func (ps *peerSet) medianRTT() time.Duration {
554-
// Gather all the currnetly measured round trip times
554+
// Gather all the currently measured round trip times
555555
ps.lock.RLock()
556556
defer ps.lock.RUnlock()
557557

eth/downloader/queue.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
275275
if q.headerResults != nil {
276276
panic("skeleton assembly already in progress")
277277
}
278-
// Shedule all the header retrieval tasks for the skeleton assembly
278+
// Schedule all the header retrieval tasks for the skeleton assembly
279279
q.headerTaskPool = make(map[uint64]*types.Header)
280280
q.headerTaskQueue = prque.New()
281281
q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains

eth/downloader/statesync.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ import (
3131
"github.com/ethereum/go-ethereum/trie"
3232
)
3333

34-
// stateReq represents a batch of state fetch requests groupped together into
34+
// stateReq represents a batch of state fetch requests grouped together into
3535
// a single data retrieval network packet.
3636
type stateReq struct {
3737
items []common.Hash // Hashes of the state items to download
@@ -139,7 +139,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
139139

140140
// Handle incoming state packs:
141141
case pack := <-d.stateCh:
142-
// Discard any data not requested (or previsouly timed out)
142+
// Discard any data not requested (or previously timed out)
143143
req := active[pack.PeerId()]
144144
if req == nil {
145145
log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
@@ -182,7 +182,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
182182
case req := <-d.trackStateReq:
183183
// If an active request already exists for this peer, we have a problem. In
184184
// theory the trie node schedule must never assign two requests to the same
185-
// peer. In practive however, a peer might receive a request, disconnect and
185+
// peer. In practice however, a peer might receive a request, disconnect and
186186
// immediately reconnect before the previous times out. In this case the first
187187
// request is never honored, alas we must not silently overwrite it, as that
188188
// causes valid requests to go missing and sync to get stuck.
@@ -228,7 +228,7 @@ type stateSync struct {
228228
err error // Any error hit during sync (set before completion)
229229
}
230230

231-
// stateTask represents a single trie node download taks, containing a set of
231+
// stateTask represents a single trie node download task, containing a set of
232232
// peers already attempted retrieval from to detect stalled syncs and abort.
233233
type stateTask struct {
234234
attempts map[string]struct{}
@@ -333,7 +333,7 @@ func (s *stateSync) commit(force bool) error {
333333
return nil
334334
}
335335

336-
// assignTasks attempts to assing new tasks to all idle peers, either from the
336+
// assignTasks attempts to assign new tasks to all idle peers, either from the
337337
// batch currently being retried, or fetching new data from the trie sync itself.
338338
func (s *stateSync) assignTasks() {
339339
// Iterate over all idle peers and try to assign them state fetches

eth/fetcher/fetcher.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ type Fetcher struct {
127127
// Block cache
128128
queue *prque.Prque // Queue containing the import operations (block number sorted)
129129
queues map[string]int // Per peer block counts to prevent memory exhaustion
130-
queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)
130+
queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
131131

132132
// Callbacks
133133
getBlock blockRetrievalFn // Retrieves a block from the local chain

eth/filters/api.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ func (api *PublicFilterAPI) timeoutLoop() {
9898
// NewPendingTransactionFilter creates a filter that fetches pending transaction hashes
9999
// as transactions enter the pending state.
100100
//
101-
// It is part of the filter package because this filter can be used throug the
101+
// It is part of the filter package because this filter can be used through the
102102
// `eth_getFilterChanges` polling method that is also used for log filters.
103103
//
104104
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter

0 commit comments

Comments (0)