Correct broken unaligned load/store in armv5 (#1355)

Update vendored github.com/boltdb/bolt to take care of issue #1354.

parent d800305b34
commit a78a0266c4
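For context on what "broken unaligned load/store" means here: the vendored fix (the `@@ -1,7 +1,28 @@` hunk below that adds a `func init()`) probes at runtime whether a 4-byte load from a non-4-byte-aligned address returns the expected bytes. A minimal standalone sketch of the same probe, mirroring the code added in that hunk:

```go
// Standalone sketch of the alignment probe this commit vendors in
// (mirrors the init() added in the @@ -1,7 +1,28 @@ hunk below).
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Load a uint32 from an address offset by 2 bytes, which is not
	// 4-byte aligned. On ARMv5-class cores (ARM9 and older) such a
	// load silently returns rotated bytes instead of faulting.
	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

	// A well-behaved little-endian CPU prints 0x11222211 here.
	fmt.Printf("unaligned read: %#x, broken: %v\n", val, val != 0x11222211)
}
```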
@@ -1,4 +1,4 @@
-Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
+Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
 ====

 Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
@@ -15,11 +15,11 @@ and setting values. That's it.

 ## Project Status

-Bolt is stable and the API is fixed. Full unit test coverage and randomized
-black box testing are used to ensure database consistency and thread safety.
-Bolt is currently in high-load production environments serving databases as
-large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
-services every day.
+Bolt is stable, the API is fixed, and the file format is fixed. Full unit
+test coverage and randomized black box testing are used to ensure database
+consistency and thread safety. Bolt is currently used in high-load production
+environments serving databases as large as 1TB. Many companies such as
+Shopify and Heroku use Bolt-backed services every day.

 ## Table of Contents

@@ -209,7 +209,7 @@ and then safely close your transaction if an error is returned. This is the
 recommended way to use Bolt transactions.

 However, sometimes you may want to manually start and end your transactions.
-You can use the `Tx.Begin()` function directly but **please** be sure to close
+You can use the `DB.Begin()` function directly but **please** be sure to close
 the transaction.

 ```go
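The corrected line above points at `DB.Begin()`; a minimal sketch of the manual-transaction pattern the README is describing (the bucket name is illustrative):

```go
// Manual transaction sketch for the DB.Begin() pattern referenced above:
// begin writable, defer Rollback as a safety net, Commit on success.
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback() // harmless after a successful Commit

if _, err := tx.CreateBucketIfNotExists([]byte("MyBucket")); err != nil {
	return err
}
return tx.Commit()
```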
@@ -313,7 +313,7 @@ func (s *Store) CreateUser(u *User) error {
 	// Generate ID for the user.
 	// This returns an error only if the Tx is closed or not writeable.
 	// That can't happen in an Update() call so I ignore the error check.
-	id, _ = b.NextSequence()
+	id, _ := b.NextSequence()
 	u.ID = int(id)

 	// Marshal user data into bytes.
@@ -395,7 +395,7 @@ db.View(func(tx *bolt.Tx) error {
 	c := tx.Bucket([]byte("MyBucket")).Cursor()

 	prefix := []byte("1234")
-	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
+	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
 		fmt.Printf("key=%s, value=%s\n", k, v)
 	}

@@ -448,6 +448,10 @@ db.View(func(tx *bolt.Tx) error {
 })
 ```

+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.

 ### Nested buckets

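A small sketch of the `copy()` advice added above, cloning a value out of the mmap-backed slice so it stays usable after the transaction closes (bucket and key names are illustrative):

```go
// Clone a value out of the transaction, per the ForEach() note above.
var valCopy []byte
err := db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	valCopy = make([]byte, len(v))
	copy(valCopy, v)
	return nil
})
if err != nil {
	return err
}
fmt.Printf("value, safely usable after the tx: %s\n", valCopy)
```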
@@ -460,6 +464,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
 func (*Bucket) DeleteBucket(key []byte) error
 ```

+Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.
+
+```go
+
+// createUser creates a new user in the given account.
+func createUser(accountID int, u *User) error {
+	// Start the transaction.
+	tx, err := db.Begin(true)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback()
+
+	// Retrieve the root bucket for the account.
+	// Assume this has already been created when the account was set up.
+	root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
+
+	// Setup the users bucket.
+	bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
+	if err != nil {
+		return err
+	}
+
+	// Generate an ID for the new user.
+	userID, err := bkt.NextSequence()
+	if err != nil {
+		return err
+	}
+	u.ID = userID
+
+	// Marshal and save the encoded user.
+	if buf, err := json.Marshal(u); err != nil {
+		return err
+	} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
+		return err
+	}
+
+	// Commit the transaction.
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+```
+

 ### Database backups

@@ -557,7 +610,7 @@ if err != nil {
 Bolt is able to run on mobile devices by leveraging the binding feature of the
 [gomobile](https://github.com/golang/mobile) tool. Create a struct that will
 contain your database logic and a reference to a `*bolt.DB` with a initializing
-contstructor that takes in a filepath where the database file will be stored.
+constructor that takes in a filepath where the database file will be stored.
 Neither Android nor iOS require extra permissions or cleanup from using this method.

 ```go
@@ -715,6 +768,9 @@ Here are a few things to note when evaluating and using Bolt:
   can be reused by a new page or can be unmapped from virtual memory and you'll
   see an `unexpected fault address` panic when accessing it.

+* Bolt uses an exclusive write lock on the database file so it cannot be
+  shared by multiple processes.
+
 * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
   buckets that have random inserts will cause your database to have very poor
   page utilization.
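To make the `Bucket.FillPercent` caveat above concrete, a small sketch; the 0.9 value and bucket name are illustrative, and the higher fill percent is only appropriate when keys are appended in order:

```go
// Raising FillPercent packs pages tighter, which helps append-only
// workloads but hurts random inserts (see the caveat above).
err := db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("Events"))
	if err != nil {
		return err
	}
	b.FillPercent = 0.9 // keep the default for randomly ordered keys
	return b.Put([]byte("0000000001"), []byte("first event"))
})
if err != nil {
	return err
}
```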
@@ -807,6 +863,7 @@ them via pull request.

 Below is a list of public, open source projects that use Bolt:

+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
 * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
 * [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
 * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
@@ -825,7 +882,6 @@ Below is a list of public, open source projects that use Bolt:
 * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
 * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
 * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
 * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
 * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
 * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
@@ -842,7 +898,18 @@ Below is a list of public, open source projects that use Bolt:
 * [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
 * [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
 * [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
 * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB

 If you are using Bolt in a project please send a pull request to add it to the list.
@@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -1,7 +1,28 @@
 package bolt

+import "unsafe"
+
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x7FFFFFFF // 2GB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned bool
+
+func init() {
+	// Simple check to see whether this arch handles unaligned load/stores
+	// correctly.
+
+	// ARM9 and older devices require load/stores to be from/to aligned
+	// addresses. If not, the lower 2 bits are cleared and that address is
+	// read in a jumbled up order.
+
+	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
+
+	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
+	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
+
+	brokenUnaligned = val != 0x11222211
+}
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -6,3 +6,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// brokenUnaligned Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -6,3 +6,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// brokenUnaligned Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB

 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
@@ -89,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 func funlock(db *DB) error {
 	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
 	db.lockfile.Close()
-	os.Remove(db.path+lockExt)
+	os.Remove(db.path + lockExt)
 	return err
 }

@@ -130,9 +130,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
 func (b *Bucket) openBucket(value []byte) *Bucket {
 	var child = newBucket(b.tx)

+	// If unaligned load/stores are broken on this arch and value is
+	// unaligned simply clone to an aligned byte array.
+	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
+
+	if unaligned {
+		value = cloneBytes(value)
+	}
+
 	// If this is a writable transaction then we need to copy the bucket entry.
 	// Read-only transactions can point directly at the mmap entry.
-	if b.tx.writable {
+	if b.tx.writable && !unaligned {
 		child.bucket = &bucket{}
 		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
 	} else {
@@ -167,9 +175,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
 	if bytes.Equal(key, k) {
 		if (flags & bucketLeafFlag) != 0 {
 			return nil, ErrBucketExists
-		} else {
-			return nil, ErrIncompatibleValue
 		}
+		return nil, ErrIncompatibleValue
 	}

 	// Create empty, inline bucket.
@@ -329,6 +336,28 @@ func (b *Bucket) Delete(key []byte) error {
 	return nil
 }

+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Materialize the root node if it hasn't been already so that the
+	// bucket will be saved during commit.
+	if b.rootNode == nil {
+		_ = b.node(b.root, nil)
+	}
+
+	// Increment and return the sequence.
+	b.bucket.sequence = v
+	return nil
+}
+
 // NextSequence returns an autoincrementing integer for the bucket.
 func (b *Bucket) NextSequence() (uint64, error) {
 	if b.tx.db == nil {
@@ -36,6 +36,9 @@ const (
 	DefaultAllocSize = 16 * 1024 * 1024
 )

+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
 // DB represents a collection of buckets persisted to a file on disk.
 // All data access is performed through transactions which can be obtained through the DB.
 // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@@ -94,7 +97,7 @@ type DB struct {
 	path     string
 	file     *os.File
 	lockfile *os.File // windows only
 	dataref  []byte   // mmap'ed readonly, write throws SEGV
 	data     *[maxMapSize]byte
 	datasz   int
 	filesz   int // current on disk file size
@@ -107,6 +110,8 @@ type DB struct {
 	freelist *freelist
 	stats    Stats

+	pagePool sync.Pool
+
 	batchMu sync.Mutex
 	batch   *batch

@@ -200,12 +205,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
 		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
 			m := db.pageInBuffer(buf[:], 0).meta()
 			if err := m.validate(); err != nil {
-				return nil, err
+				// If we can't read the page size, we can assume it's the same
+				// as the OS -- since that's how the page size was chosen in the
+				// first place.
+				//
+				// If the first page is invalid and this OS uses a different
+				// page size than what the database was created with then we
+				// are out of luck and cannot access the database.
+				db.pageSize = os.Getpagesize()
+			} else {
+				db.pageSize = int(m.pageSize)
 			}
-			db.pageSize = int(m.pageSize)
 		}
 	}

+	// Initialize page pool.
+	db.pagePool = sync.Pool{
+		New: func() interface{} {
+			return make([]byte, db.pageSize)
+		},
+	}
+
 	// Memory map the data file.
 	if err := db.mmap(options.InitialMmapSize); err != nil {
 		_ = db.close()
@@ -262,12 +282,13 @@ func (db *DB) mmap(minsz int) error {
 	db.meta0 = db.page(0).meta()
 	db.meta1 = db.page(1).meta()

-	// Validate the meta pages.
-	if err := db.meta0.validate(); err != nil {
-		return err
-	}
-	if err := db.meta1.validate(); err != nil {
-		return err
+	// Validate the meta pages. We only return an error if both meta pages fail
+	// validation, since meta0 failing validation means that it wasn't saved
+	// properly -- but we can recover using meta1. And vice-versa.
+	err0 := db.meta0.validate()
+	err1 := db.meta1.validate()
+	if err0 != nil && err1 != nil {
+		return err0
 	}

 	return nil
@@ -339,6 +360,7 @@ func (db *DB) init() error {
 		m.root = bucket{root: 3}
 		m.pgid = 4
 		m.txid = txid(i)
+		m.checksum = m.sum64()
 	}

 	// Write an empty freelist at page 3.
@@ -387,7 +409,6 @@ func (db *DB) close() error {
 	db.opened = false

 	db.freelist = nil
-	db.path = ""

 	// Clear ops.
 	db.ops.writeAt = nil
@@ -414,6 +435,7 @@ func (db *DB) close() error {
 		db.file = nil
 	}

+	db.path = ""
 	return nil
 }

@@ -530,7 +552,10 @@ func (db *DB) removeTx(tx *Tx) {
 	// Remove the transaction.
 	for i, t := range db.txs {
 		if t == tx {
-			db.txs = append(db.txs[:i], db.txs[i+1:]...)
+			last := len(db.txs) - 1
+			db.txs[i] = db.txs[last]
+			db.txs[last] = nil
+			db.txs = db.txs[:last]
 			break
 		}
 	}
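The removeTx hunk above swaps an append-based delete, which copies O(n) elements and leaves the dropped *Tx pointer alive in the backing array, for a constant-time swap removal that also nils the vacated slot. A generic sketch of the idiom, assuming slice order does not matter (`removeAt` is a hypothetical helper, not part of the commit):

```go
// Swap-remove idiom from the hunk above: O(1) deletion for unordered
// slices, with the freed slot nil'd so the GC can reclaim the element.
func removeAt(txs []*Tx, i int) []*Tx {
	last := len(txs) - 1
	txs[i] = txs[last] // move the tail element into the hole
	txs[last] = nil    // drop the dangling reference
	return txs[:last]
}
```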
@@ -778,16 +803,37 @@ func (db *DB) pageInBuffer(b []byte, id pgid) *page {

 // meta retrieves the current meta page reference.
 func (db *DB) meta() *meta {
-	if db.meta0.txid > db.meta1.txid {
-		return db.meta0
+	// We have to return the meta with the highest txid which doesn't fail
+	// validation. Otherwise, we can cause errors when in fact the database is
+	// in a consistent state. metaA is the one with the higher txid.
+	metaA := db.meta0
+	metaB := db.meta1
+	if db.meta1.txid > db.meta0.txid {
+		metaA = db.meta1
+		metaB = db.meta0
 	}
-	return db.meta1
+
+	// Use higher meta page if valid. Otherwise fallback to previous, if valid.
+	if err := metaA.validate(); err == nil {
+		return metaA
+	} else if err := metaB.validate(); err == nil {
+		return metaB
+	}
+
+	// This should never be reached, because both meta1 and meta0 were validated
+	// on mmap() and we do fsync() on every write.
+	panic("bolt.DB.meta(): invalid meta pages")
 }

 // allocate returns a contiguous block of memory starting at a given page.
 func (db *DB) allocate(count int) (*page, error) {
 	// Allocate a temporary buffer for the page.
-	buf := make([]byte, count*db.pageSize)
+	var buf []byte
+	if count == 1 {
+		buf = db.pagePool.Get().([]byte)
+	} else {
+		buf = make([]byte, count*db.pageSize)
+	}
 	p := (*page)(unsafe.Pointer(&buf[0]))
 	p.overflow = uint32(count - 1)

@@ -909,7 +955,7 @@ func (s *Stats) Sub(other *Stats) Stats {
 	diff.PendingPageN = s.PendingPageN
 	diff.FreeAlloc = s.FreeAlloc
 	diff.FreelistInuse = s.FreelistInuse
-	diff.TxN = other.TxN - s.TxN
+	diff.TxN = s.TxN - other.TxN
 	diff.TxStats = s.TxStats.Sub(&other.TxStats)
 	return diff
 }
@@ -937,12 +983,12 @@ type meta struct {

 // validate checks the marker bytes and version of the meta page to ensure it matches this binary.
 func (m *meta) validate() error {
-	if m.checksum != 0 && m.checksum != m.sum64() {
-		return ErrChecksum
-	} else if m.magic != magic {
+	if m.magic != magic {
 		return ErrInvalid
 	} else if m.version != version {
 		return ErrVersionMismatch
+	} else if m.checksum != 0 && m.checksum != m.sum64() {
+		return ErrChecksum
 	}
 	return nil
 }
@@ -12,7 +12,8 @@ var (
 	// already open.
 	ErrDatabaseOpen = errors.New("database already open")

-	// ErrInvalid is returned when a data file is not a Bolt-formatted database.
+	// ErrInvalid is returned when both meta pages on a database are invalid.
+	// This typically occurs when a file is not a bolt database.
 	ErrInvalid = errors.New("invalid database")

 	// ErrVersionMismatch is returned when the data file was created with a
@@ -24,7 +24,12 @@ func newFreelist() *freelist {

 // size returns the size of the page after serialization.
 func (f *freelist) size() int {
-	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
+	n := f.count()
+	if n >= 0xFFFF {
+		// The first element will be used to store the count. See freelist.write.
+		n++
+	}
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
 }

 // count returns count of pages on the freelist
@@ -46,16 +51,15 @@ func (f *freelist) pending_count() int {
 	return count
 }

-// all returns a list of all free ids and all pending ids in one sorted list.
-func (f *freelist) all() []pgid {
-	m := make(pgids, 0)
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+	m := make(pgids, 0, f.pending_count())
 	for _, list := range f.pending {
 		m = append(m, list...)
 	}

 	sort.Sort(m)
-	return pgids(f.ids).merge(m)
+	mergepgids(dst, f.ids, m)
 }

 // allocate returns the starting page id of a contiguous list of pages of a given size.
@@ -166,12 +170,16 @@ func (f *freelist) read(p *page) {
 	}

 	// Copy the list of page ids from the freelist.
-	ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
-	f.ids = make([]pgid, len(ids))
-	copy(f.ids, ids)
+	if count == 0 {
+		f.ids = nil
+	} else {
+		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
+		f.ids = make([]pgid, len(ids))
+		copy(f.ids, ids)

-	// Make sure they're sorted.
-	sort.Sort(pgids(f.ids))
+		// Make sure they're sorted.
+		sort.Sort(pgids(f.ids))
+	}

 	// Rebuild the page cache.
 	f.reindex()
@@ -182,20 +190,22 @@ func (f *freelist) read(p *page) {
 // become free.
 func (f *freelist) write(p *page) error {
 	// Combine the old free pgids and pgids waiting on an open transaction.
-	ids := f.all()

 	// Update the header flag.
 	p.flags |= freelistPageFlag

 	// The page.count can only hold up to 64k elements so if we overflow that
 	// number then we handle it by putting the size in the first element.
-	if len(ids) < 0xFFFF {
-		p.count = uint16(len(ids))
-		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
+	lenids := f.count()
+	if lenids == 0 {
+		p.count = uint16(lenids)
+	} else if lenids < 0xFFFF {
+		p.count = uint16(lenids)
+		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
 	} else {
 		p.count = 0xFFFF
-		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
-		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
+		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
 	}

 	return nil
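The size/copyall/write changes above amount to a small length-encoding scheme: page.count is a uint16, so when the freelist holds 0xFFFF or more ids the count field is set to the sentinel 0xFFFF and the true length is stored in the first pgid slot of the page body. A sketch of the matching decode step, assuming the same page layout as above (the upstream read path does the equivalent):

```go
// Decode the freelist length written by the hunk above: a count of
// 0xFFFF means the real length lives in the first pgid slot.
idx, count := 0, int(p.count)
if count == 0xFFFF {
	idx = 1 // the id data starts after the length slot
	count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
}
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
_ = ids
```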
@@ -230,7 +240,7 @@ func (f *freelist) reload(p *page) {

 // reindex rebuilds the free cache based on available and pending free lists.
 func (f *freelist) reindex() {
-	f.cache = make(map[pgid]bool)
+	f.cache = make(map[pgid]bool, len(f.ids))
 	for _, id := range f.ids {
 		f.cache[id] = true
 	}
@@ -201,6 +201,11 @@ func (n *node) write(p *page) {
 	}
 	p.count = uint16(len(n.inodes))

+	// Stop here if there are no items to write.
+	if p.count == 0 {
+		return
+	}
+
 	// Loop over each item and write it to the page.
 	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
 	for i, item := range n.inodes {
@@ -62,6 +62,9 @@ func (p *page) leafPageElement(index uint16) *leafPageElement {

 // leafPageElements retrieves a list of leaf nodes.
 func (p *page) leafPageElements() []leafPageElement {
+	if p.count == 0 {
+		return nil
+	}
 	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
 }

|
||||||
|
|
||||||
// branchPageElements retrieves a list of branch nodes.
|
// branchPageElements retrieves a list of branch nodes.
|
||||||
func (p *page) branchPageElements() []branchPageElement {
|
func (p *page) branchPageElements() []branchPageElement {
|
||||||
|
if p.count == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -139,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
 	// Return the opposite slice if one is nil.
 	if len(a) == 0 {
 		return b
-	} else if len(b) == 0 {
+	}
+	if len(b) == 0 {
 		return a
 	}
+	merged := make(pgids, len(a)+len(b))
+	mergepgids(merged, a, b)
+	return merged
+}

-	// Create a list to hold all elements from both lists.
-	merged := make(pgids, 0, len(a)+len(b))
+// mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func mergepgids(dst, a, b pgids) {
+	if len(dst) < len(a)+len(b) {
+		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+	}
+	// Copy in the opposite slice if one is nil.
+	if len(a) == 0 {
+		copy(dst, b)
+		return
+	}
+	if len(b) == 0 {
+		copy(dst, a)
+		return
+	}
+
+	// Merged will hold all elements from both lists.
+	merged := dst[:0]
+
 	// Assign lead to the slice with a lower starting value, follow to the higher value.
 	lead, follow := a, b
@@ -166,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
 	}

 	// Append what's left in follow.
-	merged = append(merged, follow...)
-
-	return merged
+	_ = append(merged, follow...)
 }
@@ -381,7 +381,9 @@ func (tx *Tx) Check() <-chan error {
 func (tx *Tx) check(ch chan error) {
 	// Check if any pages are double freed.
 	freed := make(map[pgid]bool)
-	for _, id := range tx.db.freelist.all() {
+	all := make([]pgid, tx.db.freelist.count())
+	tx.db.freelist.copyall(all)
+	for _, id := range all {
 		if freed[id] {
 			ch <- fmt.Errorf("page %d: already freed", id)
 		}
@@ -473,6 +475,8 @@ func (tx *Tx) write() error {
 	for _, p := range tx.pages {
 		pages = append(pages, p)
 	}
+	// Clear out page cache early.
+	tx.pages = make(map[pgid]*page)
 	sort.Sort(pages)

 	// Write pages to disk in order.
@@ -517,8 +521,22 @@ func (tx *Tx) write() error {
 		}
 	}

-	// Clear out page cache.
-	tx.pages = make(map[pgid]*page)
+	// Put small pages back to page pool.
+	for _, p := range pages {
+		// Ignore page sizes over 1 page.
+		// These are allocated using make() instead of the page pool.
+		if int(p.overflow) != 0 {
+			continue
+		}
+
+		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+
+		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+		for i := range buf {
+			buf[i] = 0
+		}
+		tx.db.pagePool.Put(buf)
+	}

 	return nil
 }
@@ -257,10 +257,10 @@
 			"revisionTime": "2016-12-08T13:07:38Z"
 		},
 		{
-			"checksumSHA1": "JQCb0Qb3kl18te1fq+2volRoqIs=",
+			"checksumSHA1": "J+kVue/HTH4sELlqMO4z0tx9DiA=",
 			"path": "github.com/boltdb/bolt",
-			"revision": "831b652a7f8dbefaf94da0eb66abd46c0c4bcf23",
-			"revisionTime": "2016-03-26T03:18:27Z"
+			"revision": "e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd",
+			"revisionTime": "2017-01-31T19:20:18Z"
 		},
 		{
 			"checksumSHA1": "NClRfzxXDSt/g4lM5BIkKhYRVoQ=",

@@ -1158,17 +1158,17 @@
 			"revisionTime": "2016-11-02T13:18:01Z"
 		},
 		{
-			"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",
-			"path": "golang.org/x/crypto/cast5",
-			"revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
-			"revisionTime": "2017-01-13T19:21:00Z"
-		},
-		{
 			"checksumSHA1": "pkrINpw0HkmO+18SdtSjje9MB9g=",
 			"path": "github.com/yohcop/openid-go",
 			"revision": "2c050d2dae5345c417db301f11fda6fbf5ad0f0a",
 			"revisionTime": "2016-09-14T08:04:27Z"
 		},
+		{
+			"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",
+			"path": "golang.org/x/crypto/cast5",
+			"revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
+			"revisionTime": "2017-01-13T19:21:00Z"
+		},
 		{
 			"checksumSHA1": "dwOedwBJ1EIK9+S3t108Bx054Y8=",
 			"path": "golang.org/x/crypto/curve25519",