From 865c570e419a212105dc808fc4d622e1942262b0 Mon Sep 17 00:00:00 2001 From: Kegan Myers Date: Wed, 15 Apr 2020 23:53:50 -0500 Subject: [PATCH] initial commit --- .gitignore | 2 + Jenkinsfile | 15 + actions.go | 7 + common_test.go | 248 +++++++++++++++++ constraint_elementsnotnull.go | 55 ++++ constraint_elementsnotnull_test.go | 13 + constraint_foreign.go | 122 +++++++++ constraint_notnoull_test.go | 18 ++ constraint_notnull.go | 52 ++++ constraint_unique.go | 45 +++ constraint_unique_test.go | 74 +++++ constraints.go | 49 ++++ db.go | 262 ++++++++++++++++++ db_test.go | 119 ++++++++ go.mod | 8 + go.sum | 216 +++++++++++++++ index_array.go | 175 ++++++++++++ index_array_test.go | 15 + index_simple.go | 175 ++++++++++++ indicies.go | 168 ++++++++++++ indicies_test.go | 169 ++++++++++++ internals.go | 221 +++++++++++++++ iteration.go | 27 ++ query.go | 180 ++++++++++++ query_test.go | 192 +++++++++++++ queryop.go | 158 +++++++++++ stringy/stringy.go | 149 ++++++++++ stringy/tostring_test.go | 99 +++++++ table.go | 426 +++++++++++++++++++++++++++++ test.proto | 19 ++ test.sh | 14 + transaction.go | 14 + 32 files changed, 3506 insertions(+) create mode 100644 .gitignore create mode 100644 Jenkinsfile create mode 100644 actions.go create mode 100644 common_test.go create mode 100644 constraint_elementsnotnull.go create mode 100644 constraint_elementsnotnull_test.go create mode 100644 constraint_foreign.go create mode 100644 constraint_notnoull_test.go create mode 100644 constraint_notnull.go create mode 100644 constraint_unique.go create mode 100644 constraint_unique_test.go create mode 100644 constraints.go create mode 100644 db.go create mode 100644 db_test.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 index_array.go create mode 100644 index_array_test.go create mode 100644 index_simple.go create mode 100644 indicies.go create mode 100644 indicies_test.go create mode 100644 internals.go create mode 100644 iteration.go create mode 100644 query.go create mode 100644 query_test.go create mode 100644 queryop.go create mode 100644 stringy/stringy.go create mode 100644 stringy/tostring_test.go create mode 100644 table.go create mode 100644 test.proto create mode 100644 test.sh create mode 100644 transaction.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..fbfe564 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +proto_test.go +*.testdb diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..c13ec9a --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,15 @@ +pipeline { + agent any + stages { + stage('build') { + steps { + checkout scm + script { + docker.image("golang:1.14-alpine").inside { + sh './test.sh' + } + } + } + } + } +} diff --git a/actions.go b/actions.go new file mode 100644 index 0000000..5583a99 --- /dev/null +++ b/actions.go @@ -0,0 +1,7 @@ +package tdb + +import ( + bolt "go.etcd.io/bbolt" +) + +type BucketAction func(b *bolt.Bucket) error diff --git a/common_test.go b/common_test.go new file mode 100644 index 0000000..19a10c2 --- /dev/null +++ b/common_test.go @@ -0,0 +1,248 @@ +package tdb + +import ( + "errors" + "fmt" + "log" + "math/rand" + "os" + "reflect" + "strings" + "testing" + "time" + + // "encoding/ascii85" + // "log" + // "strconv" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + bolt "go.etcd.io/bbolt" +) + +const debug = false + +type testDb struct { + db DB + TEST_Main Table + TEST_OwnedBy Table + TEST_ArrayHas Table +} + +const ( + letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 
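+	// The constants and randomString below use the usual mask-and-shift trick for
+	// cheap random test identifiers: each src.Int63() yields up to ten 6-bit
+	// indices, which are masked with letterIdxMask and mapped into letterBytes.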
+ letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + sb.WriteByte(letterBytes[idx]) + i-- + } + cache >>= letterIdxBits + remain-- + } + + return sb.String() +} + +var tdb *testDb +var testFilename string + +func setupCustomTestDb(tb interface{}, schema func(db DBSetup) error) DB { + if debug { + log.Print("- Creating test database") + } + + testFilename = fmt.Sprintf("%s_test.testdb", randomString(16)) + b, err := bolt.Open(testFilename, 0600, nil) + if err != nil { + panic(fmt.Errorf("Unable to create test DB '%s'", testFilename)) + } + + if db, err := New(b, tb, schema); err != nil { + panic(err) + } else { + return db + } +} + +func setupTestDb() { + tdb = &testDb{} + + db := setupCustomTestDb(tdb, func(db DBSetup) error { + db.SetDebug(debug) + db.AddTableOrPanic(&TEST_Main{}, func(t TableSetup) error { + return nil + }) + db.AddTableOrPanic(&TEST_OwnedBy{}, func(t TableSetup) error { + t.AddIndexOrPanic(SimpleIndexOptions{ + ConstraintOptions: ConstraintOptions{ + Field: "MainId", + Foreign: "TEST_Main", + NotNull: true, + }, + }) + return nil + }) + db.AddTableOrPanic(&TEST_ArrayHas{}, func(t TableSetup) error { + t.AddArrayIndexOrPanic(ArrayIndexOptions{ + ElementsNotNull: true, + ConstraintOptions: ConstraintOptions{ + Field: "MainIds", + Foreign: "TEST_Main", + NotNull: true, + }, + }) + return nil + }) + return nil + }) + + tdb.db = db + + if tdb.TEST_Main == nil { + panic(errors.New("tdb.TEST_Main was not set")) + } + + if tdb.TEST_OwnedBy == nil { + panic(errors.New("tdb.TEST_OwnedBy was not set")) + } +} + +func cleanupTestDb() { + if debug { + log.Print("- Closing test database") + } + tdb.db.Close() + err := os.Remove(testFilename) + testFilename = "" + if err != nil { + panic(err) + } + if debug { + log.Print("- Closed test database") + } + return +} + +func assertUint64Equal(t *testing.T, actual, expected uint64, message string) { + assertUint64EqualEnd(t, actual, expected, message) +} + +func assertUint64EqualEnd(t *testing.T, actual, expected uint64, message string) bool { + if actual != expected { + t.Errorf("%s: got %s, expected %s", message, stringy.ToStringOrPanic(actual), stringy.ToStringOrPanic(expected)) + return true + } + return false +} + +func assertEqual(t *testing.T, actual, expected interface{}, message string) { + assertEqualEnd(t, actual, expected, message) +} + +func assertEqualEnd(t *testing.T, actual, expected interface{}, message string) bool { + if actual != expected { + t.Errorf("%s: got %s, expected %s", message, stringy.ToStringOrPanic(actual), stringy.ToStringOrPanic(expected)) + return true + } + return false +} + +func assertNotEqual(t *testing.T, actual, expected interface{}, message string) { + assertNotEqualEnd(t, actual, expected, message) +} + +func assertNotEqualEnd(t *testing.T, actual, expected interface{}, message string) bool { + if actual == expected { + t.Errorf("%s: got %s, expected anything else", message, stringy.ToStringOrPanic(actual)) + return true + } + return false +} + +func assertNotNil(t *testing.T, actual interface{}, message string) { + assertNotNilEnd(t, actual, message) +} + +func assertNotNilEnd(t *testing.T, actual interface{}, message string) bool { + if actual == nil { + t.Errorf("%s: got %#v, expected non-nil", message, actual) + return true + } + return false +} + +func assertNil(t *testing.T, actual interface{}, message string) { + assertNilEnd(t, 
actual, message) +} + +// typed nil is stupid +func assertNilEnd(t *testing.T, actual interface{}, message string) bool { + if actual != nil { + av := reflect.ValueOf(actual) + if !av.IsValid() { + return false + } + if !av.IsNil() || !av.IsZero() { + t.Errorf("%s: got %#v, expected nil", message, actual) + return true + } + } + return false +} + +func assertOk(t *testing.T, actual bool, message string) { + assertOkEnd(t, actual, message) +} + +func assertOkEnd(t *testing.T, actual bool, message string) bool { + if !actual { + t.Errorf("%s: expected to be ok", message) + return true + } + return false +} + +func dumpBuckets(db *bolt.DB) { + fmt.Printf("\n----------\nbuckets:\n\n") + if err := db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + fmt.Printf("%s\n", name) + return nil + }) + }); err != nil { + fmt.Printf("\nerror while dumping buckets: %s", err.Error()) + } +} + +func dumpKeyspace(db *bolt.DB, bucket string) { + fmt.Printf("\n----------\nkeys in '%s':\n\n", bucket) + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(bucket)) + if b == nil { + fmt.Printf(" (nil bucket)") + return nil + } + + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + fmt.Printf("%s\n", k) + } + return nil + }); err != nil { + fmt.Printf("\nerror while dumping keys: %s", err.Error()) + } +} diff --git a/constraint_elementsnotnull.go b/constraint_elementsnotnull.go new file mode 100644 index 0000000..b62d7d9 --- /dev/null +++ b/constraint_elementsnotnull.go @@ -0,0 +1,55 @@ +package tdb + +import ( + "errors" + "fmt" +) + +type elementsNotNullConstraint struct { + table *table + field dbField +} + +func newElementsNotNullConstraint(table *table, field dbField) (constraintish, error) { + if table == nil { + return nil, errors.New("[constraint.elementsnotnull] unable to create not-null without table") + } + + if !field.IsSliceish() { + return nil, errors.New("[constraint.elementsnotnull] field is not an array or slice") + } + + return &elementsNotNullConstraint{ + table: table, + field: field, + }, nil +} + +// func (c *notNullConstraint) validateRaw(tx *Tx, foreignKeys [][]byte) error { +// for _, foreignKey := range foreignKeys { +// for _, nullishValue := range nullishValues { +// if bytes.Equal(nullishValue, foreignKey) { +// c.table.debugLogf("[constraint.elementsnotnull.validateRaw] violation: '%s'.'%s' is null-ish value '%s'", c.table.name, c.field.Name, nullishValue) +// return fmt.Errorf("[constraint.elementsnotnull.validateRaw] violation: '%s'.'%s' is null-ish value '%s'", c.table.name, c.field.Name, nullishValue) +// } +// } +// } +// return nil +// } + +func (c *elementsNotNullConstraint) validate(tx *Tx, pv dbPtrValue) error { + c.table.debugLogf("[constraint.elementsnotnull.validate] validating not-null for '%s'.'%s'", c.table.name, c.field.Name) + val := pv.dangerous_Field(c.field) + if val.IsZero() { + c.table.debugLogf("[constraint.elementsnotnull.validate] '%s'.'%s' is zero value, not validating elements", c.table.name, c.field.Name) + return nil + } + lenVal := val.Len() + for i := 0; i < lenVal; i++ { + if val.Index(i).IsZero() { + c.table.debugLogf("[constraint.elementsnotnull.validate] violation: '%s'.'%s' contains zero value at index %d", c.table.name, c.field.Name, i) + return fmt.Errorf("[constraint.elementsnotnull.validate] violation: '%s'.'%s' contains zero value at index %d", c.table.name, c.field.Name, i) + } + } + return nil +} diff --git a/constraint_elementsnotnull_test.go 
b/constraint_elementsnotnull_test.go new file mode 100644 index 0000000..24e0fea --- /dev/null +++ b/constraint_elementsnotnull_test.go @@ -0,0 +1,13 @@ +package tdb + +import ( + "testing" +) + +func TestElementsNotNullConstraint(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + _, err := tdb.TEST_ArrayHas.Create(&TEST_ArrayHas{MainIds: []uint64{0}}) + assertNotNil(t, err, "Expected error while inserting nil value in array") +} diff --git a/constraint_foreign.go b/constraint_foreign.go new file mode 100644 index 0000000..ada952c --- /dev/null +++ b/constraint_foreign.go @@ -0,0 +1,122 @@ +package tdb + +import ( + "errors" + "fmt" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + bolt "go.etcd.io/bbolt" +) + +type foreignConstraint struct { + domestic *table + foreign *table + field dbField + index indexish +} +type foreignSimpleConstraint foreignConstraint +type foreignArrayConstraint foreignConstraint + +func validateForeignRaw(b *bolt.Bucket, foreignKey []byte) ConstraintValidationStatus { + if b.Get(foreignKey) == nil { + return ConstraintViolation + } + return ConstraintValidated +} + +func newSimpleForeignConstraint(domestic *table, foreign string, field dbField, index indexish) (constraintish, error) { + if domestic == nil { + return nil, errors.New("[constraint] [foreign] unable to create: no domestic table") + } + + if !field.IsUint64() { + return nil, fmt.Errorf("[constraint] [foreign] unable to create: '%s'.'%s' is not a uint64", domestic.name, field.Name) + } + + if foreign == "" { + return nil, errors.New("[constraint] [foreign] unable to create: no foreign table") + } + foreignTable, ok := domestic.db.tables[foreign] + if !ok { + return nil, fmt.Errorf("[constraint] [foreign] unable to create: no such table '%s'", foreign) + } + + if index == nil { + domestic.debugLogf("[constraint] [foreign] warning: creating constraint on '%s'.'%s' without index. 
will not check when foreign records are removed (to avoid table scan)", domestic.name, field.Name) + } + + return &foreignSimpleConstraint{ + domestic: domestic, + foreign: foreignTable, + field: field, + index: index, + }, nil +} + +func (c *foreignSimpleConstraint) validate(tx *Tx, pv dbPtrValue) error { // foreign keys must all be uint64, those are the only supported primary keys + foreignId := pv.dangerous_Field(c.field).Uint() + + // the foreign constraint is not responsible for enforcing nullability + if foreignId == 0 { + return nil + } + + foreignKey := []byte(stringy.LiteralUintToString(foreignId)) + if validateForeignRaw(c.foreign.bucket(tx), foreignKey) == ConstraintViolation { + c.domestic.debugLogf("[constraint] [foreign] violation: '%s' with Id '%s' does not exist", c.foreign.name, foreignKey) + return fmt.Errorf("[constraint] [foreign] violation: '%s' with Id '%s' does not exist", c.foreign.name, foreignKey) + } + return nil +} + +func newArrayForeignConstraint(domestic *table, foreign string, field dbField, index indexish) (constraintish, error) { + if domestic == nil { + return nil, errors.New("[constraint] [foreign] unable to create: no domestic table") + } + + if !field.IsUint64Slice() { + return nil, fmt.Errorf("[constraint] [foreign] unable to create: '%s'.'%s' is not a uint64 array", domestic.name, field.Name) + } + + if foreign == "" { + return nil, errors.New("[constraint] [foreign] unable to create: no foreign table") + } + foreignTable, ok := domestic.db.tables[foreign] + if !ok { + return nil, fmt.Errorf("[constraint] [foreign] unable to create: no such table '%s'", foreign) + } + + if index == nil { + domestic.debugLogf("[constraint] [foreign] warning: creating constraint on '%s'.'%s' without index. will not check when foreign records are removed (to avoid table scan)", domestic.name, field.Name) + } + + return &foreignArrayConstraint{ + domestic: domestic, + foreign: foreignTable, + field: field, + index: index, + }, nil +} + +func (c *foreignArrayConstraint) validate(tx *Tx, pv dbPtrValue) error { // foreign keys must all be uint64, those are the only supported primary keys + foreignIds := pv.dangerous_Field(c.field) + + foreignIdsLen := foreignIds.Len() + for i := 0; i < foreignIdsLen; i++ { + foreignId := foreignIds.Index(i).Uint() + // the foreign constraint is not responsible for enforcing nullability + if foreignId == 0 { + continue + } + + foreignKey := []byte(stringy.LiteralUintToString(foreignId)) + if validateForeignRaw(c.foreign.bucket(tx), foreignKey) == ConstraintViolation { + c.domestic.debugLogf("[constraint] [foreign] violation: '%s' with Id '%s' does not exist", c.foreign.name, foreignKey) + return fmt.Errorf("[constraint] [foreign] violation: '%s' with Id '%s' does not exist", c.foreign.name, foreignKey) + } + } + + return nil + +} diff --git a/constraint_notnoull_test.go b/constraint_notnoull_test.go new file mode 100644 index 0000000..c21b31b --- /dev/null +++ b/constraint_notnoull_test.go @@ -0,0 +1,18 @@ +package tdb + +import ( + "testing" +) + +func TestNotNullConstraint(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + id, err := tdb.TEST_OwnedBy.Create(&TEST_OwnedBy{}) + + if assertNotNilEnd(t, err, "Expected constraint error") { + return + } + + assertEqual(t, id, uint64(0), "Expected 0 (zero value) for inserted id") +} diff --git a/constraint_notnull.go b/constraint_notnull.go new file mode 100644 index 0000000..0950332 --- /dev/null +++ b/constraint_notnull.go @@ -0,0 +1,52 @@ +package tdb + +import ( + "bytes" + "errors" 
+ "fmt" + + "git.keganmyers.com/terribleplan/tdb/stringy" +) + +var nullishValues [][]byte = [][]byte{ + []byte(stringy.LiteralUintToString(uint64(0))), + []byte(stringy.LiteralIntToString(int64(0))), + []byte(""), + []byte("nil"), + []byte(""), +} + +type notNullConstraint struct { + table *table + field dbField +} + +func newNotNullConstraint(table *table, field dbField) (constraintish, error) { + if table == nil { + return nil, errors.New("[constraint.notnull] unable to create not-null without table") + } + + return ¬NullConstraint{ + table: table, + field: field, + }, nil +} + +func (c *notNullConstraint) validateRaw(tx *Tx, foreignKey []byte) error { + for _, nullishValue := range nullishValues { + if bytes.Equal(nullishValue, foreignKey) { + c.table.debugLogf("[constraint.notnull.validateRaw] violation: '%s'.'%s' is null-ish value '%s'", c.table.name, c.field.Name, nullishValue) + return fmt.Errorf("[constraint.notnull.validateRaw] violation: '%s'.'%s' is null-ish value '%s'", c.table.name, c.field.Name, nullishValue) + } + } + return nil +} + +func (c *notNullConstraint) validate(tx *Tx, pv dbPtrValue) error { + c.table.debugLogf("[constraint.notnull.validate] validating not-null for '%s'.'%s'", c.table.name, c.field.Name) + if pv.dangerous_Field(c.field).IsZero() { + c.table.debugLogf("[constraint.notnull.validate] violation: '%s'.'%s' is zero value", c.table.name, c.field.Name) + return fmt.Errorf("[constraint.notnull.validate] violation: '%s'.'%s' is zero value", c.table.name, c.field.Name) + } + return nil +} diff --git a/constraint_unique.go b/constraint_unique.go new file mode 100644 index 0000000..7790963 --- /dev/null +++ b/constraint_unique.go @@ -0,0 +1,45 @@ +package tdb + +import ( + "bytes" + "errors" + "fmt" +) + +type uniqueConstraint struct { + table *table + field dbField + index indexish +} + +func newUniqueConstraint(table *table, index indexish, field dbField) (constraintish, error) { + if table == nil { + return nil, errors.New("[constraint] [unique] unable to create without table") + } + if index == nil { + return nil, fmt.Errorf("[constraint] [unique] is only valid for indicies (to avoid full table scans)") + } + + return &uniqueConstraint{ + table: table, + field: field, + index: index, + }, nil +} + +func (c *uniqueConstraint) validate(tx *Tx, pv dbPtrValue) error { + indexedVals := c.index.indexedValues(pv) + keyValue := c.index.keyValue(pv) + for _, indexedVal := range indexedVals { + if err := c.index.iteratePrefixed(tx, indexedVal, func(indexed []byte) (IterationSignal, error) { + if !bytes.Equal(keyValue, indexed) { + c.table.debugLogf("[constraint] [unique] violation: record with '%s'.'%s' = '%s' already exists (id: %s)", c.table.name, c.field.Name, indexedVal, indexed) + return StopIteration, fmt.Errorf("[constraint] [unique] violation for field '%s'.'%s'", c.table.name, c.field.Name) + } + return StopIteration, nil + }); err != nil { + return err + } + } + return nil +} diff --git a/constraint_unique_test.go b/constraint_unique_test.go new file mode 100644 index 0000000..0ee8be1 --- /dev/null +++ b/constraint_unique_test.go @@ -0,0 +1,74 @@ +package tdb + +import ( + "errors" + "testing" +) + +func setupUniqueTestDb() { + tdb = &testDb{} + + db := setupCustomTestDb(tdb, func(db DBSetup) error { + db.SetDebug(debug) + db.AddTableOrPanic(&TEST_Main{}, func(t TableSetup) error { + t.AddIndexOrPanic(SimpleIndexOptions{ + ConstraintOptions: ConstraintOptions{ + Field: "Guarantee", + }, + Unique: true, + }) + return nil + }) + return nil + }) + + tdb.db = 
db + + if tdb.TEST_Main == nil { + panic(errors.New("tdb.TEST_Main was not set")) + } +} + +func TestUniqueConstraint(t *testing.T) { + setupUniqueTestDb() + defer cleanupTestDb() + + _, err := tdb.TEST_Main.Create(&TEST_Main{Guarantee: "asdf"}) + + if assertNilEnd(t, err, "Unable to insert first (unique) record") { + return + } + + id, err := tdb.TEST_Main.Create(&TEST_Main{Guarantee: "asdf"}) + + if assertNotNilEnd(t, err, "Expected constraint error") { + return + } + + assertEqual(t, id, uint64(0), "Expected 0 (zero value) for inserted id") +} + +func TestEmptyStringUniqueConstraint(t *testing.T) { + setupUniqueTestDb() + defer cleanupTestDb() + + _, err := tdb.TEST_Main.Create(&TEST_Main{Guarantee: "0"}) + + if assertNilEnd(t, err, "Unable to insert non-null record") { + return + } + + _, err = tdb.TEST_Main.Create(&TEST_Main{Guarantee: ""}) + + if assertNilEnd(t, err, "Unable to insert second (unique test) record") { + return + } + + id, err := tdb.TEST_Main.Create(&TEST_Main{Guarantee: ""}) + + if assertNotNilEnd(t, err, "Expected constraint error") { + return + } + + assertEqual(t, id, uint64(0), "Expected 0 (zero value) for inserted id") +} diff --git a/constraints.go b/constraints.go new file mode 100644 index 0000000..c8a932c --- /dev/null +++ b/constraints.go @@ -0,0 +1,49 @@ +package tdb + +import ( + "errors" + "fmt" + "strings" +) + +var ( + ConstraintValidated ConstraintValidationStatus = true + ConstraintViolation ConstraintValidationStatus = false +) + +type ConstraintValidationStatus bool + +type ConstraintOptions struct { + Table string + Field string + Foreign string + NotNull bool +} + +type constraintish interface { + validate(tx *Tx, val dbPtrValue) error +} + +type constraints []constraintish + +func (c constraints) validate(tx *Tx, val dbPtrValue) error { + errs := make([]error, 0) + for _, constraint := range c { + if err := constraint.validate(tx, val); err != nil { + errs = append(errs, err) + } + } + + errLen := len(errs) + if errLen == 0 { + return nil + } + + sb := &strings.Builder{} + sb.WriteString(fmt.Sprintf("%d constraint violation errors:", errLen)) + for _, e := range errs { + sb.WriteString("\n") + sb.WriteString(e.Error()) + } + return errors.New(sb.String()) +} diff --git a/db.go b/db.go new file mode 100644 index 0000000..c83e650 --- /dev/null +++ b/db.go @@ -0,0 +1,262 @@ +// Package tdb provides a terrible database built on top of bolt. +// It does all sorts of too-smart things with reflection that will +// either be great and make your life easier, or suck and you just +// shouldn't use this package. 
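+//
+// A minimal usage sketch, pieced together from the APIs exercised by the tests
+// in this commit (New/NewOrPanic, DBSetup.AddTableOrPanic, Table.Create/Get);
+// the MyTables and Widget names are hypothetical stand-ins for a caller-defined
+// handle struct and a generated protobuf message:
+//
+//	type MyTables struct {
+//		Widget Table
+//	}
+//
+//	b, _ := bolt.Open("app.db", 0600, nil)
+//	tables := &MyTables{}
+//	db := NewOrPanic(b, tables, func(db DBSetup) error {
+//		db.AddTableOrPanic(&Widget{}, func(t TableSetup) error { return nil })
+//		return nil
+//	})
+//	defer db.Close()
+//	id, _ := tables.Widget.Create(&Widget{})
+//	w, _ := tables.Widget.Get(id)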
+package tdb + +import ( + "fmt" + "log" + "reflect" + + "github.com/golang/protobuf/proto" + bolt "go.etcd.io/bbolt" +) + +const logPrefix = "[tdb] " + +type debugLogger interface { + debugLog(message string) + debugLogf(f string, args ...interface{}) +} + +type DB interface { + debugLogger + Transactable + Close() error + GetTable(name string) (Table, error) + GetTableOrPanic(name string) Table +} + +type DBSetup interface { + debugLogger + AddTable(thing proto.Message, createSchema CreateTableSchema) error + AddTableOrPanic(thing proto.Message, createSchema CreateTableSchema) + AddIndex(options SimpleIndexOptions) error + AddIndexOrPanic(options SimpleIndexOptions) + SetDebug(enabled bool) +} + +type CreateDBSchema func(DBSetup) error + +type db struct { + ready bool + closed bool + debug bool + b *bolt.DB + tables map[string]*table +} + +func New(b *bolt.DB, tableBucket interface{}, createSchema CreateDBSchema) (DB, error) { + tdb := &db{ + b: b, + tables: make(map[string]*table), + } + err := createSchema(tdb) + if err != nil { + return nil, err + } + tdb.debugLog("Schema creation completed successfuly, initializing...") + err = tdb.b.Update(func(tx *bolt.Tx) error { + return tdb.initialize(convertTx(tx)) + }) + if err != nil { + return nil, err + } + + tdb.debugLog("Initialization complete, populating table bucket...") + tdb.populateTableBucket(tableBucket) + + tdb.debugLog("Setup of new tdb complete... returning") + return tdb, err +} + +func NewOrPanic(b *bolt.DB, tableBucket interface{}, createSchema CreateDBSchema) DB { + tdb, err := New(b, tableBucket, createSchema) + if err != nil { + panic(err) + } + return tdb +} + +func (db *db) Close() error { + db.closed = true + return db.b.Close() +} + +func (db *db) populateTableBucket(tableBucket interface{}) { + if tableBucket == nil { + db.debugLog("[populate] no table bucket") + return + } + + bucketPtrVal := reflect.ValueOf(tableBucket) + + if bucketPtrVal.Kind() != reflect.Ptr { + db.debugLog("[populate] tableBucket is not a pointer") + return + } + + bucketVal := bucketPtrVal.Elem() + + if bucketVal.Kind() != reflect.Struct { + db.debugLog("[populate] tableBucket is not a ptr to a struct") + return + } + + tableBucketType := bucketVal.Type() + fieldCount := tableBucketType.NumField() + for i := 0; i < fieldCount; i++ { + db.populateField(tableBucketType.Field(i), bucketVal) + } +} + +func (db *db) populateField(field reflect.StructField, bucketVal reflect.Value) { + table, ok := db.tables[field.Name] + if !ok { + db.debugLogf("[populate] no such table '%s'", field.Name) + return + } + + tableType := reflect.TypeOf((*Table)(nil)).Elem() + if field.Type != tableType { + db.debugLogf("[populate] wrong types for '%s', got '%s', expected '%s'", field.Name, field.Type.String(), tableType.String()) + return + } + // maybe check CanSet()? 
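+	// Look the field up by name on the caller-provided struct and make sure it is
+	// settable (i.e. exported) before injecting the Table handle via reflection.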
+ bucketValField := bucketVal.FieldByName(field.Name) + if !bucketValField.CanSet() { + db.debugLogf("[populate] cannot set field '%s'", field.Name) + return + } + + db.debugLogf("[populate] set field '%s'", field.Name) + bucketValField.Set(reflect.ValueOf(table)) +} + +func (db *db) SetDebug(debug bool) { + db.debug = debug +} + +func (db *db) AddTable(thing proto.Message, createSchema CreateTableSchema) error { + t := dbTypeOf(thing) + db.debugLogf("AddTable invoked for type %s", t.Name) + + if _, has := db.tables[t.Name]; has { + return fmt.Errorf("Database already has table with name '%s'", t.Name) + } + + idField := t.IdField() + + table, err := newTable(db, t, idField, createSchema) + if err != nil { + return err + } + db.debugLogf("Table schema creation for '%s' completed successfuly", t.Name) + db.tables[t.Name] = table + return nil +} + +func (db *db) AddTableOrPanic(thing proto.Message, createSchema CreateTableSchema) { + if err := db.AddTable(thing, createSchema); err != nil { + panic(err) + } +} + +func (db *db) AddIndex(options SimpleIndexOptions) error { + table, ok := db.tables[options.Table] + if !ok { + return fmt.Errorf("No such table '%s'", options.Table) + } + return table.AddIndex(options) +} + +func (db *db) AddIndexOrPanic(options SimpleIndexOptions) { + if err := db.AddIndex(options); err != nil { + panic(err) + } +} + +func (db *db) debugLog(message string) { + if db.debug { + log.Print(logPrefix + message) + } +} + +func (db *db) debugLogf(f string, args ...interface{}) { + if db.debug { + log.Printf(logPrefix + fmt.Sprintf(f, args...)) + } +} + +func (db *db) initialize(tx *Tx) error { + err := db.initializeTables(tx) + if err != nil { + return err + } + db.debugLog("Initialization complete") + return nil +} + +func (db *db) initializeTables(tx *Tx) error { + for name, table := range db.tables { + db.debugLogf("Initializating table '%s'...", name) + err := table.initialize(tx) + if err != nil { + return err + } + db.debugLogf("Initialized table '%s'", name) + } + return nil +} + +func (db *db) GetTable(name string) (Table, error) { + table, ok := db.tables[name] + if !ok { + return nil, fmt.Errorf("No such table '%s'", name) + } + return table, nil +} + +func (db *db) GetTableOrPanic(name string) Table { + table, err := db.GetTable(name) + if err != nil { + panic(err) + } + return table +} + +func (db *db) ReadTx(t Transaction) error { + return db.b.View(func(tx *bolt.Tx) error { + return t(convertTx(tx)) + }) +} + +func (db *db) readTxHelper(t Transaction, txs ...*Tx) error { + txlen := len(txs) + if txlen > 1 { + db.debugLogf("[db.readTxHelper] Got %d transactions, can only handle 1.", txlen) + return fmt.Errorf("Got %d transactions, can only handle 1.", txlen) + } else if txlen == 1 { + db.debugLogf("[db.readTxHelper] Found existing transaction: %#v", txs) + return t(txs[0]) + } + return db.ReadTx(t) +} + +func (db *db) WriteTx(t Transaction) error { + return db.b.Update(func(tx *bolt.Tx) error { + return t(convertTx(tx)) + }) +} + +func (db *db) writeTxHelper(t Transaction, txs ...*Tx) error { + txlen := len(txs) + if txlen > 1 { + db.debugLogf("[db.readTxHelper] Got %d transactions, can only handle 1.", txlen) + return fmt.Errorf("Got %d transactions, can only handle 1.", txlen) + } else if txlen == 1 { + return t(txs[0]) + } + return db.WriteTx(t) +} diff --git a/db_test.go b/db_test.go new file mode 100644 index 0000000..846acc5 --- /dev/null +++ b/db_test.go @@ -0,0 +1,119 @@ +package tdb + +import ( + "errors" + "testing" + + // "encoding/ascii85" + // 
"log" + // "reflect" + // "strconv" + // "git.keganmyers.com/terribleplan/tdb/stringy" + // bolt "go.etcd.io/bbolt" + + "github.com/golang/protobuf/proto" +) + +func TestInvariants(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + if tdb == nil { + t.Error("DB is nil") + } + + if tdb.TEST_Main == nil { + t.Error("TEST_Main is nil") + } + + if tdb.TEST_OwnedBy == nil { + t.Error("TEST_OwnedBy is nil") + } +} + +func TestGet(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + guarantee := randomString(16) + id := tdb.TEST_Main.CreateOrPanic(&TEST_Main{Guarantee: guarantee}) + if assertNotEqualEnd(t, id, uint64(0), "Invalid inserted ID") { + return + } + + item, err := tdb.TEST_Main.Get(id) + if assertNilEnd(t, err, "Unable to get record") { + return + } + + if assertNotNilEnd(t, item, "Invalid result") { + return + } + + tmi, ok := item.(*TEST_Main) + if assertOkEnd(t, ok, "Unable to cast returned to *TEST_Main") { + return + } + + assertEqual(t, tmi.Guarantee, guarantee, "Mismatched guarantee strings") +} + +func TestGetNil(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + item, err := tdb.TEST_Main.Get(1) + if err != nil { + t.Errorf("WAT") + } + if assertNilEnd(t, err, "Unable to get record") { + return + } + + assertNil(t, item, "Invalid result") +} + +func TestUpdateAndGet(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + guarantee := randomString(16) + id := tdb.TEST_Main.CreateOrPanic(&TEST_Main{Guarantee: guarantee}) + + if assertNilEnd(t, tdb.TEST_Main.Update(id, func(item proto.Message) error { + if assertNotNilEnd(t, item, "Invalid result") { + return errors.New("invoked with nil") + } + + tmi, ok := item.(*TEST_Main) + if assertOkEnd(t, ok, "Unable to cast returned to *TEST_Main") { + return errors.New("bad type/cast") + } + + if assertEqualEnd(t, tmi.Guarantee, guarantee, "Mismatched guarantee strings") { + return errors.New("bad guarantee") + } + + guarantee = randomString(16) + tmi.Guarantee = guarantee + return nil + }), "Unable to update record") { + return + } + + item, err := tdb.TEST_Main.Get(id) + if assertNilEnd(t, err, "Unable to get record") { + return + } + + if assertNotNilEnd(t, item, "Invalid result") { + return + } + + tmi, ok := item.(*TEST_Main) + if assertOkEnd(t, ok, "Unable to cast returned to *TEST_Main") { + return + } + + assertEqual(t, tmi.Guarantee, guarantee, "Mismatched guarantee strings") +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..6765d12 --- /dev/null +++ b/go.mod @@ -0,0 +1,8 @@ +module git.keganmyers.com/terribleplan/tdb + +go 1.12 + +require ( + github.com/golang/protobuf v1.3.2 + go.etcd.io/bbolt v1.3.3 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..9a089da --- /dev/null +++ b/go.sum @@ -0,0 +1,216 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Joker/hpp v0.0.0-20180418125244-6893e659854a/go.mod h1:MzD2WMdSxvbHw5fM/OXOFily/lipJWRc9C1px0Mt0ZE= +github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.0 h1:lOCEPvTAtWfLpSZYMOv/g44MGQFAolbKh2khHHGu0Kc= +github.com/Joker/jade v1.0.0/go.mod h1:efZIdO0py/LtcJRSa/j2WEklMSAw84WV0zZVMxNToB8= +github.com/Knetic/govaluate v3.0.0+incompatible/go.mod 
h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/appleboy/gofight/v2 v2.1.1/go.mod h1:6E7pthKhmwss84j/zEixBNim8Q6ahhHcYOtmW5ts5vA= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/astaxie/beego v1.11.1/go.mod h1:i69hVzgauOPSw5qeyF4GVZhn7Od0yG5bbCGzmhbWxgQ= +github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ= +github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU= +github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff/go.mod h1:PhH1ZhyCzHKt4uAasyx+ljRCgoezetRNf59CUtwUkqY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= +github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE= +github.com/casbin/casbin/v2 v2.0.0/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= +github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= +github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/gin-contrib/sse 
v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= +github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= +github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod 
h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg= +github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= +github.com/labstack/echo-contrib v0.7.0 h1:Zc7AjwtvjR4dlqflNJb0sawX1Y7YjcZi7LvUGI3bz5o= +github.com/labstack/echo-contrib v0.7.0/go.mod h1:tEGgUvjB2p2eJAvI05bxsZwQ084O0xHCR3oVXYc+ltg= +github.com/labstack/echo/v4 v4.1.6/go.mod h1:kU/7PwzgNxZH4das4XNsSpBSOD09XIF5YEPzjpkGnGE= +github.com/labstack/echo/v4 v4.1.11 h1:z0BZoArY4FqdpUEl+wlHp4hnr/oSR6MTmQmv8OHSoww= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/gommon v0.2.8/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4= +github.com/labstack/gommon v0.2.9/go.mod h1:E8ZTmW9vw5az5/ZyHWCp0Lw4OH2ecsaBP1C/NKavGG4= +github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= +github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg= +github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= +github.com/uber/jaeger-client-go v2.19.1-0.20191002155754-0be28c34dabf+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/wellington/go-libsass v0.9.2 h1:6Ims04UDdBs6/CGSVK5JC8FNikR5ssrsMMKE/uaO5Q8= +github.com/wellington/go-libsass v0.9.2/go.mod h1:mxgxgam0N0E+NAUMHLcu20Ccfc3mVpDkyrLDayqfiTs= +github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc= +github.com/xanzy/ssh-agent v0.2.1/go.mod 
h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190607181551-461777fb6f67/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190609082536-301114b31cce/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190608022120-eacb66d2a7c3/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a h1:mEQZbbaBjWyLNy0tmZmgEuQAR8XOQ3hL8GYi3J/NG64= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20191116214431-80313e1ba718 h1:cWviR33VVbwok1/RNvFm9XHNcdJCsaSocBflkEXrIdo= +golang.org/x/tools v0.0.0-20191116214431-80313e1ba718/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/index_array.go b/index_array.go new file mode 100644 index 0000000..3975368 --- /dev/null +++ b/index_array.go @@ -0,0 +1,175 @@ +package tdb + +import ( + "bytes" + "fmt" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + bolt "go.etcd.io/bbolt" +) + +type ArrayIndexOptions struct { + ConstraintOptions + ElementsNotNull bool +} + +type arrayIndex struct { + table *table + bucketName []byte + field dbField + idField dbField + 
options ArrayIndexOptions + constraints constraints +} + +func newArrayIndex(table *table, options ArrayIndexOptions) (*arrayIndex, error) { + field := table.t.NamedField(options.Field) + + index := &arrayIndex{ + table: table, + bucketName: []byte(fmt.Sprintf("i@%s.%s", table.name, options.Field)), + field: field, + idField: table.idField, + options: options, + } + + constraints := make([]constraintish, 0) + + if options.Foreign != "" { + if c, err := newArrayForeignConstraint(table, options.Foreign, field, index); err != nil { + return nil, err + } else { + constraints = append(constraints, c) + } + } + + if options.NotNull { + if c, err := newNotNullConstraint(table, field); err != nil { + return nil, err + } else { + constraints = append(constraints, c) + } + } + + if options.ElementsNotNull { + if c, err := newElementsNotNullConstraint(table, field); err != nil { + return nil, err + } else { + constraints = append(constraints, c) + } + } + + index.constraints = constraints + + return index, nil +} + +func (i *arrayIndex) debugLog(message string) { + i.table.debugLog(message) +} + +func (i *arrayIndex) debugLogf(f string, args ...interface{}) { + i.table.debugLogf(f, args...) +} + +func (i *arrayIndex) bucket(tx *Tx) *bolt.Bucket { + return tx.tx().Bucket(i.bucketName) +} + +func (i *arrayIndex) count(tx *Tx) int { + return i.bucket(tx).Stats().KeyN +} + +func (i *arrayIndex) indexedValues(pv dbPtrValue) [][]byte { + vals := pv.dangerous_Field(i.field).Interface().([]uint64) + strs := make([][]byte, len(vals)) + for i, val := range vals { + strs[i] = []byte(stringy.LiteralUintToString(val)) + } + return strs +} + +func (i *arrayIndex) keyValue(pv dbPtrValue) []byte { + return []byte(stringy.ValToStringOrPanic(pv.dangerous_Field(i.idField))) +} + +func (i *arrayIndex) indexKeys(pv dbPtrValue) [][]byte { + return indexishKeys(i, pv) +} + +func (index *arrayIndex) initialize(tx *Tx) error { + _, err := tx.tx().CreateBucketIfNotExists(index.bucketName) + return err +} + +func (i *arrayIndex) put(tx *Tx, newVal dbPtrValue) { + i.debugLogf("[arrayIndex.put] Putting index '%s' for '%s'", i.field.Name, i.table.name) + i.putRaw(tx, i.indexKeys(newVal)) +} + +func (i *arrayIndex) putRaw(tx *Tx, writes [][]byte) { + indexishPutRaw(i, tx, writes) +} + +func (i *arrayIndex) delete(tx *Tx, oldVal dbPtrValue) { + i.debugLogf("[arrayIndex.delete] Deleting index '%s' for '%s'", i.field.Name, i.table.name) + i.deleteRaw(tx, i.indexKeys(oldVal)) +} + +func (i *arrayIndex) deleteRaw(tx *Tx, deletes [][]byte) { + indexishDeleteRaw(i, tx, deletes) +} + +func (i *arrayIndex) update(tx *Tx, oldVal, newVal dbPtrValue) { + i.debugLogf("[arrayIndex.update] Updating index '%s' for '%s'", i.field.Name, i.table.name) + shouldUpdate, _, _, writes, deletes := i.shouldUpdate(tx, oldVal, newVal) + if !shouldUpdate { + return + } + + i.updateRaw(tx, writes, deletes) +} + +func (i *arrayIndex) updateRaw(tx *Tx, writes, deletes [][]byte) { + indexishUpdateRaw(i, tx, writes, deletes) +} + +func (i *arrayIndex) shouldUpdate(tx *Tx, oldVal, newVal dbPtrValue) (bool, [][]byte, [][]byte, [][]byte, [][]byte) { + return indexishShouldUpdate(i, oldVal, newVal) +} + +func (i *arrayIndex) validate(tx *Tx, val dbPtrValue) error { + return i.constraints.validate(tx, val) +} + +func (i *arrayIndex) iteratePrefixed(tx *Tx, prefix []byte, ki KeyIterator) error { + pb := &bytes.Buffer{} + pb.Write(prefix) + pb.Write(IndexKeySeparator) + + i.debugLogf("[index.iteratePrefixed] seeking prefix '%s'", pb.Bytes()) + + c := 
i.bucket(tx).Cursor() + for k, _ := c.Seek(pb.Bytes()); k != nil; k, _ = c.Next() { + parts := bytes.Split(k, IndexKeySeparator) + lenParts := len(parts) + if lenParts != 2 { + i.debugLogf("[index.iteratePrefixed] iterating prefix '%s', got %d parts from key '%s'", prefix, lenParts, k) + return fmt.Errorf("[index.iteratePrefixed] Invalid index key for '%s'.'%s': %s", i.table.name, i.field.Name, k) + } + + if !bytes.Equal(prefix, parts[0]) { + break + } + + signal, err := ki(parts[1]) + if err != nil { + return err + } + if signal == StopIteration { + break + } + } + return nil +} diff --git a/index_array_test.go b/index_array_test.go new file mode 100644 index 0000000..94bc504 --- /dev/null +++ b/index_array_test.go @@ -0,0 +1,15 @@ +package tdb + +import ( + "testing" +) + +func TestArrayIndexedInsert(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + mid1 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + mid2 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + + tdb.TEST_ArrayHas.CreateOrPanic(&TEST_ArrayHas{MainIds: []uint64{mid1, mid2}}) +} diff --git a/index_simple.go b/index_simple.go new file mode 100644 index 0000000..825c06e --- /dev/null +++ b/index_simple.go @@ -0,0 +1,175 @@ +package tdb + +import ( + "bytes" + "fmt" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + bolt "go.etcd.io/bbolt" +) + +type SimpleIndexOptions struct { + ConstraintOptions + Unique bool +} + +type simpleIndex struct { + table *table + bucketName []byte + field dbField + idField dbField + options SimpleIndexOptions + constraints constraints +} + +func newSimpleIndex(table *table, options SimpleIndexOptions) (*simpleIndex, error) { + field := table.t.NamedField(options.Field) + + index := &simpleIndex{ + table: table, + bucketName: []byte(fmt.Sprintf("i@%s.%s", table.name, options.Field)), + field: field, + idField: table.idField, + options: options, + } + + constraints := make([]constraintish, 0) + + if options.Foreign != "" { + if c, err := newSimpleForeignConstraint(table, options.Foreign, field, index); err != nil { + return nil, err + } else { + constraints = append(constraints, c) + } + } + + if options.Unique { + if c, err := newUniqueConstraint(table, index, field); err != nil { + return nil, err + } else { + constraints = append(constraints, c) + } + } + + if options.NotNull { + if c, err := newNotNullConstraint(table, field); err != nil { + return nil, err + } else { + constraints = append(constraints, c) + } + } + + index.constraints = constraints + + return index, nil +} + +func (i *simpleIndex) debugLog(message string) { + i.table.debugLog(message) +} + +func (i *simpleIndex) debugLogf(f string, args ...interface{}) { + i.table.debugLogf(f, args...) 
+} + +func (i *simpleIndex) bucket(tx *Tx) *bolt.Bucket { + return tx.tx().Bucket(i.bucketName) +} + +func (i *simpleIndex) count(tx *Tx) int { + return i.bucket(tx).Stats().KeyN +} + +func (i *simpleIndex) indexedValues(pv dbPtrValue) [][]byte { + return [][]byte{[]byte(stringy.ValToStringOrPanic(pv.dangerous_Field(i.field)))} +} + +func (i *simpleIndex) keyValue(pv dbPtrValue) []byte { + return []byte(stringy.ValToStringOrPanic(pv.dangerous_Field(i.idField))) +} + +func (i *simpleIndex) indexKeys(pv dbPtrValue) [][]byte { + return indexishKeys(i, pv) +} + +func (index *simpleIndex) initialize(tx *Tx) error { + _, err := tx.tx().CreateBucketIfNotExists(index.bucketName) + return err +} + +// func (i *simpleIndex) getAll(tx *Tx, indexed []byte) ([][]byte, error) { +// b := i.bucket(tx) + +// } + +func (i *simpleIndex) put(tx *Tx, newVal dbPtrValue) { + i.debugLogf("[simpleIndex.put] Putting index '%s' for '%s'", i.field.Name, i.table.name) + i.putRaw(tx, i.indexKeys(newVal)) +} + +func (i *simpleIndex) putRaw(tx *Tx, writes [][]byte) { + indexishPutRaw(i, tx, writes) +} + +func (i *simpleIndex) delete(tx *Tx, oldVal dbPtrValue) { + i.debugLogf("Deleting index '%s' for '%s'", i.field.Name, i.table.name) + i.deleteRaw(tx, i.indexKeys(oldVal)) +} + +func (i *simpleIndex) deleteRaw(tx *Tx, deletes [][]byte) { + indexishDeleteRaw(i, tx, deletes) +} + +func (i *simpleIndex) update(tx *Tx, oldVal, newVal dbPtrValue) { + i.debugLogf("[simpleIndex.update] Updating index '%s' for '%s'", i.field.Name, i.table.name) + shouldUpdate, _, _, writes, deletes := i.shouldUpdate(tx, oldVal, newVal) + if !shouldUpdate { + return + } + + i.updateRaw(tx, writes, deletes) +} + +func (i *simpleIndex) updateRaw(tx *Tx, writes, deletes [][]byte) { + indexishUpdateRaw(i, tx, writes, deletes) +} + +func (i *simpleIndex) shouldUpdate(tx *Tx, oldVal, newVal dbPtrValue) (bool, [][]byte, [][]byte, [][]byte, [][]byte) { + return indexishShouldUpdate(i, oldVal, newVal) +} + +func (i *simpleIndex) validate(tx *Tx, val dbPtrValue) error { + return i.constraints.validate(tx, val) +} + +func (i *simpleIndex) iteratePrefixed(tx *Tx, prefix []byte, ki KeyIterator) error { + pb := &bytes.Buffer{} + pb.Write(prefix) + pb.Write(IndexKeySeparator) + + i.debugLogf("[index.iteratePrefixed] seeking prefix '%s'", pb.Bytes()) + + c := i.bucket(tx).Cursor() + for k, _ := c.Seek(pb.Bytes()); k != nil; k, _ = c.Next() { + parts := bytes.Split(k, IndexKeySeparator) + lenParts := len(parts) + if lenParts != 2 { + i.debugLogf("[index.iteratePrefixed] iterating prefix '%s', got %d parts from key '%s'", prefix, lenParts, k) + return fmt.Errorf("[index.iteratePrefixed] Invalid index key for '%s'.'%s': %s", i.table.name, i.field.Name, k) + } + + if !bytes.Equal(prefix, parts[0]) { + break + } + + signal, err := ki(parts[1]) + if err != nil { + return err + } + if signal == StopIteration { + break + } + } + return nil +} diff --git a/indicies.go b/indicies.go new file mode 100644 index 0000000..b4ff884 --- /dev/null +++ b/indicies.go @@ -0,0 +1,168 @@ +package tdb + +import ( + "bytes" + + bolt "go.etcd.io/bbolt" +) + +var ( + IndexKeySeparator = []byte("&") + IndexFieldSeparator = []byte("=") +) + +type indexish interface { + debugLogger + count(tx *Tx) int + initialize(tx *Tx) error + validate(tx *Tx, val dbPtrValue) error + update(tx *Tx, old, new dbPtrValue) + updateRaw(tx *Tx, write, delete [][]byte) + put(tx *Tx, val dbPtrValue) + putRaw(tx *Tx, val [][]byte) + delete(tx *Tx, val dbPtrValue) + deleteRaw(tx *Tx, val [][]byte) + bucket(tx *Tx) 
*bolt.Bucket
+    iteratePrefixed(tx *Tx, prefix []byte, i KeyIterator) error
+    indexedValues(val dbPtrValue) [][]byte
+    keyValue(val dbPtrValue) []byte
+    indexKeys(val dbPtrValue) [][]byte
+    shouldUpdate(tx *Tx, oldVal, newVal dbPtrValue) (needsUpdate bool, oldKeys, newKeys, writes, deletes [][]byte)
+}
+
+func indexishKeys(i indexish, pv dbPtrValue) [][]byte {
+    vals := i.indexedValues(pv)
+
+    if len(vals) == 0 {
+        return vals
+    }
+
+    keyVal := i.keyValue(pv)
+    for i, val := range vals {
+        bb := &bytes.Buffer{}
+        bb.Write(val)
+        bb.Write(IndexKeySeparator)
+        bb.Write(keyVal)
+        vals[i] = bb.Bytes()
+    }
+    return vals
+}
+
+func indexishPutRaw(i indexish, tx *Tx, writes [][]byte) {
+    lenWrites := len(writes)
+    i.debugLogf("[indexishPutRaw] putting %d keys", lenWrites)
+    if lenWrites == 0 {
+        return
+    }
+
+    b := i.bucket(tx)
+    for _, entry := range writes {
+        err := b.Put(entry, []byte{})
+        if err != nil {
+            i.debugLogf("%s", err)
+            panic(err)
+        }
+    }
+}
+
+func indexishDeleteRaw(i indexish, tx *Tx, deletes [][]byte) {
+    lenDeletes := len(deletes)
+    i.debugLogf("[indexishDeleteRaw] deleting %d keys", lenDeletes)
+    if lenDeletes == 0 {
+        return
+    }
+
+    b := i.bucket(tx)
+    for _, entry := range deletes {
+        // remove the stale index entry
+        err := b.Delete(entry)
+        if err != nil {
+            i.debugLogf("%s", err)
+            panic(err)
+        }
+    }
+}
+
+func indexishUpdateRaw(i indexish, tx *Tx, writes, deletes [][]byte) {
+    i.deleteRaw(tx, deletes)
+    i.putRaw(tx, writes)
+}
+
+func indexishShouldUpdate(i indexish, oldVal, newVal dbPtrValue) (bool, [][]byte, [][]byte, [][]byte, [][]byte) {
+    oldKeys := i.indexKeys(oldVal)
+    lenOldKeys := len(oldKeys)
+    newKeys := i.indexKeys(newVal)
+    lenNewKeys := len(newKeys)
+
+    // no keys before or after, nothing to do
+    if lenOldKeys == 0 && lenNewKeys == 0 {
+        return false, nil, nil, nil, nil
+    }
+
+    // either all new or all deleted, then just do that
+    if lenNewKeys == 0 || lenOldKeys == 0 {
+        return true, oldKeys, newKeys, newKeys, oldKeys
+    }
+
+    // we can handle things simply if we have exactly 1 of everything, this will be a fairly common case
+    if lenNewKeys == 1 && lenOldKeys == 1 {
+        // if the keys are the same then we don't need to do anything
+        if bytes.Equal(oldKeys[0], newKeys[0]) {
+            return false, nil, nil, nil, nil
+        }
+        // otherwise we need to delete and write the old and new respectively
+        return true, oldKeys, newKeys, newKeys, oldKeys
+    }
+
+    // the real meat and potatoes starts here
+    // we will need a lookup table of one of the slices, old was chosen for no particular reason
+    oldMap := make(map[string]bool)
+    for _, oldKey := range oldKeys {
+        oldMap[string(oldKey)] = true
+    }
+    i.debugLogf("[indexishDeleteRaw] indexed old with %d entries", len(oldMap))
+
+    writes := make([][]byte, 0, lenNewKeys)
+    // then we look at the other (new) slice
+    for _, newKey := range newKeys {
+        // and check if it needs to be created/not deleted (missing from lookup)
+        if _, has := oldMap[string(newKey)]; !has {
+            i.debugLogf("[indexishDeleteRaw] old does not have new key %s", newKey)
+            writes = append(writes, newKey)
+        } else {
+            i.debugLogf("[indexishDeleteRaw] old does have new key %s", newKey)
+            delete(oldMap, string(newKey))
+        }
+    }
+
+    // before having to do more we can check a few optimized paths
+    lenWrites := len(writes)
+    i.debugLogf("[indexishDeleteRaw] found %d writes", lenWrites)
+    // skip some steps if we need to write all keys, which implies deleting everything that's old
+    if lenWrites == lenNewKeys {
+        return true, oldKeys, newKeys, newKeys, oldKeys
+    }
+    // don't do anything if we have no
writes and the old and new are the same length as they must be equal + if lenWrites == 0 && lenOldKeys == lenNewKeys { + i.debugLog("[indexishDeleteRaw] found no changes") + return false, nil, nil, nil, nil + } + lenDeletes := len(oldMap) + i.debugLogf("[indexishDeleteRaw] found %d deletes", lenDeletes) + // finally, we can skip building the deletion slice if we don't have to delete anything + if lenDeletes == 0 { + return true, oldKeys, newKeys, writes, [][]byte{} + } + + deletes := make([][]byte, 0, lenDeletes) + // and finally we turn anything still in the lookup table into the list of deletions + for oldKey, _ := range oldMap { + deletes = append(deletes, []byte(oldKey)) + } + + // this case _should_ be unreachable due to our earlier optimized cases, but it is safer to leave it + if len(writes) == 0 && len(deletes) == 0 { + return false, nil, nil, nil, nil + } + + return true, oldKeys, newKeys, writes, deletes +} diff --git a/indicies_test.go b/indicies_test.go new file mode 100644 index 0000000..b2f3051 --- /dev/null +++ b/indicies_test.go @@ -0,0 +1,169 @@ +package tdb + +import ( + "errors" + "log" + "reflect" + "testing" + // "encoding/ascii85" + // "log" + // "strconv" + + bolt "go.etcd.io/bbolt" +) + +type testIndexish struct { + indexKeysResponses map[int64][][]byte +} + +func (i *testIndexish) debugLog(s string) { + if debug { + log.Print(s) + } +} +func (i *testIndexish) debugLogf(f string, args ...interface{}) { + if debug { + log.Printf(f, args...) + } +} +func (i *testIndexish) count(tx *Tx) int { panic(errors.New("unimplemented")) } +func (i *testIndexish) initialize(tx *Tx) error { panic(errors.New("unimplemented")) } +func (i *testIndexish) validate(tx *Tx, val dbPtrValue) error { panic(errors.New("unimplemented")) } +func (i *testIndexish) update(tx *Tx, old, new dbPtrValue) { panic(errors.New("unimplemented")) } +func (i *testIndexish) updateRaw(tx *Tx, write, delete [][]byte) { + panic(errors.New("unimplemented")) +} +func (i *testIndexish) put(tx *Tx, val dbPtrValue) { panic(errors.New("unimplemented")) } +func (i *testIndexish) putRaw(tx *Tx, val [][]byte) { panic(errors.New("unimplemented")) } +func (i *testIndexish) delete(tx *Tx, val dbPtrValue) { panic(errors.New("unimplemented")) } +func (i *testIndexish) deleteRaw(tx *Tx, val [][]byte) { panic(errors.New("unimplemented")) } +func (i *testIndexish) bucket(tx *Tx) *bolt.Bucket { panic(errors.New("unimplemented")) } +func (i *testIndexish) iteratePrefixed(tx *Tx, prefix []byte, it KeyIterator) error { + panic(errors.New("unimplemented")) +} +func (i *testIndexish) indexedValues(val dbPtrValue) [][]byte { panic(errors.New("unimplemented")) } +func (i *testIndexish) keyValue(val dbPtrValue) []byte { panic(errors.New("unimplemented")) } +func (i *testIndexish) indexKeys(val dbPtrValue) [][]byte { + rv := reflect.Value(val) + if rv.Type().Kind() != reflect.Int { + panic(errors.New("unknowable response")) + } + if r, ok := i.indexKeysResponses[rv.Int()]; ok { + return r + } + panic(errors.New("unknown response")) +} +func (i *testIndexish) shouldUpdate(tx *Tx, oldVal, newVal dbPtrValue) (needsUpdate bool, oldKeys, newKeys, writes, deletes [][]byte) { + panic(errors.New("unimplemented")) +} + +type indexishShouldUpdateTest struct { + old [][]byte + new [][]byte + needsUpdate bool + writes [][]byte + deletes [][]byte +} + +var indexishShouldUpdateTests = []indexishShouldUpdateTest{ + { // 0 + old: [][]byte{}, + new: [][]byte{}, + needsUpdate: false, + writes: [][]byte{}, + deletes: [][]byte{}, + }, + { // 1 + old: 
[][]byte{[]byte("1")}, + new: [][]byte{}, + needsUpdate: true, + writes: [][]byte{}, + deletes: [][]byte{[]byte("1")}, + }, + { // 2 + old: [][]byte{}, + new: [][]byte{[]byte("2")}, + needsUpdate: true, + writes: [][]byte{[]byte("2")}, + deletes: [][]byte{}, + }, + { // 3 + old: [][]byte{[]byte("1")}, + new: [][]byte{[]byte("2")}, + needsUpdate: true, + writes: [][]byte{[]byte("2")}, + deletes: [][]byte{[]byte("1")}, + }, + { // 4 + old: [][]byte{[]byte("1")}, + new: [][]byte{[]byte("1")}, + needsUpdate: false, + writes: [][]byte{}, + deletes: [][]byte{}, + }, + { // 5 + old: [][]byte{[]byte("1"), []byte("2")}, + new: [][]byte{[]byte("1"), []byte("2")}, + needsUpdate: false, + writes: [][]byte{}, + deletes: [][]byte{}, + }, + { // 6 + old: [][]byte{[]byte("1"), []byte("2")}, + new: [][]byte{[]byte("2")}, + needsUpdate: true, + writes: [][]byte{}, + deletes: [][]byte{[]byte("1")}, + }, + { // 7 + old: [][]byte{[]byte("2")}, + new: [][]byte{[]byte("1"), []byte("2")}, + needsUpdate: true, + writes: [][]byte{[]byte("1")}, + deletes: [][]byte{}, + }, + { // 8 + old: [][]byte{[]byte("1"), []byte("2")}, + new: [][]byte{[]byte("2"), []byte("3")}, + needsUpdate: true, + writes: [][]byte{[]byte("3")}, + deletes: [][]byte{[]byte("1")}, + }, +} + +func TestIndexishShouldUpdate(t *testing.T) { + ti := &testIndexish{} + oldVal := dbPtrValue(reflect.ValueOf(1)) + newVal := dbPtrValue(reflect.ValueOf(2)) + for i, tc := range indexishShouldUpdateTests { + ti.debugLogf("- Started test case %d", i) + ti.indexKeysResponses = map[int64][][]byte{ + 1: tc.old, + 2: tc.new, + } + needsUpdate, _, _, writes, deletes := indexishShouldUpdate(ti, oldVal, newVal) + if needsUpdate != tc.needsUpdate { + ti.debugLog("! Incorrect needsUpdate") + t.Errorf("Incorrect needsUpdate (case %d)", i) + // continue + } + + if len(writes) != len(tc.writes) { + ti.debugLogf("! Wrong # of writes (%d vs %d)", len(writes), len(tc.writes)) + for _, write := range writes { + ti.debugLogf("%s", write) + } + t.Errorf("Wrong # of writes (case %d, %d vs %d)", i, len(writes), len(tc.writes)) + // continue + } + if len(deletes) != len(tc.deletes) { + ti.debugLogf("! 
Wrong # of deletes (%d vs %d)", len(deletes), len(tc.deletes)) + for _, delete := range deletes { + ti.debugLogf("%s", delete) + } + t.Errorf("Wrong # of deletes (case %d, %d vs %d)", i, len(deletes), len(tc.deletes)) + // continue + } + + } +} diff --git a/internals.go b/internals.go new file mode 100644 index 0000000..da7994f --- /dev/null +++ b/internals.go @@ -0,0 +1,221 @@ +package tdb + +import ( + "errors" + "fmt" + "reflect" + "strings" + + // "git.keganmyers.com/terribleplan/tdb/stringy" + + "github.com/golang/protobuf/proto" + bolt "go.etcd.io/bbolt" +) + +// these types are simply to help pass around things and know what they are +// some would argue that having such types is bad practice, but I see it as +// a necessary evil since the reflection in golang is fairly bare-bones + +// dbValue - a non-pointer thing stored by the database + +type dbValue reflect.Value + +func (val dbValue) Ptr() dbPtrValue { + return dbPtrValue(reflect.Value(val).Addr()) +} + +func (val dbValue) Type() *dbType { + return &dbType{ + T: reflect.Value(val).Type(), + } +} + +func (val dbValue) dangerous_Field(f dbField) reflect.Value { + return reflect.Value(val).FieldByIndex(reflect.StructField(f).Index) +} + +func (val dbValue) Marshal() ([]byte, error) { + return proto.Marshal(reflect.Value(val).Addr().Interface().(proto.Message)) +} + +// dbPtrValue - a pointer to a thing stored by the database + +func dbPtrValueOf(p proto.Message) dbPtrValue { + return dbPtrValue(reflect.ValueOf(p)) +} + +type dbPtrValue reflect.Value + +func (ptrVal dbPtrValue) Val() dbValue { + return dbValue(reflect.Value(ptrVal).Elem()) +} + +func (ptrVal dbPtrValue) PtrType() *dbPtrType { + return &dbPtrType{ + T: reflect.Value(ptrVal).Type(), + } +} + +func (ptrVal dbPtrValue) Proto() proto.Message { + return reflect.Value(ptrVal).Interface().(proto.Message) +} + +func (ptrVal dbPtrValue) IsOfPtrType(pt *dbPtrType) bool { + return pt.T == reflect.Value(ptrVal).Type() +} + +func (ptrVal dbPtrValue) IsNil() bool { + return reflect.Value(ptrVal).IsNil() +} + +func (ptrVal dbPtrValue) dangerous_Field(f dbField) reflect.Value { + return reflect.Value(ptrVal).Elem().FieldByIndex(reflect.StructField(f).Index) +} + +func (ptrVal dbPtrValue) Marshal() ([]byte, error) { + return proto.Marshal(reflect.Value(ptrVal).Interface().(proto.Message)) +} + +// dbType - the type of a non-pointer thing stored by the database + +func dbTypeOf(p proto.Message) *dbType { + t := reflect.TypeOf(p).Elem() + + typeString := t.String() + nameComponents := strings.Split(typeString, ".") + name := nameComponents[len(nameComponents)-1] + if name[0] == '*' { + name = name[1:] + } + + if name == "" { + panic(errors.New("[tdb] [internal] ]Unable to reliably determine name of thing")) + } + + return &dbType{ + Name: name, + T: t, + } +} + +type dbType struct { + Name string + T reflect.Type +} + +func (t *dbType) New() dbPtrValue { + return dbPtrValue(reflect.New(t.T)) +} + +func (t *dbType) PtrType() *dbPtrType { + return &dbPtrType{ + Name: "*" + t.Name, + T: reflect.PtrTo(t.T), + } +} + +func (t *dbType) IdField() dbField { + idField := t.NamedField("Id") + + if idField.Type.Kind() != reflect.Uint64 { + panic(fmt.Errorf("[tdb] [internal] %s's 'Id' field is not a uint64", t.Name)) + } + + return idField +} + +func (t *dbType) NamedField(name string) dbField { + field, exists := t.T.FieldByName(name) + + if !exists { + panic(fmt.Errorf("[tdb] [internal] %s lacks a '%s' field", t.Name, name)) + } + + return dbField(field) +} + +// dbPtrType - the type of a 
pointer to a thing stored by the database + +type dbPtrType struct { + Name string + T reflect.Type +} + +func (ptr *dbPtrType) New() dbPtrValue { + return dbPtrValue(reflect.New(ptr.T.Elem())) +} + +func (ptr *dbPtrType) Type() dbType { + return dbType{ + Name: ptr.Name[1:], + T: ptr.T.Elem(), + } +} + +func (ptr *dbPtrType) String() string { + return ptr.T.String() +} + +func (ptr *dbPtrType) Zero() dbPtrValue { + return dbPtrValue(reflect.Zero(ptr.T)) +} + +func (ptr *dbPtrType) Unmarshal(data []byte) (dbPtrValue, error) { + pv := ptr.New() + if err := proto.Unmarshal(data, pv.Proto()); err != nil { + return ptr.Zero(), err + } + return pv, nil +} + +func (ptr *dbPtrType) IdField() dbField { + idField := ptr.NamedField("Id") + + if idField.Type.Kind() != reflect.Uint64 { + panic(fmt.Errorf("[tdb] [internal] %s's 'Id' field is not a uint64", ptr.Name)) + } + + return idField +} + +func (ptr *dbPtrType) NamedField(name string) dbField { + field, exists := ptr.T.Elem().FieldByName(name) + + if !exists { + panic(fmt.Errorf("[tdb] [internal] %s lacks a '%s' field", ptr.Name, name)) + } + + return dbField(field) +} + +// dbField - a field on a struct... this one is quite unnecessary + +type dbField reflect.StructField + +func (f dbField) IsUint64() bool { + return f.Type.Kind() == reflect.Uint64 +} + +func (f dbField) IsUint64Slice() bool { + return f.Type.Kind() == reflect.Slice && f.Type.Elem().Kind() == reflect.Uint64 +} + +func (f dbField) IsSliceish() bool { + fieldKind := f.Type.Kind() + return fieldKind == reflect.Array || fieldKind == reflect.Slice +} + +// Tx - is it dumb to provide this just so consumers of this package don't have to include bolt? +// I think not + +type Tx struct { + btx *bolt.Tx +} + +func convertTx(btx *bolt.Tx) *Tx { + return &Tx{btx: btx} +} + +func (tx *Tx) tx() *bolt.Tx { + return tx.btx +} diff --git a/iteration.go b/iteration.go new file mode 100644 index 0000000..b7bf1db --- /dev/null +++ b/iteration.go @@ -0,0 +1,27 @@ +package tdb + +import ( + "github.com/golang/protobuf/proto" + bolt "go.etcd.io/bbolt" +) + +var ( + ContinueIteration IterationSignal = true + StopIteration IterationSignal = false +) + +type IterationSignal bool + +type Iterable interface { + Iterate(Iterator, ...*Tx) error + IterateKeys(KeyIterator, ...*Tx) error +} + +type rawIterable interface { + iterateRaw(rawIterator, ...*Tx) error +} + +type rawIterator func(dbPtrValue) (IterationSignal, error) +type Iterator func(proto.Message) (IterationSignal, error) +type KeyIterator func([]byte) (IterationSignal, error) +type keyIteratorWithBucket func([]byte, *bolt.Bucket) (IterationSignal, error) diff --git a/query.go b/query.go new file mode 100644 index 0000000..92b70b7 --- /dev/null +++ b/query.go @@ -0,0 +1,180 @@ +package tdb + +import ( + "sort" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + "github.com/golang/protobuf/proto" +) + +type queryData struct { + err error + table *table + ops []queryOpish + sr uint64 +} + +// NB: "Where" operations should be expected to mutate the underlying query. +type Query interface { + Iterable + debugLogger + Run(txs ...*Tx) ([]proto.Message, error) + RunOrPanic(txs ...*Tx) []proto.Message + First(txs ...*Tx) (proto.Message, error) + Where(fieldName, op string, value interface{}) Query +} + +func (q *queryData) debugLog(message string) { + q.table.db.debugLog(message) +} + +func (q *queryData) debugLogf(f string, args ...interface{}) { + q.table.db.debugLogf(f, args...) 
+}
+
+func (q *queryData) Where(fieldName, op string, value interface{}) Query {
+    if q.err != nil {
+        return q
+    }
+
+    qop, err := createQueryOp(q.table, fieldName, op, value)
+    if err != nil {
+        q.err = err
+        return q
+    }
+
+    q.ops = append(q.ops, qop)
+
+    return q
+}
+
+func (q *queryData) Ok() error {
+    return q.err
+}
+
+func (q *queryData) Iterate(i Iterator, txs ...*Tx) error {
+    return q.iterateRaw(func(pv dbPtrValue) (IterationSignal, error) {
+        return i(pv.Proto())
+    }, txs...)
+}
+
+func (q *queryData) IterateKeys(i KeyIterator, txs ...*Tx) error {
+    return q.iterateRaw(func(pv dbPtrValue) (IterationSignal, error) {
+        return i([]byte(stringy.LiteralUintToString(pv.dangerous_Field(q.table.idField).Uint())))
+    }, txs...)
+}
+
+func (q *queryData) iterateRaw(i rawIterator, txs ...*Tx) error {
+    q.sr = 0
+    lenOps := len(q.ops)
+    // straight iteration
+    if lenOps == 0 {
+        q.debugLog("[query] No ops, doing table scan")
+        return q.table.iterateRaw(i, txs...)
+    }
+
+    if lenOps == 1 {
+        q.debugLog("[query] Single op")
+        op := q.ops[0]
+        if op.indexed() {
+            q.debugLog("[query] Op has index")
+            return op.iterateRaw(func(v dbPtrValue) (IterationSignal, error) {
+                q.sr++
+                return i(v)
+            }, txs...)
+        } else {
+            q.debugLog("[query] Op missing index, doing table scan")
+            return q.table.iterateRaw(func(v dbPtrValue) (IterationSignal, error) {
+                q.sr++
+                if op.match(v) {
+                    return i(v)
+                }
+                return ContinueIteration, nil
+            }, txs...)
+        }
+    }
+
+    anyHaveIndex := false
+    sort.SliceStable(q.ops, func(i, j int) bool {
+        ihi := q.ops[i].indexed()
+        jhi := q.ops[j].indexed()
+
+        anyHaveIndex = anyHaveIndex || ihi || jhi
+
+        // indexed ops sort ahead of non-indexed ops
+        if ihi && !jhi {
+            return true
+        }
+        return false
+    })
+
+    var source rawIterable = q.table
+    conditions := q.ops
+
+    if anyHaveIndex {
+        q.debugLogf("[query] Using index for '%s' to scan", conditions[0].String())
+        // first condition is iterated over, others are executed as conditions
+        source = conditions[0]
+        conditions = conditions[1:]
+    } else {
+        q.debugLog("[query] No index, using table scan")
+    }
+
+    return source.iterateRaw(func(v dbPtrValue) (IterationSignal, error) {
+        matches := true
+        q.sr++
+        for _, op := range conditions {
+            if !op.match(v) {
+                matches = false
+                break
+            }
+        }
+        if matches {
+            return i(v)
+        }
+        return ContinueIteration, nil
+    }, txs...)
+}
+
+func (query *queryData) Run(txs ...*Tx) ([]proto.Message, error) {
+    res := make([]proto.Message, 0)
+    if err := query.Iterate(func(item proto.Message) (IterationSignal, error) {
+        res = append(res, item)
+        return ContinueIteration, nil
+    }, txs...); err != nil {
+        return nil, err
+    }
+
+    return res, nil
+}
+
+func (query *queryData) RunOrPanic(txs ...*Tx) []proto.Message {
+    res, err := query.Run(txs...)
+ if err != nil { + panic(err) + } + return res +} + +func (query *queryData) First(txs ...*Tx) (proto.Message, error) { + var rm proto.Message + if err := query.Iterate(func(m proto.Message) (IterationSignal, error) { + rm = m + return StopIteration, nil + }, txs...); err != nil { + return nil, err + } + return rm, nil +} + +func (query *queryData) Update(txs ...*Tx) ([]proto.Message, error) { + res := make([]proto.Message, 0) + if err := query.Iterate(func(item proto.Message) (IterationSignal, error) { + res = append(res, item) + return ContinueIteration, nil + }, txs...); err != nil { + return nil, err + } + + return res, nil +} diff --git a/query_test.go b/query_test.go new file mode 100644 index 0000000..d42c86a --- /dev/null +++ b/query_test.go @@ -0,0 +1,192 @@ +package tdb + +import ( + "testing" + // "encoding/ascii85" + // "log" + // "reflect" + // "strconv" + // "git.keganmyers.com/terribleplan/tdb/stringy" + // bolt "go.etcd.io/bbolt" + // "github.com/golang/protobuf/proto" +) + +func TestSimpleQuery(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + guarantee := randomString(16) + id := tdb.TEST_Main.CreateOrPanic(&TEST_Main{Guarantee: guarantee}) + + items, err := tdb.TEST_Main.Query(). + Where("Id", "=", id). + Run() + if assertNilEnd(t, err, "Unable to run query") { + return + } + + if assertEqualEnd(t, len(items), 1, "Wrong number of results") { + return + } + + tmi, ok := items[0].(*TEST_Main) + if assertOkEnd(t, ok, "Unable to cast returned to *TEST_Main") { + return + } + + assertEqual(t, tmi.Guarantee, guarantee, "Mismatched guarantee strings") +} + +func TestSimpleQueryAmongstMany(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + guarantee := randomString(16) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + // id: 6 + id := tdb.TEST_Main.CreateOrPanic(&TEST_Main{Guarantee: guarantee}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + + items, err := tdb.TEST_Main.Query(). + Where("Id", "=", id). + Run() + if assertNilEnd(t, err, "Unable to run query") { + return + } + + if assertEqualEnd(t, len(items), 1, "Wrong number of results") { + return + } + + tmi, ok := items[0].(*TEST_Main) + if assertOkEnd(t, ok, "Unable to cast returned to *TEST_Main") { + return + } + + assertEqual(t, tmi.Guarantee, guarantee, "Mismatched guarantee strings") +} + +func TestForeignQuery(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + mid1 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + mid2 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + id1 := tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + id3 := tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + + q := tdb.TEST_OwnedBy.Query(). 
+ Where("MainId", "=", mid1) + + items, err := q.Run() + if assertNilEnd(t, err, "Unable to run query") { + return + } + + if assertEqualEnd(t, len(items), 2, "Wrong number of results") { + return + } + + for _, item := range items { + tmi, ok := item.(*TEST_OwnedBy) + if assertOkEnd(t, ok, "Unable to cast returned item to *TEST_OwnedBy") { + continue + } + + assertEqual(t, tmi.MainId, mid1, "Got result with bad MainId") + + if tmi.Id != id1 && tmi.Id != id3 { + t.Errorf("Got result with bad Id: got %d, expected %d or %d", tmi.Id, id1, id3) + } + } + + qd := q.(*queryData) + assertUint64Equal(t, qd.sr, 2, "Scanned incorrect number of records") +} + +func EmptyIndexQuery(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + mid1 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + mid2 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + + q := tdb.TEST_OwnedBy.Query(). + Where("MainId", "=", mid1) + + items, err := q.Run() + if assertNilEnd(t, err, "Unable to run query") { + return + } + + if assertEqualEnd(t, len(items), 0, "Wrong number of results") { + return + } + + qd := q.(*queryData) + assertUint64Equal(t, qd.sr, 0, "Scanned incorrect number of records") +} + +func TestComplexQueryAmongstMany(t *testing.T) { + setupTestDb() + defer cleanupTestDb() + + guarantee := randomString(16) + mid1 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + mid2 := tdb.TEST_Main.CreateOrPanic(&TEST_Main{}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + id := tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2, Guarantee: guarantee}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid1}) + tdb.TEST_OwnedBy.CreateOrPanic(&TEST_OwnedBy{MainId: mid2}) + + q := tdb.TEST_OwnedBy.Query(). + Where("MainId", "=", mid2). 
// indexed, speeds query + Where("Guarantee", "=", guarantee) // non-indexed, filters during index scan + + items, err := q.Run() + if assertNilEnd(t, err, "Unable to run query") { + return + } + + if assertEqualEnd(t, len(items), 1, "Wrong number of results") { + return + } + + tobi, ok := items[0].(*TEST_OwnedBy) + if assertOkEnd(t, ok, "Unable to cast returned to *TEST_OwnedBy") { + return + } + + assertEqual(t, tobi.Id, id, "Mismatched IDs") + assertEqual(t, tobi.Guarantee, guarantee, "Mismatched guarantee strings") + + qd := q.(*queryData) + assertUint64Equal(t, qd.sr, 6, "Scanned incorrect number of records") +} diff --git a/queryop.go b/queryop.go new file mode 100644 index 0000000..4c8bc30 --- /dev/null +++ b/queryop.go @@ -0,0 +1,158 @@ +package tdb + +import ( + "bytes" + "errors" + "fmt" + + // "sort" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + bolt "go.etcd.io/bbolt" +) + +type queryOpCreator func(*table, string, interface{}) (queryOpish, error) + +var queryOps map[string]queryOpCreator = map[string]queryOpCreator{ + "=": createEqualQueryOp, + // "in": createInQueryOp, +} + +func createQueryOp(table *table, field, opType string, value interface{}) (queryOpish, error) { + create, ok := queryOps[opType] + if !ok { + return nil, fmt.Errorf("Unknown query operation '%s'", opType) + } + + op, err := create(table, field, value) + if err != nil { + table.debugLogf("Failed creating query for '%s' on table '%s'", opType, table.name) + return nil, err + } + + err = op.valid() + if err != nil { + table.debugLogf("Unable to form valid query for '%s' on table '%s'", opType, table.name) + return nil, err + } + + return op, nil +} + +type queryOpish interface { + rawIterable + Iterable + match(v dbPtrValue) bool + indexed() bool + valid() error + String() string +} + +type queryOp struct { + table *table + field dbField + value []byte + index indexish +} + +func createCommonQueryOp(table *table, field string, value interface{}) (*queryOp, error) { + index, ok := table.indicies[field] + if ok { + table.debugLogf("[query] found index for field '%s'", field) + } else { + table.debugLogf("[query] did not find index for field '%s'", field) + if len(table.indicies) == 0 { + table.debugLog("[query] (0 indicies found)") + } else { + for name, _ := range table.indicies { + table.debugLogf("[query] '%s' is indexed", name) + } + } + } + + q := &queryOp{ + table: table, + field: table.t.NamedField(field), + value: []byte(stringy.ToStringOrPanic(value)), + index: index, + } + return q, nil +} + +// re-used for queryOpEqual +func (op *queryOp) indexed() bool { + return op.index != nil +} + +type queryOpEqual queryOp + +func createEqualQueryOp(table *table, field string, value interface{}) (queryOpish, error) { + qo, err := createCommonQueryOp(table, field, value) + if err != nil { + return nil, err + } + + qoe := queryOpEqual(*qo) + return &qoe, nil +} + +func (op *queryOpEqual) String() string { + return fmt.Sprintf("%s = %s", op.field.Name, op.value) +} + +func (op *queryOpEqual) indexed() bool { + indexed := op.index != nil + op.table.debugLogf("[query] Table '%s'.'%s' indexed() -> %t", op.table.name, op.field.Name, indexed) + return indexed +} + +func (op *queryOpEqual) valid() error { + return nil +} + +func (op *queryOpEqual) match(pv dbPtrValue) bool { + return bytes.Equal([]byte(stringy.ValToStringOrPanic(pv.dangerous_Field(op.field))), op.value) +} + +func (op *queryOpEqual) rawIterateKeys(i keyIteratorWithBucket, txs ...*Tx) error { + if op.index == nil { + return errors.New("This 
method is only applicable if the op is indexed") + } + + return op.table.db.readTxHelper(func(tx *Tx) error { + db := op.table.bucket(tx) + + return op.index.iteratePrefixed(tx, op.value, func(key []byte) (IterationSignal, error) { + return i(key, db) + }) + }, txs...) +} + +func (op *queryOpEqual) IterateKeys(i KeyIterator, txs ...*Tx) error { + return op.rawIterateKeys(func(k []byte, _ *bolt.Bucket) (IterationSignal, error) { + return i(k) + }, txs...) +} + +func (op *queryOpEqual) Iterate(i Iterator, txs ...*Tx) error { + return op.rawIterateKeys(func(k []byte, b *bolt.Bucket) (IterationSignal, error) { + v, err := op.table.getWithinTx(b, k) + if err != nil { + op.table.debugLogf("[query] Encountered error while iterating (%s)", err) + return StopIteration, err + } + return i(v) + }, txs...) +} + +func (op *queryOpEqual) iterateRaw(i rawIterator, txs ...*Tx) error { + return op.rawIterateKeys(func(k []byte, b *bolt.Bucket) (IterationSignal, error) { + v, err := op.table.getValWithinTx(b, k) + if err != nil { + op.table.debugLogf("[query] Encountered error while iterating (%s)", err) + return StopIteration, err + } + return i(v) + }, txs...) +} diff --git a/stringy/stringy.go b/stringy/stringy.go new file mode 100644 index 0000000..f12757a --- /dev/null +++ b/stringy/stringy.go @@ -0,0 +1,149 @@ +// Package stringy implements string conversion helpers for tdb +package stringy + +import ( + "encoding/base64" + "fmt" + //"log" + "reflect" + "strconv" + "strings" +) + +var Serializers = map[reflect.Kind]func(reflect.Value) string{ + reflect.Uint: UintToString, + reflect.Uint64: UintToString, + reflect.Uint32: UintToString, + reflect.Uint16: UintToString, + reflect.Uint8: UintToString, + reflect.Int: IntToString, + reflect.Int64: IntToString, + reflect.Int32: IntToString, + reflect.Int16: IntToString, + reflect.Int8: IntToString, + reflect.Float64: FloatToString, + reflect.Float32: FloatToString, + reflect.String: StringToString, +} + +func ToString(v interface{}) (string, error) { + return ValToString(reflect.ValueOf(v)) +} + +func ToStringOrPanic(v interface{}) string { + s, err := ValToString(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return s +} + +func ValToString(val reflect.Value) (string, error) { + switch v := val.Interface().(type) { + case bool: + panic("unimplemented") + case float32: + case float64: + return FloatToString(val), nil + case complex64: + case complex128: + panic("unimplemented") + case int: + case int8: + case int16: + case int32: + case int64: + return IntToString(val), nil + case uint: + case uint8: + case uint16: + case uint32: + case uint64: + return UintToString(val), nil + case uintptr: + panic("unimplemented") + case string: + return StringToString(val), nil + case []byte: + return BytesToString(v), nil + } + + valType := val.Type() + kind := valType.Kind() + + if kind == reflect.Ptr { + return ptrToString(val) + } + + if kind == reflect.Slice || kind == reflect.Array { + return indexableToString(val) + } + + s, ok := Serializers[kind] + if !ok { + return "", fmt.Errorf("Unable to convert kind '%s' / type '%s' to string", kind.String(), valType.String()) + } + + return s(val), nil +} + +func ValToStringOrPanic(val reflect.Value) string { + s, err := ValToString(val) + if err != nil { + panic(err) + } + return s +} + +func LiteralIntToString(v int64) string { + return fmt.Sprintf("%+020d", v) +} + +func IntToString(val reflect.Value) string { + return LiteralIntToString(val.Int()) +} + +func LiteralUintToString(v uint64) string { + return 
fmt.Sprintf("%020d", v) +} + +func UintToString(val reflect.Value) string { + return LiteralUintToString(val.Uint()) +} + +func FloatToString(val reflect.Value) string { + return strconv.FormatFloat(val.Float(), 'E', -1, 64) +} + +func BoolToString(val interface{}) string { + v, _ := val.(bool) + return strconv.FormatBool(v) +} + +func StringToString(val reflect.Value) string { + return val.String() +} + +func BytesToString(val []byte) string { + return base64.RawURLEncoding.EncodeToString(val) +} + +func indexableToString(val reflect.Value) (string, error) { + l := val.Len() + items := make([]string, l) + for i := 0; i < l; i++ { + str, err := ValToString(val.Index(i)) + if err != nil { + return "", err + } + items[i] = str + } + return "[" + strings.Join(items, ",") + "]", nil +} + +func ptrToString(val reflect.Value) (string, error) { + if val.IsNil() { + return "nil", nil + } + return ValToString(val.Elem()) +} diff --git a/stringy/tostring_test.go b/stringy/tostring_test.go new file mode 100644 index 0000000..e8fead0 --- /dev/null +++ b/stringy/tostring_test.go @@ -0,0 +1,99 @@ +package stringy + +import ( + "math" + "testing" +) + +type tostringTestcase struct { + input interface{} + expected string +} + +var toStringTests = []tostringTestcase{ + // strings + {input: "", expected: ""}, + {input: "Hello world", expected: "Hello world"}, + // int + {input: int(-1), expected: "-0000000000000000001"}, + {input: int8(-1), expected: "-0000000000000000001"}, + {input: int16(-1), expected: "-0000000000000000001"}, + {input: int32(-1), expected: "-0000000000000000001"}, + {input: int64(-1), expected: "-0000000000000000001"}, + {input: int(0), expected: "+0000000000000000000"}, + {input: int8(0), expected: "+0000000000000000000"}, + {input: int16(0), expected: "+0000000000000000000"}, + {input: int32(0), expected: "+0000000000000000000"}, + {input: int64(0), expected: "+0000000000000000000"}, + {input: int(1), expected: "+0000000000000000001"}, + {input: int8(1), expected: "+0000000000000000001"}, + {input: int16(1), expected: "+0000000000000000001"}, + {input: int32(1), expected: "+0000000000000000001"}, + {input: int64(1), expected: "+0000000000000000001"}, + + {input: int8(math.MinInt8), expected: "-0000000000000000128"}, + {input: int16(math.MinInt8), expected: "-0000000000000000128"}, + {input: int32(math.MinInt8), expected: "-0000000000000000128"}, + {input: int64(math.MinInt8), expected: "-0000000000000000128"}, + {input: int16(math.MinInt16), expected: "-0000000000000032768"}, + {input: int32(math.MinInt16), expected: "-0000000000000032768"}, + {input: int64(math.MinInt16), expected: "-0000000000000032768"}, + {input: int32(math.MinInt32), expected: "-0000000002147483648"}, + {input: int64(math.MinInt32), expected: "-0000000002147483648"}, + {input: int64(math.MinInt64), expected: "-9223372036854775808"}, + + {input: int8(math.MaxInt8), expected: "+0000000000000000127"}, + {input: int16(math.MaxInt8), expected: "+0000000000000000127"}, + {input: int32(math.MaxInt8), expected: "+0000000000000000127"}, + {input: int64(math.MaxInt8), expected: "+0000000000000000127"}, + {input: int16(math.MaxInt16), expected: "+0000000000000032767"}, + {input: int32(math.MaxInt16), expected: "+0000000000000032767"}, + {input: int64(math.MaxInt16), expected: "+0000000000000032767"}, + {input: int32(math.MaxInt32), expected: "+0000000002147483647"}, + {input: int64(math.MaxInt32), expected: "+0000000002147483647"}, + {input: int64(math.MaxInt64), expected: "+9223372036854775807"}, + + // uint + {input: 
uint(0), expected: "00000000000000000000"}, + {input: uint8(0), expected: "00000000000000000000"}, + {input: uint16(0), expected: "00000000000000000000"}, + {input: uint32(0), expected: "00000000000000000000"}, + {input: uint64(0), expected: "00000000000000000000"}, + {input: uint(1), expected: "00000000000000000001"}, + {input: uint8(1), expected: "00000000000000000001"}, + {input: uint16(1), expected: "00000000000000000001"}, + {input: uint32(1), expected: "00000000000000000001"}, + {input: uint64(1), expected: "00000000000000000001"}, + + {input: uint8(math.MaxUint8), expected: "00000000000000000255"}, + {input: uint16(math.MaxUint8), expected: "00000000000000000255"}, + {input: uint32(math.MaxUint8), expected: "00000000000000000255"}, + {input: uint64(math.MaxUint8), expected: "00000000000000000255"}, + {input: uint16(math.MaxUint16), expected: "00000000000000065535"}, + {input: uint32(math.MaxUint16), expected: "00000000000000065535"}, + {input: uint64(math.MaxUint16), expected: "00000000000000065535"}, + {input: uint32(math.MaxUint32), expected: "00000000004294967295"}, + {input: uint64(math.MaxUint32), expected: "00000000004294967295"}, + {input: uint64(math.MaxUint64), expected: "18446744073709551615"}, + + // slices + {input: []int{}, expected: "[]"}, + {input: []int{0, 1}, expected: "[+0000000000000000000,+0000000000000000001]"}, + {input: []string{"once", "upon", "a", "midnight", "dreary"}, expected: "[once,upon,a,midnight,dreary]"}, + + // bytes (get base64 encoded) + {input: []byte("hi"), expected: "aGk"}, + {input: [][]byte{[]byte("hi")}, expected: "[aGk]"}, +} + +func TestToString(t *testing.T) { + for _, tc := range toStringTests { + actual, err := ToString(tc.input) + if err != nil { + t.Errorf("Did not expect string conversion of %#v to give error, got %s", tc.input, err) + } + if actual != tc.expected { + t.Errorf("Expected %#v to convert to '%s', got '%s'", tc.input, tc.expected, actual) + } + } +} diff --git a/table.go b/table.go new file mode 100644 index 0000000..6fc6ade --- /dev/null +++ b/table.go @@ -0,0 +1,426 @@ +package tdb + +import ( + "errors" + "fmt" + + "git.keganmyers.com/terribleplan/tdb/stringy" + + "github.com/golang/protobuf/proto" + bolt "go.etcd.io/bbolt" +) + +type Table interface { + debugLogger + Iterable + Transactable + + // New + Create(proto.Message, ...*Tx) (uint64, error) + CreateOrPanic(proto.Message, ...*Tx) uint64 + + // Read + Get(uint64, ...*Tx) (proto.Message, error) + Query() Query + Where(fieldName, op string, value interface{}) Query + + // Modify + Put(value proto.Message, txs ...*Tx) error + Update(id uint64, action func(proto.Message) error, txs ...*Tx) error +} +type TableSetup interface { + debugLogger + AddIndex(options SimpleIndexOptions) error + AddIndexOrPanic(options SimpleIndexOptions) + AddArrayIndex(options ArrayIndexOptions) error + AddArrayIndexOrPanic(options ArrayIndexOptions) +} + +type IndexQueryOpts struct { + Desc bool +} + +type CreateTableSchema func(createSchema TableSetup) error + +type table struct { + db *db + name string + nameBytes []byte + t *dbType + tPtr *dbPtrType + idField dbField + indicies map[string]indexish + constraints map[string]constraintish +} + +func newTable(db *db, t *dbType, idField dbField, createSchema CreateTableSchema) (*table, error) { + db.debugLogf("Creating table for %s", t.Name) + ktbl := &table{ + db: db, + name: t.Name, + nameBytes: []byte(t.Name), + t: t, + tPtr: t.PtrType(), + idField: idField, + indicies: make(map[string]indexish), + constraints: 
make(map[string]constraintish), + } + + err := createSchema(ktbl) + if err != nil { + return nil, err + } + + return ktbl, nil +} + +func (t *table) debugLog(message string) { + t.db.debugLog(message) +} + +func (t *table) debugLogf(f string, args ...interface{}) { + t.db.debugLogf(f, args...) +} + +func (t *table) bucket(tx *Tx) *bolt.Bucket { + return tx.tx().Bucket(t.nameBytes) +} + +func (t *table) AddIndex(options SimpleIndexOptions) error { + if options.Table != "" && options.Table != t.name { + t.debugLogf("warn: ignoring table name in index creation options, leave blank to disable this warning (got '%s')", options.Table) + } + + if _, exists := t.indicies[options.Field]; exists { + return fmt.Errorf("There is already an index on '%s'.'%s'", t.name, options.Field) + } + + if _, exists := t.constraints[options.Field]; exists { + return fmt.Errorf("There are already constraints on '%s'.'%s'", t.name, options.Field) + } + + options.Table = t.name + index, err := newSimpleIndex(t, options) + if err != nil { + return err + } + + t.indicies[options.Field] = index + t.constraints[options.Field] = index + return nil +} + +func (t *table) AddIndexOrPanic(options SimpleIndexOptions) { + if err := t.AddIndex(options); err != nil { + panic(err) + } +} + +func (t *table) AddArrayIndex(options ArrayIndexOptions) error { + if options.Table != "" && options.Table != t.name { + t.debugLogf("warn: ignoring table name in index creation options, leave blank to disable this warning (got '%s')", options.Table) + } + + if _, exists := t.indicies[options.Field]; exists { + return fmt.Errorf("There is already an index on '%s'.'%s'", t.name, options.Field) + } + + if _, exists := t.constraints[options.Field]; exists { + return fmt.Errorf("There are already constraints on '%s'.'%s'", t.name, options.Field) + } + + options.Table = t.name + index, err := newArrayIndex(t, options) + if err != nil { + return err + } + + t.indicies[options.Field] = index + t.constraints[options.Field] = index + return nil +} + +func (t *table) AddArrayIndexOrPanic(options ArrayIndexOptions) { + if err := t.AddArrayIndex(options); err != nil { + panic(err) + } +} + +// Create will insert a record with the next available ID in sequence +func (t *table) Create(thing proto.Message, txs ...*Tx) (uint64, error) { + t.debugLogf("[table.Create] Creating '%s'", t.name) + var id uint64 + pv := dbPtrValueOf(thing) + + if !pv.IsOfPtrType(t.tPtr) { + return 0, fmt.Errorf("[table.Create] Expected type '%s' in call (got '%s')", t.tPtr.String(), pv.PtrType().String()) + } + + if err := t.writeTxHelper(func(tx *Tx) error { + var idString []byte + b := t.bucket(tx) + for { + id, _ = b.NextSequence() + idString = []byte(stringy.LiteralUintToString(id)) + if b.Get(idString) == nil { + break + } + } + t.debugLogf("[table.Create] New '%s' will have Id '%d'", t.name, id) + + pv.dangerous_Field(t.idField).SetUint(id) + + if err := t.validate(pv, txs...); err != nil { + return err + } + + data, err := pv.Marshal() + if err != nil { + return err + } + b.Put(idString, data) + + t.updateIndicies(tx, t.tPtr.Zero(), pv) + t.debugLogf("[table.Create] Created '%s' with Id '%d'", t.name, id) + return nil + }, txs...); err != nil { + return 0, err + } + return id, nil +} + +func (t *table) Put(thing proto.Message, txs ...*Tx) error { + t.debugLogf("[table.Put] Putting '%s'", t.name) + pv := dbPtrValueOf(thing) + + if !pv.IsOfPtrType(t.tPtr) { + return fmt.Errorf("[table.Put] Expected type '%s' in call (got '%s')", t.tPtr.String(), pv.PtrType().String()) + } + + id 
:= pv.dangerous_Field(t.idField).Uint() + idString := []byte(stringy.LiteralUintToString(id)) + + data, err := pv.Marshal() + if err != nil { + return err + } + + if err := t.writeTxHelper(func(tx *Tx) error { + b := t.bucket(tx) + old, err := t.getValWithinTx(b, idString) + if err != nil { + return err + } + + if err := t.validate(pv, tx); err != nil { + return err + } + + if err := b.Put(idString, data); err != nil { + return err + } + + t.updateIndicies(tx, old, pv) + return nil + }, txs...); err != nil { + return err + } + return nil +} + +func (t *table) CreateOrPanic(thing proto.Message, txs ...*Tx) uint64 { + id, err := t.Create(thing, txs...) + if err != nil { + panic(err) + } + return id +} + +func (t *table) Get(id uint64, txs ...*Tx) (proto.Message, error) { + //todo: replace with a query? (once the query engine can optimize .Id = x) + return t.getRaw([]byte(stringy.ToStringOrPanic(id)), txs...) +} + +func (t *table) getRaw(id []byte, txs ...*Tx) (vProtoMessage proto.Message, err error) { + return vProtoMessage, t.readTxHelper(func(tx *Tx) error { + vProtoMessage, err = t.getWithinTx(t.bucket(tx), id) + if err != nil { + return err + } + return nil + }, txs...) +} + +func (t *table) getValWithinTx(b *bolt.Bucket, id []byte) (dbPtrValue, error) { + t.debugLogf("[table.getValWithinTx] looking up '%s'", id) + //todo: replace with a query? (once the query engine can optimize .Id = x) + v := b.Get([]byte(id)) + if v == nil { + t.debugLogf("got nil for '%s'", id) + return t.tPtr.Zero(), nil + } + + return t.tPtr.Unmarshal(v) +} + +func (t *table) getWithinTx(b *bolt.Bucket, id []byte) (proto.Message, error) { + pv, err := t.getValWithinTx(b, id) + if err != nil { + return nil, err + } + return pv.Proto(), nil +} + +func (t *table) Query() Query { + return &queryData{ + table: t, + ops: make([]queryOpish, 0), + } +} + +func (t *table) Where(fieldName, op string, value interface{}) Query { + return t.Query().Where(fieldName, op, value) +} + +func (t *table) Update(id uint64, action func(proto.Message) error, txs ...*Tx) error { + idBytes := []byte(stringy.LiteralUintToString(id)) + return t.writeTxHelper(func(tx *Tx) error { + b := t.bucket(tx) + + v := b.Get(idBytes) + if v == nil { + t.debugLogf("got nil for '%s'", idBytes) + return fmt.Errorf("No such entry '%d' in table '%s'", id, t.name) + } + + original, err := t.tPtr.Unmarshal(v) + if err != nil { + return err + } + + updated, err := t.tPtr.Unmarshal(v) + if err != nil { + return err + } + + if err := action(updated.Proto()); err != nil { + return err + } + + data, err := updated.Marshal() + if err != nil { + return err + } + err = b.Put(idBytes, data) + if err != nil { + return err + } + + t.updateIndicies(tx, original, updated) + return nil + }, txs...) +} + +func (t *table) iterateRaw(i rawIterator, txs ...*Tx) error { + return t.readTxHelper(func(tx *Tx) error { + c := t.bucket(tx).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + t.debugLogf("iterating over '%s' '%s'", t.name, k) + pv, err := t.tPtr.Unmarshal(v) + if err != nil { + t.debugLogf("[table.iterateRaw] error while iterating over '%s' '%s'", t.name, k) + } + + signal, err := i(pv) + if err != nil { + return err + } + if signal == StopIteration { + break + } + } + return nil + }, txs...) +} + +func (t *table) Iterate(i Iterator, txs ...*Tx) error { + return t.iterateRaw(func(pv dbPtrValue) (IterationSignal, error) { + return i(pv.Proto()) + }, txs...) 
+} + +func (t *table) IterateKeys(i KeyIterator, txs ...*Tx) error { + panic(errors.New("unimplemented")) +} + +func (t *table) initialize(tx *Tx) error { + _, err := tx.tx().CreateBucketIfNotExists(t.nameBytes) + for _, index := range t.indicies { + if err := index.initialize(tx); err != nil { + return err + } + } + return err +} + +func (t *table) validate(pv dbPtrValue, txs ...*Tx) error { + if pv.IsNil() { + return nil + } + + return t.readTxHelper(func(tx *Tx) error { + for _, c := range t.constraints { + if err := c.validate(tx, pv); err != nil { + return err + } + } + return nil + }, txs...) +} + +func (t *table) putIndicies(tx *Tx, after dbPtrValue) { + for _, index := range t.indicies { + index.put(tx, after) + } +} + +func (t *table) deleteIndicies(tx *Tx, before dbPtrValue) { + for _, index := range t.indicies { + index.delete(tx, before) + } +} + +func (t *table) updateIndiciesRaw(tx *Tx, before, after dbPtrValue) { + for _, index := range t.indicies { + index.update(tx, before, after) + } +} + +func (t *table) updateIndicies(tx *Tx, before, after dbPtrValue) { + if before.IsNil() { + t.putIndicies(tx, after) + return + } + if after.IsNil() { + t.deleteIndicies(tx, before) + return + } + t.updateIndiciesRaw(tx, before, after) + return +} + +func (t *table) ReadTx(ta Transaction) error { + return t.db.ReadTx(ta) +} + +func (t *table) readTxHelper(ta Transaction, txs ...*Tx) error { + return t.db.readTxHelper(ta, txs...) +} + +func (t *table) WriteTx(ta Transaction) error { + return t.db.WriteTx(ta) +} + +func (t *table) writeTxHelper(ta Transaction, txs ...*Tx) error { + return t.db.writeTxHelper(ta, txs...) +} diff --git a/test.proto b/test.proto new file mode 100644 index 0000000..80f95f0 --- /dev/null +++ b/test.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package tdb; + +message TEST_Main { + uint64 id = 1; + string guarantee = 2; +} + +message TEST_OwnedBy { + uint64 id = 1; + uint64 mainId = 2; + string guarantee = 3; +} + +message TEST_ArrayHas { + uint64 id = 1; + repeated uint64 mainIds = 2; + string guarantee = 3; +} diff --git a/test.sh b/test.sh new file mode 100644 index 0000000..1e5771d --- /dev/null +++ b/test.sh @@ -0,0 +1,14 @@ +set -Eeuxo pipefail + +# ./tdb/stringy +go test -count=1 -v ./stringy + +# ./tdb +rm -f *.pb.go proto_test.go +PATH="${GOPATH}/bin:${PATH}" protoc -I="." --go_out="." test.proto +mv test.pb.go proto_test.go + +# go test -count=1 -v -run TestEmptyStringUniqueConstraint ./tdb +go test -count=1 -v . + +rm -f *.pb.go proto_test.go *.testdb diff --git a/transaction.go b/transaction.go new file mode 100644 index 0000000..ef30828 --- /dev/null +++ b/transaction.go @@ -0,0 +1,14 @@ +package tdb + +// import ( +// bolt "go.etcd.io/bbolt" +// ) + +// type Tx is defined in internals + +type Transaction func(*Tx) error + +type Transactable interface { + ReadTx(Transaction) error + WriteTx(Transaction) error +}
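
For orientation, here is a minimal end-to-end sketch of how the pieces in this patch compose: a schema callback registers indices through TableSetup, Table.Create assigns the next sequence Id and maintains those indices inside a write transaction, and Where/Run drives the planner in query.go (the first indexed op is scanned, any remaining ops filter the matched rows). The DB-level entry points live in db.go, which is not reproduced in this excerpt, so the Open, CreateTable, and Close names below are assumptions rather than the package's confirmed API; the TableSetup, SimpleIndexOptions, Create, Where, and Run calls mirror table.go and query.go above (ConstraintOptions' Field and NotNull members are inferred from how the index code reads its options), and TEST_OwnedBy is the test message from test.proto.

package main

import (
    tdb "git.keganmyers.com/terribleplan/tdb"
)

func main() {
    // Assumed entry points: the real constructor and table-registration calls are in db.go.
    database, err := tdb.Open("example.testdb")
    if err != nil {
        panic(err)
    }
    defer database.Close() // assumed; the bbolt file underneath needs closing

    ownedBy, err := database.CreateTable(&tdb.TEST_OwnedBy{}, func(setup tdb.TableSetup) error {
        // Index MainId so Where("MainId", "=", ...) can use an index scan instead of a table scan.
        return setup.AddIndex(tdb.SimpleIndexOptions{
            ConstraintOptions: tdb.ConstraintOptions{Field: "MainId", NotNull: true},
        })
    })
    if err != nil {
        panic(err)
    }

    // Create assigns the next Id, runs the registered constraints, and writes index entries.
    id, err := ownedBy.Create(&tdb.TEST_OwnedBy{MainId: 7})
    if err != nil {
        panic(err)
    }

    // The indexed equality op drives the scan via iteratePrefixed; other ops would filter each match.
    rows, err := ownedBy.Where("MainId", "=", uint64(7)).Run()
    if err != nil {
        panic(err)
    }
    _, _ = id, rows
}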