diff --git a/.gitignore b/.gitignore index 66fd13c..54f3fb7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,13 @@ +.idea +.DS_Store + # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib +data-endpoints # Test binary, built with `go test -c` *.test diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/config/naming.go b/config/naming.go new file mode 100644 index 0000000..9d99872 --- /dev/null +++ b/config/naming.go @@ -0,0 +1,58 @@ +package config + +import "github.com/iancoleman/strcase" + +type NamingConvention interface { + // ToCQLColumn converts a GraphQL/REST name to a CQL column name. + ToCQLColumn(name string) string + + // ToCQLTable converts a GraphQL/REST name to a CQL table name. + ToCQLTable(name string) string + + // ToGraphQLField converts a CQL name (typically a column name) to a GraphQL field name. + ToGraphQLField(name string) string + + // ToGraphQLOperation converts a CQL name (typically a table name) to a GraphQL operation name. + ToGraphQLOperation(prefix string, name string) string + + // ToGraphQLType converts a CQL name (typically a table name) to a GraphQL type name. + ToGraphQLType(name string) string + + // ToGraphQLEnumValue converts a CQL name to a GraphQL enumeration value name. + ToGraphQLEnumValue(name string) string +} + +type defaultNaming struct{} + +// Default naming implementation. 
+var DefaultNaming = &defaultNaming{} + +func (n *defaultNaming) ToCQLColumn(name string) string { + // TODO: Fix numbers: "Table2" or "table2" --> "table_2" + return strcase.ToSnake(name) +} + +func (n *defaultNaming) ToCQLTable(name string) string { + // TODO: Fix numbers: "Table2" or "table2" --> "table_2" + return strcase.ToSnake(name) +} + +func (n *defaultNaming) ToGraphQLField(name string) string { + return strcase.ToLowerCamel(name) +} + +func (n *defaultNaming) ToGraphQLOperation(prefix string, name string) string { + if prefix == "" { + return strcase.ToLowerCamel(name) + } else { + return strcase.ToLowerCamel(prefix) + strcase.ToCamel(name) + } +} + +func (n *defaultNaming) ToGraphQLType(name string) string { + return strcase.ToCamel(name) +} + +func (n *defaultNaming) ToGraphQLEnumValue(name string) string { + return strcase.ToCamel(name) +} diff --git a/data.json b/data.json new file mode 100644 index 0000000..cd563ff --- /dev/null +++ b/data.json @@ -0,0 +1,14 @@ +{ + "1": { + "id": "1", + "name": "Dan" + }, + "2": { + "id": "2", + "name": "Lee" + }, + "3": { + "id": "3", + "name": "Nick" + } +} diff --git a/db/db.go b/db/db.go new file mode 100644 index 0000000..d1b2c1e --- /dev/null +++ b/db/db.go @@ -0,0 +1,56 @@ +package db + +import ( + "github.com/gocql/gocql" +) + +// Db represents a connection to a db +type Db struct { + session DbSession +} + +// NewDb Gets a pointer to a db +func NewDb(username string, password string, hosts ...string) (*Db, error) { + cluster := gocql.NewCluster(hosts...) 
+ + if username != "" && password != "" { + cluster.Authenticator = gocql.PasswordAuthenticator{ + Username: username, + Password: password, + } + } + + var ( + session *gocql.Session + err error + ) + + if session, err = cluster.CreateSession(); err != nil { + return nil, err + } + + return &Db{ + session: &GoCqlSession{ref: session}, + }, nil +} + +// Keyspace Retrieves a keyspace +func (db *Db) Keyspace(keyspace string) (*gocql.KeyspaceMetadata, error) { + // We expose gocql types for now, we should wrap them in the future instead + return db.session.KeyspaceMetadata(keyspace) +} + +// Keyspaces Retrieves all the keyspace names +func (db *Db) Keyspaces() ([]string, error) { + iter, err := db.session.ExecuteIter("SELECT keyspace_name FROM system_schema.keyspaces", nil) + if err != nil { + return nil, err + } + + var keyspaces []string + for _, row := range iter.Values() { + keyspaces = append(keyspaces, *row["keyspace_name"].(*string)) + } + + return keyspaces, nil +} diff --git a/db/db_session.go b/db/db_session.go new file mode 100644 index 0000000..0eb3aee --- /dev/null +++ b/db/db_session.go @@ -0,0 +1,114 @@ +package db + +import ( + "encoding/hex" + "github.com/gocql/gocql" +) + +type QueryOptions struct { + UserOrRole string + Consistency gocql.Consistency +} + +func NewQueryOptions() *QueryOptions { + return &QueryOptions{ + Consistency: gocql.LocalOne, + } +} + +func (q *QueryOptions) WithUserOrRole(userOrRole string) *QueryOptions { + q.UserOrRole = userOrRole + return q +} + +func (q *QueryOptions) WithConsistency(consistency gocql.Consistency) *QueryOptions { + q.Consistency = consistency + return q +} + +type DbSession interface { + // Execute executes a statement without returning row results + Execute(query string, options *QueryOptions, values ...interface{}) error + + // ExecuteIter executes a statement and returns an iterator to the result set + ExecuteIter(query string, options *QueryOptions, values ...interface{}) (ResultSet, error) + 
//TODO: Extract metadata methods from interface into another interface + KeyspaceMetadata(keyspaceName string) (*gocql.KeyspaceMetadata, error) +} + +type ResultSet interface { + PageState() string + Values() []map[string]interface{} +} + +func (r *goCqlResultIterator) PageState() string { + return hex.EncodeToString(r.pageState) +} + +func (r *goCqlResultIterator) Values() []map[string]interface{} { + return r.values +} + +type goCqlResultIterator struct { + pageState []byte + values []map[string]interface{} +} + +func newResultIterator(iter *gocql.Iter) (*goCqlResultIterator, error) { + columns := iter.Columns() + scanner := iter.Scanner() + + items := make([]map[string]interface{}, 0) + + for scanner.Next() { + row, err := mapScan(scanner, columns) + if err != nil { + return nil, err + } + items = append(items, row) + } + + if err := iter.Close(); err != nil { + return nil, err + } + + return &goCqlResultIterator{ + pageState: iter.PageState(), + values: items, + }, nil +} + +type GoCqlSession struct { + ref *gocql.Session +} + +func (db *Db) Execute(query string, options *QueryOptions, values ...interface{}) (ResultSet, error) { + return db.session.ExecuteIter(query, options, values...) +} + +func (db *Db) ExecuteNoResult(query string, options *QueryOptions, values ...interface{}) error { + return db.session.Execute(query, options, values...) +} + +func (session *GoCqlSession) Execute(query string, options *QueryOptions, values ...interface{}) error { + _, err := session.ExecuteIter(query, options, values...) + return err +} + +func (session *GoCqlSession) ExecuteIter(query string, options *QueryOptions, values ...interface{}) (ResultSet, error) { + q := session.ref.Query(query, values...) 
+ if options != nil { + q.Consistency(options.Consistency) + if options.UserOrRole != "" { + q.CustomPayload(map[string][]byte{ + "ProxyExecute": []byte(options.UserOrRole), + }) + } + } + return newResultIterator(q.Iter()) +} + +func (session *GoCqlSession) KeyspaceMetadata(keyspaceName string) (*gocql.KeyspaceMetadata, error) { + return session.ref.KeyspaceMetadata(keyspaceName) +} diff --git a/db/keyspace.go b/db/keyspace.go new file mode 100644 index 0000000..d411b69 --- /dev/null +++ b/db/keyspace.go @@ -0,0 +1,31 @@ +package db + +import ( + "fmt" +) + +func (db *Db) CreateKeyspace(name string, dcReplicas map[string]int, options *QueryOptions) (bool, error) { + // TODO: Escape keyspace datacenter names? + dcs := "" + for name, replicas := range dcReplicas { + comma := "" + if len(dcs) > 0 { + comma = " ," + } + dcs += fmt.Sprintf("%s'%s': %d", comma, name, replicas) + } + + query := fmt.Sprintf("CREATE KEYSPACE %s WITH REPLICATION = { 'class': 'NetworkTopologyStrategy', %s }", name, dcs) + + err := db.session.Execute(query, options) + + return err == nil, err +} + +func (db *Db) DropKeyspace(name string, options *QueryOptions) (bool, error) { + // TODO: Escape keyspace name? 
+ query := fmt.Sprintf("DROP KEYSPACE %s", name) + err := db.session.Execute(query, options) + + return err == nil, err +} diff --git a/db/query_generators.go b/db/query_generators.go new file mode 100644 index 0000000..c18cb08 --- /dev/null +++ b/db/query_generators.go @@ -0,0 +1,250 @@ +package db + +import ( + "errors" + "fmt" + "github.com/gocql/gocql" + "github.com/riptano/data-endpoints/types" + "reflect" + "strings" +) + +type SelectInfo struct { + Keyspace string + Table string + Where []types.ConditionItem + Options *types.QueryOptions + OrderBy []ColumnOrder +} + +type InsertInfo struct { + Keyspace string + Table string + Columns []string + QueryParams []interface{} + IfNotExists bool + TTL int +} + +type DeleteInfo struct { + Keyspace string + Table string + Columns []string + QueryParams []interface{} + IfCondition []types.ConditionItem + IfExists bool +} + +type UpdateInfo struct { + Keyspace string + Table *gocql.TableMetadata + Columns []string + QueryParams []interface{} + IfCondition []types.ConditionItem + IfExists bool + TTL int +} + +type ColumnOrder struct { + Column string + Order string +} + +func mapScan(scanner gocql.Scanner, columns []gocql.ColumnInfo) (map[string]interface{}, error) { + values := make([]interface{}, len(columns)) + + for i := range values { + typeInfo := columns[i].TypeInfo + switch typeInfo.Type() { + case gocql.TypeVarchar, gocql.TypeAscii, gocql.TypeInet, gocql.TypeText, + gocql.TypeBigInt, gocql.TypeCounter: + values[i] = new(*string) + case gocql.TypeBoolean: + values[i] = new(*bool) + case gocql.TypeFloat: + values[i] = new(*float32) + case gocql.TypeDouble: + values[i] = new(*float64) + case gocql.TypeInt: + values[i] = new(*int) + case gocql.TypeSmallInt: + values[i] = new(*int16) + case gocql.TypeTinyInt: + values[i] = new(*int8) + case gocql.TypeTimeUUID, gocql.TypeUUID: + values[i] = new(*gocql.UUID) + default: + values[i] = columns[i].TypeInfo.New() + } + } + + if err := 
scanner.Scan(values...); err != nil { + return nil, err + } + + mapped := make(map[string]interface{}, len(values)) + for i, column := range columns { + value := values[i] + switch column.TypeInfo.Type() { + case gocql.TypeVarchar, gocql.TypeAscii, gocql.TypeInet, gocql.TypeText, + gocql.TypeBigInt, gocql.TypeInt, gocql.TypeSmallInt, gocql.TypeTinyInt, + gocql.TypeCounter, gocql.TypeBoolean, + gocql.TypeTimeUUID, gocql.TypeUUID, + gocql.TypeFloat, gocql.TypeDouble: + value = reflect.Indirect(reflect.ValueOf(value)).Interface() + } + + mapped[column.Name] = value + } + + return mapped, nil +} + +func (db *Db) Select(info *SelectInfo, options *QueryOptions) (ResultSet, error) { + values := make([]interface{}, 0, len(info.Where)) + whereClause := buildCondition(info.Where, &values) + query := fmt.Sprintf("SELECT * FROM %s.%s WHERE %s", info.Keyspace, info.Table, whereClause) + + if len(info.OrderBy) > 0 { + query += " ORDER BY " + for i, order := range info.OrderBy { + if i > 0 { + query += ", " + } + query += order.Column + " " + order.Order + } + } + + if info.Options.Limit > 0 { + query += " LIMIT ?" + values = append(values, info.Options.Limit) + } + + return db.session.ExecuteIter(query, options, values...) +} + +func (db *Db) Insert(info *InsertInfo, options *QueryOptions) (*types.ModificationResult, error) { + + placeholders := "?" + for i := 1; i < len(info.Columns); i++ { + placeholders += ", ?" + } + + query := fmt.Sprintf( + "INSERT INTO %s.%s (%s) VALUES (%s)", + info.Keyspace, info.Table, strings.Join(info.Columns, ", "), placeholders) + + if info.IfNotExists { + query += " IF NOT EXISTS" + } + + if info.TTL >= 0 { + query += " USING TTL ?" + info.QueryParams = append(info.QueryParams, info.TTL) + } + + err := db.session.Execute(query, options, info.QueryParams...) 
+ + return &types.ModificationResult{Applied: err == nil}, err +} + +func (db *Db) Delete(info *DeleteInfo, options *QueryOptions) (*types.ModificationResult, error) { + whereClause := buildWhereClause(info.Columns) + query := fmt.Sprintf("DELETE FROM %s.%s WHERE %s", info.Keyspace, info.Table, whereClause) + queryParameters := make([]interface{}, len(info.QueryParams)) + copy(queryParameters, info.QueryParams) + + if info.IfExists { + query += " IF EXISTS" + } else if len(info.IfCondition) > 0 { + query += " IF " + buildCondition(info.IfCondition, &queryParameters) + } + + err := db.session.Execute(query, options, queryParameters...) + return &types.ModificationResult{Applied: err == nil}, err +} + +func (db *Db) Update(info *UpdateInfo, options *QueryOptions) (*types.ModificationResult, error) { + // We have to differentiate between WHERE and SET clauses + setClause := "" + whereClause := "" + setParameters := make([]interface{}, 0, len(info.QueryParams)) + whereParameters := make([]interface{}, 0, len(info.QueryParams)) + + keys := make(map[string]bool) + for _, c := range info.Table.PartitionKey { + keys[c.Name] = true + } + for _, c := range info.Table.ClusteringColumns { + keys[c.Name] = true + } + + for i, columnName := range info.Columns { + if keys[columnName] { + whereClause += fmt.Sprintf(" AND %s = ?", columnName) + whereParameters = append(whereParameters, info.QueryParams[i]) + } else { + setClause += fmt.Sprintf(", %s = ?", columnName) + setParameters = append(setParameters, info.QueryParams[i]) + } + } + + if len(whereClause) == 0 { + return nil, errors.New("Partition and clustering keys must be included in query") + } + if len(setClause) == 0 { + return nil, errors.New("Query must include columns to update") + } + + queryParameters := make([]interface{}, 0, len(info.QueryParams)) + + ttl := "" + if info.TTL >= 0 { + ttl = " USING TTL ?" 
+ queryParameters = append(queryParameters, info.TTL) + } + + for _, v := range setParameters { + queryParameters = append(queryParameters, v) + } + for _, v := range whereParameters { + queryParameters = append(queryParameters, v) + } + + // Remove the initial AND operator + whereClause = whereClause[5:] + // Remove the initial , operator + setClause = setClause[2:] + + query := fmt.Sprintf("UPDATE %s.%s%s SET %s WHERE %s", info.Keyspace, info.Table.Name, ttl, setClause, whereClause) + + if info.IfExists { + query += " IF EXISTS" + } else if len(info.IfCondition) > 0 { + query += " IF " + buildCondition(info.IfCondition, &queryParameters) + } + + err := db.session.Execute(query, options, queryParameters...) + return &types.ModificationResult{Applied: err == nil}, err +} + +func buildWhereClause(columnNames []string) string { + whereClause := columnNames[0] + " = ?" + for i := 1; i < len(columnNames); i++ { + whereClause += " AND " + columnNames[i] + " = ?" + } + return whereClause +} + +func buildCondition(condition []types.ConditionItem, queryParameters *[]interface{}) string { + conditionClause := "" + for _, item := range condition { + if conditionClause != "" { + conditionClause += " AND " + } + + conditionClause += fmt.Sprintf("%s %s ?", item.Column, item.Operator) + *queryParameters = append(*queryParameters, item.Value) + } + return conditionClause +} diff --git a/db/query_generators_test.go b/db/query_generators_test.go new file mode 100644 index 0000000..e4f0a23 --- /dev/null +++ b/db/query_generators_test.go @@ -0,0 +1,234 @@ +package db + +import ( + "github.com/gocql/gocql" + "github.com/riptano/data-endpoints/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "testing" +) + +func TestDeleteGeneration(t *testing.T) { + items := []struct { + columnNames []string + queryParams []interface{} + query string + ifExists bool + ifCondition 
[]types.ConditionItem + }{ + {[]string{"a"}, []interface{}{"b"}, "DELETE FROM ks1.tbl1 WHERE a = ?", false, nil}, + {[]string{"a", "b"}, []interface{}{"A Value", 2}, "DELETE FROM ks1.tbl1 WHERE a = ? AND b = ?", false, nil}, + {[]string{"a"}, []interface{}{"b"}, "DELETE FROM ks1.tbl1 WHERE a = ? IF EXISTS", true, nil}, + {[]string{"a"}, []interface{}{"b"}, "DELETE FROM ks1.tbl1 WHERE a = ? IF c = ?", false, []types.ConditionItem{{"c", "=", "z"}}}, + } + + for _, item := range items { + sessionMock := SessionMock{} + db := &Db{ + session: &sessionMock, + } + + sessionMock.On("Execute", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + _, err := db.Delete(&DeleteInfo{ + Keyspace: "ks1", + Table: "tbl1", + Columns: item.columnNames, + QueryParams: item.queryParams, + IfExists: item.ifExists, + IfCondition: item.ifCondition, + }, nil) + assert.Nil(t, err) + + expectedQueryParams := make([]interface{}, len(item.queryParams)) + copy(expectedQueryParams, item.queryParams) + + if len(item.ifCondition) > 0 { + for _, condition := range item.ifCondition { + expectedQueryParams = append(expectedQueryParams, condition.Value) + } + } + sessionMock.AssertCalled(t, "Execute", item.query, mock.Anything, expectedQueryParams) + sessionMock.AssertExpectations(t) + } +} + +func TestUpdateGeneration(t *testing.T) { + table := &gocql.TableMetadata{ + Name: "tbl1", + PartitionKey: []*gocql.ColumnMetadata{{Name: "pk1"}, {Name: "pk2"}}, + ClusteringColumns: []*gocql.ColumnMetadata{{Name: "ck1"}}, + } + + items := []struct { + columnNames []string + queryParams []interface{} + ifExists bool + ifCondition []types.ConditionItem + ttl int + query string + expectedParams []interface{} + }{ + {[]string{"ck1", "a", "b", "pk2", "pk1"}, []interface{}{1, 2, 3, 4, 5}, false, nil, -1, + "UPDATE ks1.tbl1 SET a = ?, b = ? WHERE ck1 = ? AND pk2 = ? 
AND pk1 = ?", []interface{}{2, 3, 1, 4, 5}}, + {[]string{"a", "ck1", "pk1", "pk2"}, []interface{}{1, 2, 3, 4}, true, nil, 60, + "UPDATE ks1.tbl1 USING TTL ? SET a = ? WHERE ck1 = ? AND pk1 = ? AND pk2 = ? IF EXISTS", + []interface{}{60, 1, 2, 3, 4}}, + {[]string{"a", "ck1", "pk1", "pk2"}, []interface{}{1, 2, 3, 4}, false, + []types.ConditionItem{{"c", ">", 100}}, -1, + "UPDATE ks1.tbl1 SET a = ? WHERE ck1 = ? AND pk1 = ? AND pk2 = ? IF c > ?", + []interface{}{1, 2, 3, 4, 100}}, + } + + for _, item := range items { + sessionMock := SessionMock{} + db := &Db{ + session: &sessionMock, + } + + sessionMock.On("Execute", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + _, err := db.Update(&UpdateInfo{ + Keyspace: "ks1", + Table: table, + Columns: item.columnNames, + QueryParams: item.queryParams, + IfExists: item.ifExists, + IfCondition: item.ifCondition, + TTL: item.ttl, + }, nil) + assert.Nil(t, err) + + sessionMock.AssertCalled(t, "Execute", item.query, mock.Anything, item.expectedParams) + sessionMock.AssertExpectations(t) + } +} + +func TestInsertGeneration(t *testing.T) { + items := []struct { + columnNames []string + queryParams []interface{} + ttl int + ifNotExists bool + query string + }{ + {[]string{"a"}, []interface{}{100}, -1, false, "INSERT INTO ks1.tbl1 (a) VALUES (?)"}, + {[]string{"a", "b"}, []interface{}{100, 2}, -1, false, "INSERT INTO ks1.tbl1 (a, b) VALUES (?, ?)"}, + {[]string{"a"}, []interface{}{100}, -1, true, "INSERT INTO ks1.tbl1 (a) VALUES (?) IF NOT EXISTS"}, + {[]string{"a"}, []interface{}{"z"}, 3600, true, + "INSERT INTO ks1.tbl1 (a) VALUES (?) 
IF NOT EXISTS USING TTL ?"}, + } + + for _, item := range items { + sessionMock := SessionMock{} + db := &Db{ + session: &sessionMock, + } + + sessionMock.On("Execute", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + expectedQueryParams := make([]interface{}, len(item.queryParams)) + copy(expectedQueryParams, item.queryParams) + + if item.ttl >= 0 { + expectedQueryParams = append(expectedQueryParams, item.ttl) + } + + _, err := db.Insert(&InsertInfo{ + Keyspace: "ks1", + Table: "tbl1", + Columns: item.columnNames, + QueryParams: item.queryParams, + TTL: item.ttl, + IfNotExists: item.ifNotExists, + }, nil) + assert.Nil(t, err) + sessionMock.AssertCalled(t, "Execute", item.query, mock.Anything, expectedQueryParams) + sessionMock.AssertExpectations(t) + } +} + +func TestSelectGeneration(t *testing.T) { + resultMock := &ResultMock{} + resultMock. + On("PageState").Return(""). + On("Values").Return([]map[string]interface{}{}, nil) + + items := []struct { + where []types.ConditionItem + options *types.QueryOptions + orderBy []ColumnOrder + query string + }{ + {[]types.ConditionItem{{"a", "=", 1}}, &types.QueryOptions{}, nil, + "SELECT * FROM ks1.tbl1 WHERE a = ?"}, + {[]types.ConditionItem{{"a", "=", 1}, {"b", ">", 2}}, &types.QueryOptions{}, nil, + "SELECT * FROM ks1.tbl1 WHERE a = ? AND b > ?"}, + {[]types.ConditionItem{{"a", "=", 1}, {"b", ">", 2}, {"b", "<=", 5}}, &types.QueryOptions{}, nil, + "SELECT * FROM ks1.tbl1 WHERE a = ? AND b > ? AND b <= ?"}, + {[]types.ConditionItem{{"a", "=", 1}}, &types.QueryOptions{}, []ColumnOrder{{"c", "DESC"}}, + "SELECT * FROM ks1.tbl1 WHERE a = ? ORDER BY c DESC"}, + {[]types.ConditionItem{{"a", "=", "z"}}, &types.QueryOptions{Limit: 1}, []ColumnOrder{{"c", "ASC"}}, + "SELECT * FROM ks1.tbl1 WHERE a = ? 
ORDER BY c ASC LIMIT ?"}, + } + + for _, item := range items { + sessionMock := SessionMock{} + db := &Db{ + session: &sessionMock, + } + sessionMock.On("ExecuteIter", mock.Anything, mock.Anything, mock.Anything).Return(resultMock, nil) + queryParams := make([]interface{}, 0) + + for _, v := range item.where { + queryParams = append(queryParams, v.Value) + } + + if item.options != nil && item.options.Limit > 0 { + queryParams = append(queryParams, item.options.Limit) + } + + _, err := db.Select(&SelectInfo{ + Keyspace: "ks1", + Table: "tbl1", + Where: item.where, + Options: item.options, + OrderBy: item.orderBy, + }, nil) + assert.Nil(t, err) + sessionMock.AssertCalled(t, "ExecuteIter", item.query, mock.Anything, queryParams) + sessionMock.AssertExpectations(t) + } +} + +type SessionMock struct { + mock.Mock +} + +func (o *SessionMock) Execute(query string, options *QueryOptions, values ...interface{}) error { + args := o.Called(query, options, values) + return args.Error(0) +} + +func (o *SessionMock) ExecuteIter(query string, options *QueryOptions, values ...interface{}) (ResultSet, error) { + args := o.Called(query, options, values) + return args.Get(0).(ResultSet), args.Error(1) +} + +func (o *SessionMock) KeyspaceMetadata(keyspaceName string) (*gocql.KeyspaceMetadata, error) { + args := o.Called(keyspaceName) + return args.Get(0).(*gocql.KeyspaceMetadata), args.Error(1) +} + +type ResultMock struct { + mock.Mock +} + +func (o ResultMock) PageState() string { + return o.Called().String(0) +} + +func (o ResultMock) Values() []map[string]interface{} { + args := o.Called() + return args.Get(0).([]map[string]interface{}) +} diff --git a/db/table.go b/db/table.go new file mode 100644 index 0000000..47882ef --- /dev/null +++ b/db/table.go @@ -0,0 +1,73 @@ +package db + +import ( + "fmt" + "github.com/gocql/gocql" +) + +type CreateTableInfo struct { + Keyspace string + Table string + PartitionKeys []*gocql.ColumnMetadata + ClusteringKeys 
[]*gocql.ColumnMetadata + Values []*gocql.ColumnMetadata +} + +type DropTableInfo struct { + Keyspace string + Table string +} + +func (db *Db) CreateTable(info *CreateTableInfo, options *QueryOptions) (bool, error) { + + columns := "" + primaryKeys := "" + clusteringOrder := "" + + for _, c := range info.PartitionKeys { + columns += fmt.Sprintf("%s %s, ", c.Name, c.Type) + if len(primaryKeys) > 0 { + primaryKeys += ", " + } + primaryKeys += c.Name + } + + if info.ClusteringKeys != nil { + primaryKeys = fmt.Sprintf("(%s)", primaryKeys) + + for _, c := range info.ClusteringKeys { + columns += fmt.Sprintf("%s %s, ", c.Name, c.Type) + primaryKeys += fmt.Sprintf(", %s", c.Name) + if len(clusteringOrder) > 0 { + clusteringOrder += ", " + } + order := c.ClusteringOrder + if order == "" { + order = "ASC" + } + clusteringOrder += fmt.Sprintf("%s %s", c.Name, order) + } + } + + if info.Values != nil { + for _, c := range info.Values { + columns += fmt.Sprintf("%s %s, ", c.Name, c.Type) + } + } + + query := fmt.Sprintf("CREATE TABLE %s.%s (%sPRIMARY KEY (%s))", info.Keyspace, info.Table, columns, primaryKeys) + + if clusteringOrder != "" { + query += fmt.Sprintf(" WITH CLUSTERING ORDER BY (%s)", clusteringOrder) + } + + err := db.session.Execute(query, options) + return err == nil, err +} + +func (db *Db) DropTable(info *DropTableInfo, options *QueryOptions) (bool, error) { + // TODO: Escape keyspace/table name? + query := fmt.Sprintf("DROP TABLE %s.%s", info.Keyspace, info.Table) + err := db.session.Execute(query, options) + return err == nil, err +} diff --git a/docs/GraphQL.md b/docs/GraphQL.md index 0ebfdc3..3f0c917 100644 --- a/docs/GraphQL.md +++ b/docs/GraphQL.md @@ -65,7 +65,7 @@ type SomeType { `blob` (byte array) -`boolean` +`boolean` `counter` (signed 64-bit integer) @@ -99,7 +99,7 @@ type SomeType { | `double` | `Float` | | `decimal` | `String` | | `date` | `String` of the form `"yyyy-mm-dd"` | -| `duration` | ? 
| +| `duration` | `String` in ISO 8601 formats or digits and units, for example: `P21Y5M`, `12h30m`, `33us1ns` | | `time` | `String` of the form `"hh:mm:ss[.fff]"` | | `timestamp`| `String` of the form `"yyyy-mm-dd [hh:MM:ss[.fff]][+/-NNNN]"` | | `uuid` | `String` of the form `"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"` where x are hex digits `[a-f0-9]` | @@ -107,7 +107,7 @@ type SomeType { | `blob` | `String` of base64 encoded data | | `boolean` | `Boolean` | | `counter` | `String` | -| `inet` | `String` | +| `inet` | `String` | One possible idea here is to "tag" types using objects. `bigint` and other similar CQL specific types could be wrapped in a type instead of using just @@ -222,7 +222,7 @@ type Mutation { ### Table management and CRUD operations Table management would live in under each keyspace directory along with the -tables CRUD operations. +tables CRUD operations. I think it makes sense to have all the tables CRUD operations live in a combined keyspace GraphQL schema so that result can be combined together in different @@ -252,7 +252,7 @@ type Column { type Table { name: String! keyspaceName: String! - primaryKey: [Column]! + partitionKey: [Column]! clusteringKey: [Column] values: [Column] # ... @@ -270,7 +270,7 @@ type Query { } type Mutation { - createTable(name: String!, primaryKey: [Column]!, clusteringKey: [Column], values: [Column]): Table + createTable(name: String!, partitionKey: [Column]!, clusteringKey: [Column], values: [Column]): Table dropTable(name: String!) # Do we put the CRUD operations here? @@ -287,23 +287,23 @@ camelcase) simultaneously. 
Path: `/graphql/cycling` ```cql -CREATE KEYSPACE cycling - WITH REPLICATION = { - 'class' : 'NetworkTopologyStrategy', - 'datacenter1' : 1 +CREATE KEYSPACE cycling + WITH REPLICATION = { + 'class' : 'NetworkTopologyStrategy', + 'datacenter1' : 1 } ; -CREATE TABLE cycling.cyclist_name ( - id UUID PRIMARY KEY, - lastname text, +CREATE TABLE cycling.cyclist_name ( + id UUID PRIMARY KEY, + lastname text, firstname text ); -CREATE TABLE cycling.cyclist_category ( - category text, - points int, - id UUID, - lastname text, - PRIMARY KEY (category, points)) +CREATE TABLE cycling.cyclist_category ( + category text, + points int, + id UUID, + lastname text, + PRIMARY KEY (category, points)) ``` ```graphql @@ -330,7 +330,7 @@ schema { # aggregates (Maybe the can be their own query: `countCyclistName()`?). # We can potentially handle optional clustering filters by making them -# non-nullable parameters? +# non-nullable parameters? # Things to think about: # * Expression @@ -343,7 +343,7 @@ schema { # * Static filters type Query { - cyclistName(id: String!): CyclistName + cyclistName(id: String!): CyclistName cyclistCategory(category: String!, points: Int!): CyclistCategory } diff --git a/endpoint/endpoint.go b/endpoint/endpoint.go new file mode 100644 index 0000000..d35bc48 --- /dev/null +++ b/endpoint/endpoint.go @@ -0,0 +1,47 @@ +package endpoint // TODO: Change package name? 
+ +import ( + "github.com/riptano/data-endpoints/config" + "github.com/riptano/data-endpoints/db" + "github.com/riptano/data-endpoints/graphql" + "time" +) + +type DataEndpointConfig struct { + DbHosts []string + DbUsername string + DbPassword string + ExcludedKeyspaces []string + SchemaUpdateInterval time.Duration + Naming config.NamingConvention +} + +type DataEndpoint struct { + graphQLRouteGen *graphql.RouteGenerator +} + +func NewEndpointConfig(hosts ...string) *DataEndpointConfig { + return &DataEndpointConfig{ + DbHosts: hosts, + SchemaUpdateInterval: 10 * time.Second, + Naming: config.DefaultNaming, + } +} + +func (cfg *DataEndpointConfig) NewEndpoint() (*DataEndpoint, error) { + dbClient, err := db.NewDb(cfg.DbUsername, cfg.DbPassword, cfg.DbHosts...) + if err != nil { + return nil, err + } + return &DataEndpoint{ + graphQLRouteGen: graphql.NewRouteGenerator(dbClient, cfg.ExcludedKeyspaces, cfg.SchemaUpdateInterval, cfg.Naming), + }, nil +} + +func (e *DataEndpoint) RoutesGraphQL(pattern string) ([]graphql.Route, error) { + return e.graphQLRouteGen.Routes(pattern) +} + +func (e *DataEndpoint) RoutesKeyspaceGraphQL(pattern string, ksName string) ([]graphql.Route, error) { + return e.graphQLRouteGen.RoutesKeyspace(pattern, ksName) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..ca4f354 --- /dev/null +++ b/go.mod @@ -0,0 +1,15 @@ +module github.com/riptano/data-endpoints + +go 1.13 + +require ( + github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb + github.com/graphql-go/graphql v0.7.9 + github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 + github.com/julienschmidt/httprouter v1.3.0 + github.com/mitchellh/mapstructure v1.1.2 + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.5.1 + 
gopkg.in/inf.v0 v0.9.1 + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..74456d6 --- /dev/null +++ b/go.sum @@ -0,0 +1,45 @@ +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb h1:H3tisfjQwq9FTyWqlKsZpgoYrsvn2pmTWvAiDHa5pho= +github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/graphql-go/graphql v0.7.9 h1:5Va/Rt4l5g3YjwDnid3vFfn43faaQBq7rMcIZ0VnV34= +github.com/graphql-go/graphql v0.7.9/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed 
h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8= +github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/graphql/keyspace.go b/graphql/keyspace.go new file mode 100644 index 0000000..d9a87dc --- /dev/null +++ b/graphql/keyspace.go @@ -0,0 +1,181 @@ +package graphql + +import ( + "fmt" + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" + "github.com/riptano/data-endpoints/db" + "os" + "strconv" + "strings" +) + +var dataCenterType = graphql.NewObject(graphql.ObjectConfig{ + Name: "DataCenter", + Fields: graphql.Fields{ + "name": &graphql.Field{ + Type: graphql.NewNonNull(graphql.String), + }, + "replicas": &graphql.Field{ + Type: graphql.NewNonNull(graphql.Int), + }, + }, +}) + +var dataCenterInput = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: "DataCenterInput", + Fields: graphql.InputObjectConfigFieldMap{ + "name": &graphql.InputObjectFieldConfig{ + Type: 
graphql.NewNonNull(graphql.String), + }, + "replicas": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.Int), + }, + }, +}) + +var keyspaceType = graphql.NewObject(graphql.ObjectConfig{ + Name: "Keyspace", + Fields: graphql.Fields{ + "name": &graphql.Field{ + Type: graphql.NewNonNull(graphql.String), + }, + "dcs": &graphql.Field{ + Type: graphql.NewList(dataCenterType), + }, + }, +}) + +func (sg *SchemaGenerator) BuildKeyspaceSchema() (graphql.Schema, error) { + return graphql.NewSchema( + graphql.SchemaConfig{ + Query: sg.buildKeyspaceQuery(), + Mutation: sg.buildKeyspaceMutation(), + }) +} + +type dataCenterValue struct { + Name string `json:"name"` + Replicas int `json:"replicas"` +} + +type ksValue struct { + Name string `json:"name"` + DCs []dataCenterValue `json:"dcs"` +} + +func buildKeyspaceValue(keyspace *gocql.KeyspaceMetadata) ksValue { + dcs := make([]dataCenterValue, 0) + if strings.Contains(keyspace.StrategyClass, "NetworkTopologyStrategy") { + for dc, replicas := range keyspace.StrategyOptions { + count, err := strconv.Atoi(replicas.(string)) + if err != nil { + // TODO: We need logging + fmt.Fprintf(os.Stderr, "invalid replicas value ('%s') for keyspace '%s'\n", replicas, keyspace.Name) + continue + } + dcs = append(dcs, dataCenterValue{ + Name: dc, + Replicas: count, + }) + } + } + return ksValue{keyspace.Name, dcs} +} + +func (sg *SchemaGenerator) buildKeyspaceQuery() *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Name: "KeyspaceQuery", + Fields: graphql.Fields{ + "keyspace": &graphql.Field{ + Type: keyspaceType, + Args: graphql.FieldConfigArgument{ + "name": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + }, + Resolve: func(params graphql.ResolveParams) (interface{}, error) { + ksName := params.Args["name"].(string) + keyspace, err := sg.dbClient.Keyspace(ksName) + if err != nil { + return nil, err + } + + return buildKeyspaceValue(keyspace), nil + }, + }, + "keyspaces": 
&graphql.Field{ + Type: graphql.NewList(keyspaceType), + Resolve: func(params graphql.ResolveParams) (interface{}, error) { + ksNames, err := sg.dbClient.Keyspaces() + if err != nil { + return nil, err + } + + ksValues := make([]ksValue, 0) + for _, ksName := range ksNames { + keyspace, err := sg.dbClient.Keyspace(ksName) + if err != nil { + return nil, err + } + ksValues = append(ksValues, buildKeyspaceValue(keyspace)) + } + + return ksValues, nil + }, + }, + }, + }) +} + +func (sg *SchemaGenerator) buildKeyspaceMutation() *graphql.Object { + return graphql.NewObject(graphql.ObjectConfig{ + Name: "KeyspaceMutation", + Fields: graphql.Fields{ + "createKeyspace": &graphql.Field{ + Type: graphql.Boolean, + Args: graphql.FieldConfigArgument{ + "name": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + "dcs": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.NewList(dataCenterInput)), + }, + }, + Resolve: func(params graphql.ResolveParams) (interface{}, error) { + ksName := params.Args["name"].(string) + dcs := params.Args["dcs"].([]interface{}) + + dcReplicas := make(map[string]int) + for _, dc := range dcs { + dcReplica := dc.(map[string]interface{}) + dcReplicas[dcReplica["name"].(string)] = dcReplica["replicas"].(int) + } + + userOrRole, err := checkAuthUserOrRole(params) + if err != nil { + return nil, err + } + return sg.dbClient.CreateKeyspace(ksName, dcReplicas, db.NewQueryOptions().WithUserOrRole(userOrRole)) + }, + }, + "dropKeyspace": &graphql.Field{ + Type: graphql.Boolean, + Args: graphql.FieldConfigArgument{ + "name": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + }, + Resolve: func(params graphql.ResolveParams) (interface{}, error) { + ksName := params.Args["name"].(string) + + userOrRole, err := checkAuthUserOrRole(params) + if err != nil { + return nil, err + } + return sg.dbClient.DropKeyspace(ksName, db.NewQueryOptions().WithUserOrRole(userOrRole)) + }, + }, + }, + }) +} diff --git 
a/graphql/keyspace_schema.go b/graphql/keyspace_schema.go new file mode 100644 index 0000000..6fc45d3 --- /dev/null +++ b/graphql/keyspace_schema.go @@ -0,0 +1,162 @@ +package graphql + +import ( + "fmt" + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" + "github.com/riptano/data-endpoints/config" + "log" +) + +type KeyspaceGraphQLSchema struct { + // A set of ignored tables + ignoredTables map[string]bool + // A map containing the table type by table name, with each column as scalar value + tableValueTypes map[string]*graphql.Object + // A map containing the table input type by table name, with each column as scalar value + tableScalarInputTypes map[string]*graphql.InputObject + // A map containing the table type by table name, with each column as input filter + tableOperatorInputTypes map[string]*graphql.InputObject + // A map containing the result type by table name for a select query + resultSelectTypes map[string]*graphql.Object + // A map containing the result type by table name for a update/insert/delete query + resultUpdateTypes map[string]*graphql.Object + // A map containing the order enum by table name + orderEnums map[string]*graphql.Enum +} + +var inputQueryOptions = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: "QueryOptions", + Fields: graphql.InputObjectConfigFieldMap{ + "limit": {Type: graphql.Int}, + "pageSize": {Type: graphql.Int}, + "pageState": {Type: graphql.String}, + }, +}) + +var inputMutationOptions = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: "UpdateOptions", + Fields: graphql.InputObjectConfigFieldMap{ + "ttl": {Type: graphql.Int}, + }, +}) + +func (s *KeyspaceGraphQLSchema) BuildTypes(keyspace *gocql.KeyspaceMetadata, naming config.NamingConvention) error { + s.buildOrderEnums(keyspace, naming) + s.buildTableTypes(keyspace, naming) + s.buildResultTypes(keyspace, naming) + return nil +} + +func (s *KeyspaceGraphQLSchema) 
buildOrderEnums(keyspace *gocql.KeyspaceMetadata, naming config.NamingConvention) { + s.orderEnums = make(map[string]*graphql.Enum, len(keyspace.Tables)) + for _, table := range keyspace.Tables { + values := make(map[string]*graphql.EnumValueConfig, len(table.Columns)) + for _, column := range table.Columns { + values[naming.ToGraphQLEnumValue(column.Name)+"_ASC"] = &graphql.EnumValueConfig{ + Value: column.Name + "_ASC", + Description: fmt.Sprintf("Order %s by %s in a scending order", table.Name, column.Name), + } + values[naming.ToGraphQLEnumValue(column.Name)+"_DESC"] = &graphql.EnumValueConfig{ + Value: column.Name + "_DESC", + Description: fmt.Sprintf("Order %s by %s in descending order", table.Name, column.Name), + } + } + + s.orderEnums[table.Name] = graphql.NewEnum(graphql.EnumConfig{ + Name: naming.ToGraphQLType(table.Name + "Order"), + Values: values, + }) + } +} + +func (s *KeyspaceGraphQLSchema) buildTableTypes(keyspace *gocql.KeyspaceMetadata, naming config.NamingConvention) { + s.tableValueTypes = make(map[string]*graphql.Object, len(keyspace.Tables)) + s.tableScalarInputTypes = make(map[string]*graphql.InputObject, len(keyspace.Tables)) + s.tableOperatorInputTypes = make(map[string]*graphql.InputObject, len(keyspace.Tables)) + + for _, table := range keyspace.Tables { + fields := graphql.Fields{} + inputFields := graphql.InputObjectConfigFieldMap{} + inputOperatorFields := graphql.InputObjectConfigFieldMap{} + var err error + + for name, column := range table.Columns { + var fieldType graphql.Output + fieldName := naming.ToGraphQLField(name) + fieldType, err = buildType(column.Type) + if err != nil { + log.Println(err) + break + } + + fields[fieldName] = &graphql.Field{Type: fieldType} + inputFields[fieldName] = &graphql.InputObjectFieldConfig{Type: fieldType} + + t := operatorsInputTypes[column.Type.Type()] + if t == nil { + // Exit: this is a bug as no operator type was defined for a type + log.Fatalf("No operator input type found for %s", 
column.Type.Type()) + } + + inputOperatorFields[fieldName] = &graphql.InputObjectFieldConfig{ + Type: t, + } + } + + if err != nil { + log.Printf("Ignoring table %s", table.Name) + s.ignoredTables[table.Name] = true + err = nil + continue + } + + s.tableValueTypes[table.Name] = graphql.NewObject(graphql.ObjectConfig{ + Name: naming.ToGraphQLType(table.Name), + Fields: fields, + }) + + s.tableScalarInputTypes[table.Name] = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: naming.ToGraphQLType(table.Name) + "Input", + Fields: inputFields, + }) + + s.tableOperatorInputTypes[table.Name] = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: naming.ToGraphQLType(table.Name) + "FilterInput", + Fields: inputOperatorFields, + }) + } +} + +func (s *KeyspaceGraphQLSchema) buildResultTypes(keyspace *gocql.KeyspaceMetadata, naming config.NamingConvention) { + s.resultSelectTypes = make(map[string]*graphql.Object, len(keyspace.Tables)) + s.resultUpdateTypes = make(map[string]*graphql.Object, len(keyspace.Tables)) + + for _, table := range keyspace.Tables { + if s.ignoredTables[table.Name] { + continue + } + + itemType, ok := s.tableValueTypes[table.Name] + + if !ok { + panic(fmt.Sprintf("Table value type for table '%s' not found", table.Name)) + } + + s.resultSelectTypes[table.Name] = graphql.NewObject(graphql.ObjectConfig{ + Name: naming.ToGraphQLType(table.Name + "Result"), + Fields: graphql.Fields{ + "pageState": {Type: graphql.String}, + "values": {Type: graphql.NewList(graphql.NewNonNull(itemType))}, + }, + }) + + s.resultUpdateTypes[table.Name] = graphql.NewObject(graphql.ObjectConfig{ + Name: naming.ToGraphQLType(table.Name + "MutationResult"), + Fields: graphql.Fields{ + "applied": {Type: graphql.NewNonNull(graphql.Boolean)}, + "value": {Type: itemType}, + }, + }) + } +} diff --git a/graphql/operators_input_types.go b/graphql/operators_input_types.go new file mode 100644 index 0000000..fd4c8d4 --- /dev/null +++ b/graphql/operators_input_types.go @@ -0,0 
+1,52 @@ +package graphql + +import ( + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" +) + +var stringOperatorType = operatorType(graphql.String) +var intOperatorType = operatorType(graphql.Int) +var floatOperatorType = operatorType(graphql.Float) + +// cqlOperators contains the CQL operator for a given "graphql" operator +var cqlOperators = map[string]string{ + "eq": "=", + "notEq": "!=", + "gt": ">", + "gte": ">=", + "lt": "<", + "lte": "<=", + "in": "IN", +} + +var operatorsInputTypes = map[gocql.Type]*graphql.InputObject{ + gocql.TypeInt: intOperatorType, + gocql.TypeTinyInt: intOperatorType, + gocql.TypeSmallInt: intOperatorType, + gocql.TypeText: stringOperatorType, + gocql.TypeVarchar: stringOperatorType, + gocql.TypeFloat: floatOperatorType, + gocql.TypeDouble: floatOperatorType, + gocql.TypeUUID: operatorType(uuid), + gocql.TypeTimestamp: operatorType(timestamp), + gocql.TypeTimeUUID: operatorType(timeuuid), + gocql.TypeInet: operatorType(ip), + gocql.TypeBigInt: operatorType(bigint), + gocql.TypeDecimal: operatorType(decimal), +} + +func operatorType(graphqlType graphql.Type) *graphql.InputObject { + return graphql.NewInputObject(graphql.InputObjectConfig{ + Name: graphqlType.Name() + "FilterInput", + Fields: graphql.InputObjectConfigFieldMap{ + "eq": {Type: graphqlType}, + "notEq": {Type: graphqlType}, + "gt": {Type: graphqlType}, + "gte": {Type: graphqlType}, + "lt": {Type: graphqlType}, + "lte": {Type: graphqlType}, + "in": {Type: graphql.NewList(graphqlType)}, + }, + }) +} diff --git a/graphql/routes.go b/graphql/routes.go new file mode 100644 index 0000000..e65c5b1 --- /dev/null +++ b/graphql/routes.go @@ -0,0 +1,154 @@ +package graphql + +import ( + "context" + "encoding/json" + "fmt" + "github.com/graphql-go/graphql" + "github.com/riptano/data-endpoints/config" + "github.com/riptano/data-endpoints/db" + "net/http" + "path" + "time" +) + +var 
systemKeyspaces = []string{ + "system", "system_auth", "system_distributed", "system_schema", "system_traces", "system_views", "system_virtual_schema", + "dse_insights", "dse_insights_local", "dse_leases", "dse_perf", "dse_security", "dse_system", "dse_system_local", + "solr_admin", +} + +type executeQueryFunc func(query string, ctx context.Context) *graphql.Result + +type RouteGenerator struct { + dbClient *db.Db + ksExcluded []string + updateInterval time.Duration + schemaGen *SchemaGenerator +} + +type Route struct { + Method string + Pattern string + HandlerFunc http.HandlerFunc +} + +type Config struct { + ksExcluded []string +} + +type requestBody struct { + Query string `json:"query"` +} + +func NewRouteGenerator(dbClient *db.Db, ksExcluded []string, updateInterval time.Duration, naming config.NamingConvention) *RouteGenerator { + return &RouteGenerator{ + dbClient: dbClient, + ksExcluded: ksExcluded, + updateInterval: updateInterval, + schemaGen: NewSchemaGenerator(dbClient, naming), + } +} + +func (rg *RouteGenerator) Routes(prefixPattern string) ([]Route, error) { + ksNames, err := rg.dbClient.Keyspaces() + if err != nil { + return nil, fmt.Errorf("unable to retrieve keyspace names: %s", err) + } + + routes := make([]Route, 0, len(ksNames)+1) + + ksManageRoutes, err := rg.RoutesKeyspaceManagement(prefixPattern) + if err != nil { + return nil, err + } + routes = append(routes, ksManageRoutes...) + + for _, ksName := range ksNames { + if isKeyspaceExcluded(ksName, systemKeyspaces) || isKeyspaceExcluded(ksName, rg.ksExcluded) { + continue + } + ksRoutes, err := rg.RoutesKeyspace(path.Join(prefixPattern, ksName), ksName) + if err != nil { + return nil, err + } + routes = append(routes, ksRoutes...) 
+ } + + return routes, nil +} + +func (rg *RouteGenerator) RoutesKeyspaceManagement(pattern string) ([]Route, error) { + schema, err := rg.schemaGen.BuildKeyspaceSchema() + if err != nil { + return nil, fmt.Errorf("unable to build graphql schema for keyspace management: %s", err) + } + return routesForSchema(pattern, func(query string, ctx context.Context) *graphql.Result { + return executeQuery(query, ctx, schema) + }), nil +} + +func (rg *RouteGenerator) RoutesKeyspace(pattern string, ksName string) ([]Route, error) { + updater, err := NewUpdater(rg.schemaGen, ksName, rg.updateInterval) + if err != nil { + return nil, fmt.Errorf("unable to build graphql schema for keyspace '%s': %s", ksName, err) + } + go updater.Start() + return routesForSchema(pattern, func(query string, ctx context.Context) *graphql.Result { + return executeQuery(query, ctx, *updater.Schema()) + }), nil +} + +func isKeyspaceExcluded(ksName string, ksExcluded []string) bool { + for _, excluded := range ksExcluded { + if ksName == excluded { + return true + } + } + return false +} + +func routesForSchema(pattern string, execute executeQueryFunc) []Route { + return []Route{ + { + Method: http.MethodGet, + Pattern: pattern, + HandlerFunc: func(w http.ResponseWriter, r *http.Request) { + result := execute(r.URL.Query().Get("query"), r.Context()) + json.NewEncoder(w).Encode(result) + }, + }, + { + Method: http.MethodPost, + Pattern: pattern, + HandlerFunc: func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + http.Error(w, "No request body", 400) + return + } + + var body requestBody + err := json.NewDecoder(r.Body).Decode(&body) + if err != nil { + http.Error(w, "Request body is invalid", 400) + return + } + + result := execute(body.Query, r.Context()) + json.NewEncoder(w).Encode(result) + }, + }, + } +} + +func executeQuery(query string, ctx context.Context, schema graphql.Schema) *graphql.Result { + result := graphql.Do(graphql.Params{ + Schema: schema, + RequestString: query, + 
Context: ctx, + }) + if len(result.Errors) > 0 { + fmt.Printf("wrong result, unexpected errors: %v", result.Errors) + } + return result +} diff --git a/graphql/scalars.go b/graphql/scalars.go new file mode 100644 index 0000000..246ed31 --- /dev/null +++ b/graphql/scalars.go @@ -0,0 +1,186 @@ +package graphql + +import ( + "encoding" + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" + "github.com/graphql-go/graphql/language/ast" + "gopkg.in/inf.v0" + "net" + "strconv" + "time" +) + +var timestamp = newStringScalar( + "Timestamp", "The `Timestamp` scalar type represents a DateTime."+ + " The Timestamp is serialized as an RFC 3339 quoted string", + serializeTimestamp, + deserializeTimestamp) + +var uuid = newStringScalar( + "Uuid", "The `Uuid` scalar type represents a CQL uuid as a string.", serializeUuid, deserializeUuid) + +var timeuuid = newStringScalar( + "TimeUuid", "The `TimeUuid` scalar type represents a CQL timeuuid as a string.", serializeUuid, deserializeUuid) + +var ip = newStringScalar( + "Inet", "The `Inet` scalar type represents a CQL inet as a string.", serializeIp, deserializeIp) + +var bigint = newStringScalar( + "BigInt", "The `BigInt` scalar type represents a CQL bigint (64-bit signed integer) as a string.", + serializeBigInt, deserializeBigInt) + +var decimal = newStringScalar( + "Decimal", "The `Decimal` scalar type represents a CQL decimal as a string.", + serializeDecimal, deserializeDecimal) + +func newStringScalar( + name string, description string, serializeFn graphql.SerializeFn, deserializeFn graphql.ParseValueFn, +) *graphql.Scalar { + return graphql.NewScalar(graphql.ScalarConfig{ + Name: name, + Description: description, + Serialize: serializeFn, + ParseValue: deserializeFn, + ParseLiteral: parseLiteralFromStringHandler(deserializeFn), + }) +} + +var deserializeUuid = deserializeFromUnmarshaler(func() encoding.TextUnmarshaler { + return &gocql.UUID{} +}) + +var 
deserializeTimestamp = deserializeFromUnmarshaler(func() encoding.TextUnmarshaler { + return &time.Time{} +}) + +var deserializeIp = deserializeFromUnmarshaler(func() encoding.TextUnmarshaler { + return &net.IP{} +}) + +var deserializeDecimal = deserializeFromUnmarshaler(func() encoding.TextUnmarshaler { + return &inf.Dec{} +}) + +func parseLiteralFromStringHandler(parser graphql.ParseValueFn) graphql.ParseLiteralFn { + return func(valueAST ast.Value) interface{} { + switch valueAST := valueAST.(type) { + case *ast.StringValue: + return parser(valueAST.Value) + } + return nil + } +} + +func deserializeBigInt(value interface{}) interface{} { + switch value := value.(type) { + case []byte: + return deserializeBigInt(string(value)) + case string: + intValue, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil + } + return intValue + case *string: + if value == nil { + return nil + } + return deserializeBigInt(*value) + default: + return nil + } +} + +func deserializeFromUnmarshaler(factory func() encoding.TextUnmarshaler) graphql.ParseValueFn { + var fn func(value interface{}) interface{} + + fn = func(value interface{}) interface{} { + switch value := value.(type) { + case []byte: + t := factory() + err := t.UnmarshalText(value) + if err != nil { + return nil + } + + return t + case string: + return fn([]byte(value)) + case *string: + if value == nil { + return nil + } + return fn([]byte(*value)) + default: + return nil + } + } + + return fn +} + +func serializeTimestamp(value interface{}) interface{} { + switch value := value.(type) { + case time.Time: + return marshalText(value) + case *time.Time: + return marshalText(value) + default: + return nil + } +} + +func serializeUuid(value interface{}) interface{} { + switch value := value.(type) { + case gocql.UUID: + return marshalText(value) + case *gocql.UUID: + return marshalText(value) + default: + return nil + } +} + +func serializeIp(value interface{}) interface{} { + switch value := value.(type) 
{ + case net.IP: + return marshalText(value) + case *net.IP: + return marshalText(value) + default: + return nil + } +} + +func serializeBigInt(value interface{}) interface{} { + switch value := value.(type) { + case int64: + return strconv.FormatInt(value, 10) + case *int64: + return strconv.FormatInt(*value, 10) + default: + return nil + } +} + +func serializeDecimal(value interface{}) interface{} { + switch value := value.(type) { + case inf.Dec: + return value.String() + case *inf.Dec: + return value.String() + default: + return nil + } +} + +func marshalText(value encoding.TextMarshaler) *string { + buff, err := value.MarshalText() + if err != nil { + return nil + } + + var s = string(buff) + return &s +} diff --git a/graphql/schema.go b/graphql/schema.go new file mode 100644 index 0000000..3583ce4 --- /dev/null +++ b/graphql/schema.go @@ -0,0 +1,441 @@ +package graphql + +import ( + "fmt" + "github.com/mitchellh/mapstructure" + "github.com/riptano/data-endpoints/config" + "github.com/riptano/data-endpoints/types" + "log" + "strings" + + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" + "github.com/riptano/data-endpoints/db" +) + +const ( + insertPrefix = "insert" + deletePrefix = "delete" + updatePrefix = "update" +) + +const AuthUserOrRole = "userOrRole" + +type SchemaGenerator struct { + dbClient *db.Db + naming config.NamingConvention +} + +func buildType(typeInfo gocql.TypeInfo) (graphql.Output, error) { + switch typeInfo.Type() { + case gocql.TypeInt, gocql.TypeTinyInt, gocql.TypeSmallInt: + return graphql.Int, nil + case gocql.TypeFloat, gocql.TypeDouble: + return graphql.Float, nil + case gocql.TypeText, gocql.TypeVarchar: + return graphql.String, nil + case gocql.TypeBigInt: + return bigint, nil + case gocql.TypeDecimal: + return decimal, nil + case gocql.TypeBoolean: + return graphql.Boolean, nil + case gocql.TypeUUID: + return 
uuid, nil + case gocql.TypeTimeUUID: + return timeuuid, nil + case gocql.TypeTimestamp: + return timestamp, nil + case gocql.TypeInet: + return ip, nil + default: + return nil, fmt.Errorf("Unsupported type %s", typeInfo.Type().String()) + } +} + +func NewSchemaGenerator(dbClient *db.Db, naming config.NamingConvention) *SchemaGenerator { + return &SchemaGenerator{ + dbClient: dbClient, + naming: naming, + } +} + +func (sg *SchemaGenerator) buildQueriesFields(schema *KeyspaceGraphQLSchema, tables map[string]*gocql.TableMetadata, resolve graphql.FieldResolveFn) graphql.Fields { + fields := graphql.Fields{} + for name, table := range tables { + if schema.ignoredTables[table.Name] { + continue + } + + fields[sg.naming.ToGraphQLOperation("", name)] = &graphql.Field{ + Type: schema.resultSelectTypes[table.Name], + Args: graphql.FieldConfigArgument{ + "data": {Type: graphql.NewNonNull(schema.tableScalarInputTypes[table.Name])}, + "orderBy": {Type: graphql.NewList(schema.orderEnums[table.Name])}, + "options": {Type: inputQueryOptions}, + }, + Resolve: resolve, + } + + fields[sg.naming.ToGraphQLOperation("", name)+"Filter"] = &graphql.Field{ + Type: schema.resultSelectTypes[table.Name], + Args: graphql.FieldConfigArgument{ + "filter": {Type: graphql.NewNonNull(schema.tableOperatorInputTypes[table.Name])}, + "orderBy": {Type: graphql.NewList(schema.orderEnums[table.Name])}, + "options": {Type: inputQueryOptions}, + }, + Resolve: resolve, + } + } + fields["table"] = &graphql.Field{ + Type: tableType, + Args: graphql.FieldConfigArgument{ + "name": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + }, + Resolve: resolve, + } + fields["tables"] = &graphql.Field{ + Type: graphql.NewList(tableType), + Resolve: resolve, + } + return fields +} + +func (sg *SchemaGenerator) buildQuery(schema *KeyspaceGraphQLSchema, tables map[string]*gocql.TableMetadata, resolve graphql.FieldResolveFn) *graphql.Object { + return graphql.NewObject( + graphql.ObjectConfig{ + 
Name: "TableQuery", + Fields: sg.buildQueriesFields(schema, tables, resolve), + }) +} + +func (sg *SchemaGenerator) buildMutationFields(schema *KeyspaceGraphQLSchema, tables map[string]*gocql.TableMetadata, resolve graphql.FieldResolveFn) graphql.Fields { + fields := graphql.Fields{} + for name, table := range tables { + if schema.ignoredTables[table.Name] { + continue + } + fields[sg.naming.ToGraphQLOperation(insertPrefix, name)] = &graphql.Field{ + Type: schema.resultUpdateTypes[table.Name], + Args: graphql.FieldConfigArgument{ + "data": {Type: graphql.NewNonNull(schema.tableScalarInputTypes[table.Name])}, + "ifNotExists": {Type: graphql.Boolean}, + "options": {Type: inputMutationOptions}, + }, + Resolve: resolve, + } + + fields[sg.naming.ToGraphQLOperation(deletePrefix, name)] = &graphql.Field{ + Type: schema.resultUpdateTypes[table.Name], + Args: graphql.FieldConfigArgument{ + "data": {Type: graphql.NewNonNull(schema.tableScalarInputTypes[table.Name])}, + "ifExists": {Type: graphql.Boolean}, + "ifCondition": {Type: schema.tableOperatorInputTypes[table.Name]}, + "options": {Type: inputMutationOptions}, + }, + Resolve: resolve, + } + + fields[sg.naming.ToGraphQLOperation(updatePrefix, name)] = &graphql.Field{ + Type: schema.resultUpdateTypes[table.Name], + Args: graphql.FieldConfigArgument{ + "data": {Type: graphql.NewNonNull(schema.tableScalarInputTypes[table.Name])}, + "ifExists": {Type: graphql.Boolean}, + "ifCondition": {Type: schema.tableOperatorInputTypes[table.Name]}, + "options": {Type: inputMutationOptions}, + }, + Resolve: resolve, + } + } + fields["createTable"] = &graphql.Field{ + Type: graphql.Boolean, + Args: graphql.FieldConfigArgument{ + "name": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + "partitionKeys": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.NewList(columnInput)), + }, + "clusteringKeys": &graphql.ArgumentConfig{ + Type: graphql.NewList(clusteringKeyInput), + }, + "values": 
&graphql.ArgumentConfig{ + Type: graphql.NewList(columnInput), + }, + }, + Resolve: resolve, + } + fields["dropTable"] = &graphql.Field{ + Type: graphql.Boolean, + Args: graphql.FieldConfigArgument{ + "name": &graphql.ArgumentConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + }, + Resolve: resolve, + } + return fields +} + +func (sg *SchemaGenerator) buildMutation(schema *KeyspaceGraphQLSchema, tables map[string]*gocql.TableMetadata, resolveFn graphql.FieldResolveFn) *graphql.Object { + return graphql.NewObject( + graphql.ObjectConfig{ + Name: "TableMutation", + Fields: sg.buildMutationFields(schema, tables, resolveFn), + }) +} + +// Build GraphQL schema for tables in the provided keyspace metadata +func (sg *SchemaGenerator) BuildSchema(keyspaceName string) (graphql.Schema, error) { + keyspace, err := sg.dbClient.Keyspace(keyspaceName) + if err != nil { + return graphql.Schema{}, err + } + + log.Printf("Building schema for %s", keyspace.Name) + + keyspaceSchema := &KeyspaceGraphQLSchema{ + ignoredTables: make(map[string]bool), + } + if err := keyspaceSchema.BuildTypes(keyspace, sg.naming); err != nil { + return graphql.Schema{}, err + } + + return graphql.NewSchema( + graphql.SchemaConfig{ + Query: sg.buildQuery(keyspaceSchema, keyspace.Tables, sg.queryFieldResolver(keyspace)), + Mutation: sg.buildMutation(keyspaceSchema, keyspace.Tables, sg.mutationFieldResolver(keyspace)), + }, + ) +} + +func (sg *SchemaGenerator) queryFieldResolver(keyspace *gocql.KeyspaceMetadata) graphql.FieldResolveFn { + return func(params graphql.ResolveParams) (interface{}, error) { + fieldName := params.Info.FieldName + switch fieldName { + case "table": + return sg.getTable(keyspace, params.Args) + case "tables": + return sg.getTables(keyspace) + default: + var table *gocql.TableMetadata + table, tableFound := keyspace.Tables[sg.naming.ToCQLTable(fieldName)] + var data map[string]interface{} + if params.Args["data"] != nil { + data = params.Args["data"].(map[string]interface{}) 
+ } else { + data = params.Args["filter"].(map[string]interface{}) + } + + var whereClause []types.ConditionItem + + if tableFound { + whereClause = make([]types.ConditionItem, 0, len(data)) + for key, value := range data { + whereClause = append(whereClause, types.ConditionItem{ + Column: sg.naming.ToCQLColumn(key), + Operator: "=", + Value: value, + }) + } + } else { + if strings.HasSuffix(fieldName, "Filter") { + table, tableFound = keyspace.Tables[sg.naming.ToCQLTable(strings.TrimSuffix(fieldName, "Filter"))] + if !tableFound { + return nil, fmt.Errorf("unable to find table '%s'", params.Info.FieldName) + } + + whereClause = sg.adaptCondition(data) + } + } + + var orderBy []interface{} + var options types.QueryOptions + if err := mapstructure.Decode(params.Args["options"], &options); err != nil { + return nil, err + } + + if params.Args["orderBy"] != nil { + orderBy = params.Args["orderBy"].([]interface{}) + } + + userOrRole, err := checkAuthUserOrRole(params) + if err != nil { + return nil, err + } + + result, err := sg.dbClient.Select(&db.SelectInfo{ + Keyspace: keyspace.Name, + Table: table.Name, + Where: whereClause, + OrderBy: parseColumnOrder(orderBy), + Options: &options, + }, db.NewQueryOptions().WithUserOrRole(userOrRole)) + + if err != nil { + return nil, err + } + + return &types.QueryResult{ + PageState: result.PageState(), + Values: sg.adaptResultValues(result.Values()), + }, nil + } + } +} + +func (sg *SchemaGenerator) adaptCondition(data map[string]interface{}) []types.ConditionItem { + result := make([]types.ConditionItem, 0, len(data)) + for key, value := range data { + if value == nil { + continue + } + mapValue := value.(map[string]interface{}) + + for operatorName, itemValue := range mapValue { + result = append(result, types.ConditionItem{ + Column: sg.naming.ToCQLColumn(key), + Operator: cqlOperators[operatorName], + Value: itemValue, + }) + } + } + return result +} + +func (sg *SchemaGenerator) adaptResultValues(values 
[]map[string]interface{}) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(values)) + for _, item := range values { + resultItem := make(map[string]interface{}) + for k, v := range item { + resultItem[sg.naming.ToGraphQLField(k)] = v + } + result = append(result, resultItem) + } + + return result +} + +func (sg *SchemaGenerator) mutationFieldResolver(keyspace *gocql.KeyspaceMetadata) graphql.FieldResolveFn { + return func(params graphql.ResolveParams) (interface{}, error) { + fieldName := params.Info.FieldName + switch fieldName { + case "createTable": + return sg.createTable(keyspace.Name, params) + case "dropTable": + return sg.dropTable(keyspace.Name, params) + default: + operation, typeName := mutationPrefix(fieldName) + if table, ok := keyspace.Tables[sg.naming.ToCQLTable(typeName)]; ok { + data := params.Args["data"].(map[string]interface{}) + columnNames := make([]string, 0, len(data)) + queryParams := make([]interface{}, 0, len(data)) + + for key, value := range data { + columnNames = append(columnNames, sg.naming.ToCQLColumn(key)) + queryParams = append(queryParams, value) + } + + var options map[string]interface{} + + if params.Args["options"] != nil { + options = params.Args["options"].(map[string]interface{}) + } + + userOrRole, err := checkAuthUserOrRole(params) + if err != nil { + return nil, err + } + queryOptions := db.NewQueryOptions().WithUserOrRole(userOrRole) + switch operation { + case insertPrefix: + ttl := -1 + if options != nil { + ttl = options["ttl"].(int) + } + ifNotExists := params.Args["ifNotExists"] == true + return sg.dbClient.Insert(&db.InsertInfo{ + Keyspace: keyspace.Name, + Table: table.Name, + Columns: columnNames, + QueryParams: queryParams, + IfNotExists: ifNotExists, + TTL: ttl, + }, queryOptions) + case deletePrefix: + var ifCondition []types.ConditionItem + if params.Args["ifCondition"] != nil { + ifCondition = sg.adaptCondition(params.Args["ifCondition"].(map[string]interface{})) + } + return 
sg.dbClient.Delete(&db.DeleteInfo{ + Keyspace: keyspace.Name, + Table: table.Name, + Columns: columnNames, + QueryParams: queryParams, + IfCondition: ifCondition, + IfExists: params.Args["ifExists"] == true}, queryOptions) + case updatePrefix: + var ifCondition []types.ConditionItem + if params.Args["ifCondition"] != nil { + ifCondition = sg.adaptCondition(params.Args["ifCondition"].(map[string]interface{})) + } + ttl := -1 + if options != nil { + ttl = options["ttl"].(int) + } + return sg.dbClient.Update(&db.UpdateInfo{ + Keyspace: keyspace.Name, + Table: table, + Columns: columnNames, + QueryParams: queryParams, + IfCondition: ifCondition, + TTL: ttl, + IfExists: params.Args["ifExists"] == true}, queryOptions) + } + + return false, fmt.Errorf("operation '%s' not supported", operation) + } else { + return nil, fmt.Errorf("unable to find table for type name '%s'", params.Info.FieldName) + } + + } + } +} + +func mutationPrefix(value string) (string, string) { + mutationPrefixes := []string{insertPrefix, deletePrefix, updatePrefix} + + for _, prefix := range mutationPrefixes { + if strings.Index(value, prefix) == 0 { + return prefix, value[len(prefix):] + } + } + + panic("Unsupported mutation") +} + +func parseColumnOrder(values []interface{}) []db.ColumnOrder { + result := make([]db.ColumnOrder, 0, len(values)) + + for _, value := range values { + strValue := value.(string) + index := strings.LastIndex(strValue, "_") + result = append(result, db.ColumnOrder{ + Column: strValue[0:index], + Order: strValue[index+1:], + }) + } + + return result +} + +func checkAuthUserOrRole(params graphql.ResolveParams) (string, error) { + // TODO: Return an error if we're expecting a user/role, but one isn't provided + value := params.Context.Value(AuthUserOrRole) + if value == nil { + return "", nil + } + return value.(string), nil +} diff --git a/graphql/table.go b/graphql/table.go new file mode 100644 index 0000000..2dafbc2 --- /dev/null +++ b/graphql/table.go @@ -0,0 +1,382 @@ 
+package graphql + +import ( + "fmt" + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" + "github.com/mitchellh/mapstructure" + "github.com/riptano/data-endpoints/db" +) + +const ( + typeInt = iota + typeVarchar + typeText + typeUUID + // ... +) + +const ( + kindUnknown = iota + kindPartition + kindClustering + kindRegular + kindStatic + kindCompact +) + +type dataTypeValue struct { + Basic int `json:"basic"` + SubTypes []*dataTypeValue `json:"subTypes"` +} + +type columnValue struct { + Name string `json:"name"` + Kind int `json:"kind"` + Type *dataTypeValue `json:"type"` +} + +type clusteringInfo struct { + // mapstructure.Decode() calls don't work when embedding values + //columnValue //embedded + Name string `json:"name"` + Kind int `json:"kind"` + Type *dataTypeValue `json:"type"` + Order string `json:"order"` +} + +type tableValue struct { + Name string `json:"name"` + Columns []*columnValue `json:"columns"` +} + +var basicTypeEnum = graphql.NewEnum(graphql.EnumConfig{ + Name: "BasicType", + Values: graphql.EnumValueConfigMap{ + "INT": &graphql.EnumValueConfig{ + Value: typeInt, + }, + "VARCHAR": &graphql.EnumValueConfig{ + Value: typeVarchar, + }, + "TEXT": &graphql.EnumValueConfig{ + Value: typeText, + }, + "UUID": &graphql.EnumValueConfig{ + Value: typeUUID, + }, + // ... 
+ }, +}) + +var dataType = buildDataType() + +func buildDataType() *graphql.Object { + dataType := graphql.NewObject(graphql.ObjectConfig{ + Name: "DataType", + Fields: graphql.Fields{ + "basic": &graphql.Field{ + Type: graphql.NewNonNull(basicTypeEnum), + }, + }, + }) + dataType.AddFieldConfig("subTypes", &graphql.Field{ + Type: graphql.NewList(dataType), + }) + return dataType +} + +var columnKindEnum = graphql.NewEnum(graphql.EnumConfig{ + Name: "ColumnKind", + Values: graphql.EnumValueConfigMap{ + "UNKNOWN": &graphql.EnumValueConfig{ + Value: kindUnknown, + }, + "PARTITION": &graphql.EnumValueConfig{ + Value: kindPartition, + }, + "CLUSTERING": &graphql.EnumValueConfig{ + Value: kindClustering, + }, + "REGULAR": &graphql.EnumValueConfig{ + Value: kindRegular, + }, + "STATIC": &graphql.EnumValueConfig{ + Value: kindStatic, + }, + "COMPACT": &graphql.EnumValueConfig{ + Value: kindCompact, + }, + }, +}) + +var columnType = graphql.NewObject(graphql.ObjectConfig{ + Name: "Column", + Fields: graphql.Fields{ + "name": &graphql.Field{ + Type: graphql.NewNonNull(graphql.String), + }, + "kind": &graphql.Field{ + Type: graphql.NewNonNull(columnKindEnum), + }, + "type": &graphql.Field{ + Type: graphql.NewNonNull(dataType), + }, + }, +}) + +var dataTypeInput = buildDataTypeInput() + +func buildDataTypeInput() *graphql.InputObject { + dataType := graphql.NewInputObject(graphql.InputObjectConfig{ + Name: "DataTypeInput", + Fields: graphql.InputObjectConfigFieldMap{ + "basic": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(basicTypeEnum), + }, + }, + }) + dataType.AddFieldConfig("subTypes", &graphql.InputObjectFieldConfig{ + Type: graphql.NewList(dataType), + }) + return dataType +} + +var columnInput = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: "ColumnInput", + Fields: graphql.InputObjectConfigFieldMap{ + "name": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + "type": &graphql.InputObjectFieldConfig{ + Type: 
graphql.NewNonNull(dataTypeInput), + }, + }, +}) + +var clusteringKeyInput = graphql.NewInputObject(graphql.InputObjectConfig{ + Name: "ClusteringKeyInput", + Fields: graphql.InputObjectConfigFieldMap{ + "name": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(graphql.String), + }, + "type": &graphql.InputObjectFieldConfig{ + Type: graphql.NewNonNull(dataTypeInput), + }, + "order": &graphql.InputObjectFieldConfig{ + Type: graphql.String, + }, + }, +}) + +var tableType = graphql.NewObject(graphql.ObjectConfig{ + Name: "Table", + Fields: graphql.Fields{ + "name": &graphql.Field{ + Type: graphql.NewNonNull(graphql.String), + }, + "columns": &graphql.Field{ + Type: graphql.NewList(columnType), + }, + }, +}) + +func (sg *SchemaGenerator) getTable(keyspace *gocql.KeyspaceMetadata, args map[string]interface{}) (interface{}, error) { + name := args["name"].(string) + table := keyspace.Tables[sg.naming.ToCQLTable(name)] + if table == nil { + return nil, fmt.Errorf("unable to find table '%s'", name) + } + return &tableValue{ + Name: sg.naming.ToGraphQLType(name), + Columns: sg.toColumnValues(table.Columns), + }, nil +} + +func (sg *SchemaGenerator) getTables(keyspace *gocql.KeyspaceMetadata) (interface{}, error) { + tableValues := make([]*tableValue, 0) + for _, table := range keyspace.Tables { + tableValues = append(tableValues, &tableValue{ + Name: sg.naming.ToGraphQLType(table.Name), + Columns: sg.toColumnValues(table.Columns), + }) + } + return tableValues, nil +} + +func decodeColumns(columns []interface{}) ([]*gocql.ColumnMetadata, error) { + columnValues := make([]*gocql.ColumnMetadata, 0) + for _, column := range columns { + var value columnValue + if err := mapstructure.Decode(column, &value); err != nil { + return nil, err + } + + // Adapt from GraphQL column to gocql column + cqlColumn := &gocql.ColumnMetadata{ + Name: value.Name, + Kind: toDbColumnKind(value.Kind), + Type: toDbColumnType(value.Type), + } + + columnValues = append(columnValues, 
cqlColumn) + } + return columnValues, nil +} + +func decodeClusteringInfo(columns []interface{}) ([]*gocql.ColumnMetadata, error) { + columnValues := make([]*gocql.ColumnMetadata, 0) + for _, column := range columns { + var value clusteringInfo + if err := mapstructure.Decode(column, &value); err != nil { + return nil, err + } + + // Adapt from GraphQL column to gocql column + cqlColumn := &gocql.ColumnMetadata{ + Name: value.Name, + Kind: toDbColumnKind(value.Kind), + Type: toDbColumnType(value.Type), + //TODO: Use enums + ClusteringOrder: value.Order, + } + + columnValues = append(columnValues, cqlColumn) + } + return columnValues, nil +} + +func (sg *SchemaGenerator) createTable(ksName string, params graphql.ResolveParams) (interface{}, error) { + var values []*gocql.ColumnMetadata = nil + var clusteringKeys []*gocql.ColumnMetadata = nil + args := params.Args + name := args["name"].(string) + + partitionKeys, err := decodeColumns(args["partitionKeys"].([]interface{})) + + if err != nil { + return false, err + } + + if args["values"] != nil { + if values, err = decodeColumns(args["values"].([]interface{})); err != nil { + return nil, err + } + } + + if args["clusteringKeys"] != nil { + if clusteringKeys, err = decodeClusteringInfo(args["clusteringKeys"].([]interface{})); err != nil { + return nil, err + } + } + + userOrRole, err := checkAuthUserOrRole(params) + if err != nil { + return nil, err + } + return sg.dbClient.CreateTable(&db.CreateTableInfo{ + Keyspace: ksName, + Table: name, + PartitionKeys: partitionKeys, + ClusteringKeys: clusteringKeys, + Values: values}, db.NewQueryOptions().WithUserOrRole(userOrRole)) +} + +func (sg *SchemaGenerator) dropTable(ksName string, params graphql.ResolveParams) (interface{}, error) { + name := sg.naming.ToCQLTable(params.Args["name"].(string)) + userOrRole, err := checkAuthUserOrRole(params) + if err != nil { + return nil, err + } + return sg.dbClient.DropTable(&db.DropTableInfo{ + Keyspace: ksName, + Table: name}, 
db.NewQueryOptions().WithUserOrRole(userOrRole)) +} + +func toColumnKind(kind gocql.ColumnKind) int { + switch kind { + case gocql.ColumnPartitionKey: + return kindPartition + case gocql.ColumnClusteringKey: + return kindClustering + case gocql.ColumnRegular: + return kindRegular + case gocql.ColumnStatic: + return kindStatic + case gocql.ColumnCompact: + return kindCompact + default: + return kindUnknown + } +} + +func toDbColumnKind(kind int) gocql.ColumnKind { + switch kind { + case kindPartition: + return gocql.ColumnPartitionKey + case kindClustering: + return gocql.ColumnClusteringKey + case kindRegular: + return gocql.ColumnRegular + case kindStatic: + return gocql.ColumnStatic + case kindCompact: + return gocql.ColumnCompact + default: + return kindUnknown + } +} + +func toColumnType(info gocql.TypeInfo) *dataTypeValue { + switch info.Type() { + case gocql.TypeInt: + return &dataTypeValue{ + Basic: typeInt, + SubTypes: nil, + } + case gocql.TypeVarchar: + return &dataTypeValue{ + Basic: typeVarchar, + SubTypes: nil, + } + case gocql.TypeText: + return &dataTypeValue{ + Basic: typeText, + SubTypes: nil, + } + case gocql.TypeUUID: + return &dataTypeValue{ + Basic: typeUUID, + SubTypes: nil, + } + // ... 
+ } + return nil +} + +func toDbColumnType(info *dataTypeValue) gocql.TypeInfo { + switch info.Basic { + case typeInt: + return gocql.NewNativeType(0, gocql.TypeInt, "") + case typeVarchar: + return gocql.NewNativeType(0, gocql.TypeVarchar, "") + case typeText: + return gocql.NewNativeType(0, gocql.TypeText, "") + case typeUUID: + return gocql.NewNativeType(0, gocql.TypeUUID, "") + } + + return nil +} + +func (sg *SchemaGenerator) toColumnValues(columns map[string]*gocql.ColumnMetadata) []*columnValue { + columnValues := make([]*columnValue, 0) + for _, column := range columns { + columnValues = append(columnValues, &columnValue{ + Name: sg.naming.ToGraphQLField(column.Name), + Kind: toColumnKind(column.Kind), + Type: toColumnType(column.Type), + }) + } + return columnValues +} diff --git a/graphql/updater.go b/graphql/updater.go new file mode 100644 index 0000000..4e1a23c --- /dev/null +++ b/graphql/updater.go @@ -0,0 +1,97 @@ +package graphql + +import ( + "context" + "fmt" + "github.com/gocql/gocql" + "github.com/graphql-go/graphql" + "os" + "sync" + "time" +) + +type SchemaUpdater struct { + ctx context.Context + cancel context.CancelFunc + mutex sync.Mutex + updateInterval time.Duration + schema *graphql.Schema + schemaGen *SchemaGenerator + ksName string + schemaVersion gocql.UUID +} + +func (su *SchemaUpdater) Schema() *graphql.Schema { + // This should be pretty fast, but an atomic pointer swap wouldn't require a lock here + su.mutex.Lock() + defer su.mutex.Unlock() + return su.schema +} + +func NewUpdater(schemaGen *SchemaGenerator, ksName string, updateInterval time.Duration) (*SchemaUpdater, error) { + schema, err := schemaGen.BuildSchema(ksName) + if err != nil { + return nil, err + } + updater := &SchemaUpdater{ + ctx: nil, + cancel: nil, + mutex: sync.Mutex{}, + updateInterval: updateInterval, + schema: &schema, + schemaGen: schemaGen, + ksName: ksName, + } + return updater, nil +} + +func (su *SchemaUpdater) 
Start() { + su.ctx, su.cancel = context.WithCancel(context.Background()) + for { + result, err := su.schemaGen.dbClient.Execute("SELECT schema_version FROM system.local", nil) + + if err != nil { + // TODO: Log error + fmt.Fprintf(os.Stderr, "error attempting to determine schema version: %s", err) + } + + shouldUpdate := false + for _, row := range result.Values() { + if schemaVersion, ok := row["schema_version"].(gocql.UUID); ok { + if schemaVersion != su.schemaVersion { + shouldUpdate = true + su.schemaVersion = schemaVersion + } + } + } + + if shouldUpdate { + schema, err := su.schemaGen.BuildSchema(su.ksName) + if err != nil { + // TODO: Log error + fmt.Fprintf(os.Stderr, "error trying to build graphql schema for keyspace '%s': %s", su.ksName, err) + } else { + su.mutex.Lock() + su.schema = &schema + su.mutex.Unlock() + } + } + + if !su.sleep() { + return + } + } +} + +func (su *SchemaUpdater) Stop() { + su.cancel() +} + +func (su *SchemaUpdater) sleep() bool { + select { + case <-time.After(su.updateInterval): + return true + case <-su.ctx.Done(): + return false + } +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..a08b90e --- /dev/null +++ b/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "fmt" + "github.com/riptano/data-endpoints/endpoint" + "github.com/riptano/data-endpoints/graphql" + "log" + "net/http" + "os" + "strings" + + "github.com/julienschmidt/httprouter" +) + +func getEnvOrDefault(key string, defaultValue string) string { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + return value +} + +func main() { + hosts := getEnvOrDefault("DB_HOSTS", "127.0.0.1") + singleKsName := os.Getenv("SINGLE_KEYSPACE") + + cfg := endpoint.NewEndpointConfig(strings.Split(hosts, ",")...) 
+ cfg.DbUsername = os.Getenv("DB_USERNAME"); + cfg.DbPassword = os.Getenv("DB_PASSWORD"); + + endpoint, err := cfg.NewEndpoint() + if err != nil { + log.Fatalf("unable create new endpoint: %s", err) + } + + var routes []graphql.Route + if singleKsName != "" { // Single keyspace mode (useful for cloud) + routes, err = endpoint.RoutesKeyspaceGraphQL("/graphql", singleKsName) + } else { + routes, err = endpoint.RoutesGraphQL("/graphql") + } + + if err != nil { + log.Fatalf("unable to generate graphql routes: %s", err) + } + + router := httprouter.New() + for _, route := range routes { + router.HandlerFunc(route.Method, route.Pattern, route.HandlerFunc) + } + + finish := make(chan bool) + go listenAndServe(router, 8080) + // go listenAndServe(rest.ApiRouter(dbClient), 8081) + <-finish +} + +func listenAndServe(router *httprouter.Router, port int) { + fmt.Printf("Start listening on %d\n", port) + log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), router)) +} diff --git a/rest/router.go b/rest/router.go new file mode 100644 index 0000000..ae4d228 --- /dev/null +++ b/rest/router.go @@ -0,0 +1,56 @@ +package rest + +import ( + "encoding/json" + "fmt" + "github.com/julienschmidt/httprouter" + "github.com/riptano/data-endpoints/db" + "net/http" +) + +// jsonResult provides a basic root object in order to avoid using a scalar at root level. 
+type jsonResult struct { + Meta interface{} `json:"meta"` + Data interface{} `json:"data"` +} + +// ApiRouter gets the router for the REST API +func ApiRouter(dbClient *db.Db) *httprouter.Router { + router := httprouter.New() + router.GET("/", index) + router.GET("/keyspaces", keyspacesHandler(dbClient)) + return router +} + +func keyspacesHandler(dbClient *db.Db) httprouter.Handle { + return func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + data, err := dbClient.Keyspaces() + writeResponse(w, &data, err) + } +} + +func writeResponse(w http.ResponseWriter, data interface{}, err error) { + if err != nil { + writeErrorResponse(w, http.StatusInternalServerError, err.Error()) + return + } + + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(&jsonResult{Data: data}); err != nil { + writeErrorResponse(w, http.StatusInternalServerError, err.Error()) + } +} + +func writeErrorResponse(w http.ResponseWriter, errorCode int, errorMsg string) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(errorCode) + _ = json.NewEncoder(w).Encode(errorMsg) +} + +func index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + // TODO: Print API paths + if _, err := fmt.Fprint(w, "Welcome to the REST API!\n"); err != nil { + panic(err) + } +} diff --git a/store.cql b/store.cql new file mode 100644 index 0000000..b67b56e --- /dev/null +++ b/store.cql @@ -0,0 +1,73 @@ +// GraphQL schema: +// type BooksByAuthor { +// authorId: String +// firstname: String +// lastname: String +// title: String +// } +// +// type BooksByTitle { +// authorId: String +// pages: Int +// title: String +// year: Int +// } +// +// type Query { +// booksByAuthor( +// firstname: String! +// lastname: String! 
+// title: String +// ): [BooksByAuthor] +// booksByTitle(authorId: String, title: String!): [BooksByTitle] +// } + +// Example GraphQL query: +// query { +// booksByAuthor(firstname: "Ariel", lastname: "Stein") { +// title +// firstname +// lastname +// authorId +// } +// booksByTitle(title: "Book 4") { +// title +// pages +// year +// } +// } + +CREATE KEYSPACE IF NOT EXISTS store WITH replication = { + 'class': 'NetworkTopologyStrategy', 'dc1': '1' +}; + +DROP TABLE IF EXISTS store.books_by_title; +DROP TABLE IF EXISTS store.books_by_author; + +CREATE TABLE store.books_by_title ( + title text, + author_id uuid, + pages int, + year int, + PRIMARY KEY (title, author_id) +); + +INSERT INTO store.books_by_title (title, author_id, pages, year) VALUES ('Book 1', 1c3eb87a-9ce7-491e-9dd9-fb3c819875cf, 123, 1901); +INSERT INTO store.books_by_title (title, author_id, pages, year) VALUES ('Book 2', 8fec26ff-09a0-4c23-9c8b-8bbf8e198f12, 456, 1902); +INSERT INTO store.books_by_title (title, author_id, pages, year) VALUES ('Book 3', f8f5a9de-4bc6-4177-be64-87d0db7bf9be, 789, 2001); +INSERT INTO store.books_by_title (title, author_id, pages, year) VALUES ('Book 4', 9e390783-f4c2-4a6e-ac82-818d35cada68, 101, 2002); +INSERT INTO store.books_by_title (title, author_id, pages, year) VALUES ('Book 5', 9e390783-f4c2-4a6e-ac82-818d35cada68, 201, 2020); + +CREATE TABLE store.books_by_author ( + firstname text, + lastname text, + title text, + author_id uuid, + PRIMARY KEY ((firstname, lastname), title) +); + +INSERT INTO store.books_by_author (firstname, lastname, title, author_id) VALUES ('Mike', 'Hoff', 'Book 1', 1c3eb87a-9ce7-491e-9dd9-fb3c819875cf); +INSERT INTO store.books_by_author (firstname, lastname, title, author_id) VALUES ('Joe', 'Smith', 'Book 2', 8fec26ff-09a0-4c23-9c8b-8bbf8e198f12); +INSERT INTO store.books_by_author (firstname, lastname, title, author_id) VALUES ('Adam', 'Samsung', 'Book 3', f8f5a9de-4bc6-4177-be64-87d0db7bf9be); +INSERT INTO store.books_by_author 
(firstname, lastname, title, author_id) VALUES ('Ariel', 'Stein', 'Book 4', 9e390783-f4c2-4a6e-ac82-818d35cada68);
+INSERT INTO store.books_by_author (firstname, lastname, title, author_id) VALUES ('Ariel', 'Stein', 'Book 5', 9e390783-f4c2-4a6e-ac82-818d35cada68);
diff --git a/types/types.go b/types/types.go
new file mode 100644
index 0000000..682ca86
--- /dev/null
+++ b/types/types.go
@@ -0,0 +1,25 @@
+// types package contains the public API types
+// that are shared between both REST and GraphQL
+package types
+
+// ModificationResult describes the outcome of an insert/update/delete.
+type ModificationResult struct {
+	Applied bool                   `json:"applied"`
+	Value   map[string]interface{} `json:"value"`
+}
+
+// QueryResult is one page of rows plus the paging state needed to fetch the
+// next page.
+type QueryResult struct {
+	PageState string                   `json:"pageState"`
+	Values    []map[string]interface{} `json:"values"`
+}
+
+// QueryOptions carries client-supplied paging options.
+type QueryOptions struct {
+	PageState string `json:"pageState"`
+	PageSize  int    `json:"pageSize"`
+	// NOTE(review): the json tag previously duplicated "pageState", which makes
+	// encoding/json drop both conflicting fields and Limit unmarshal from the
+	// wrong key; fixed to "limit".
+	Limit int `json:"limit"`
+}
+
+// ConditionItem is a single CQL condition, e.g. column >= value.
+type ConditionItem struct {
+	Column   string
+	Operator string
+	Value    interface{}
+}